diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 40602517ca52..e51e42d9f646 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -5020,6 +5020,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Disables the PV optimizations forcing the HVM guest to
                        run as generic HVM guest with no PV drivers.
 
+       xen.event_eoi_delay=    [XEN]
+                       How long to delay EOI handling in case of event
+                       storms (jiffies). Default is 10.
+
+       xen.event_loop_timeout= [XEN]
+                       Time (jiffies) after which the event handling loop
+                       should start to delay EOI handling. Default is 2.
+
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
                        <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
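
For illustration, the two knobs documented above go on the kernel command
line, e.g. "xen.event_eoi_delay=10 xen.event_loop_timeout=2" (spelling out
the defaults). Since they are declared below in events_base.c as 0644
module parameters with the "xen." prefix, they should also be adjustable at
runtime under /sys/module/xen/parameters/, assuming the usual sysfs layout
for built-in parameters.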
diff --git a/Makefile b/Makefile
index c6fcfe4bfeed..27314b9f0fe6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 243
+SUBLEVEL = 244
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index df60b58691e7..1808c57ce161 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1117,7 +1117,7 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
                if (!filter->range || !filter->size)
                        return -EOPNOTSUPP;
 
-               if (!filter->inode) {
+               if (!filter->path.dentry) {
                        if (!valid_kernel_ip(filter->offset))
                                return -EINVAL;
 
@@ -1144,7 +1144,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
                return;
 
        list_for_each_entry(filter, &head->list, entry) {
-               if (filter->inode && !offs[range]) {
+               if (filter->path.dentry && !offs[range]) {
                        msr_a = msr_b = 0;
                } else {
                        /* apply the offset */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 85c1cc0305f3..f8a7aba4b095 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1248,6 +1248,14 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
        return 0;
 }
 
+static bool is_spec_ib_user_controlled(void)
+{
+       return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+               spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+               spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+               spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
+}
+
 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
 {
        switch (ctrl) {
@@ -1255,17 +1263,26 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
                if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
                    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                        return 0;
-               /*
-                * Indirect branch speculation is always disabled in strict
-                * mode. It can neither be enabled if it was force-disabled
-                * by a  previous prctl call.
 
+               /*
+                * With strict mode for both IBPB and STIBP, the instruction
+                * code paths avoid checking this task flag and instead,
+                * unconditionally run the instruction. However, STIBP and IBPB
+                * are independent and either can be set to conditionally
+                * enabled regardless of the mode of the other.
+                *
+                * If either is set to conditional, allow the task flag to be
+                * updated, unless it was force-disabled by a previous prctl
+                * call. Currently, this is possible on an AMD CPU which has the
+                * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
+                * kernel is booted with 'spectre_v2_user=seccomp', then
+                * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
+                * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
                 */
-               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
-                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
-                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
+               if (!is_spec_ib_user_controlled() ||
                    task_spec_ib_force_disable(task))
                        return -EPERM;
+
                task_clear_spec_ib_disable(task);
                task_update_spec_tif(task);
                break;
@@ -1278,10 +1295,10 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
                if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
                    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                        return -EPERM;
-               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
-                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
-                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+
+               if (!is_spec_ib_user_controlled())
                        return 0;
+
                task_set_spec_ib_disable(task);
                if (ctrl == PR_SPEC_FORCE_DISABLE)
                        task_set_spec_ib_force_disable(task);
@@ -1344,20 +1361,17 @@ static int ib_prctl_get(struct task_struct *task)
        if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
            spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                return PR_SPEC_ENABLE;
-       else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
-           spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
-           spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
-               return PR_SPEC_DISABLE;
-       else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
-           spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
-           spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
-           spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
+       else if (is_spec_ib_user_controlled()) {
                if (task_spec_ib_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ib_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
-       } else
+       } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+           spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+           spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+               return PR_SPEC_DISABLE;
+       else
                return PR_SPEC_NOT_AFFECTED;
 }
 
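The ib_prctl_*() changes above affect the userspace speculation-control API.
A minimal sketch of how a task would probe and then opt out of indirect
branch speculation (hypothetical test program, not part of the patch;
constants come from linux/prctl.h and are defaulted here for older headers):

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_SPECULATION_CTRL
	#define PR_GET_SPECULATION_CTRL	52
	#define PR_SET_SPECULATION_CTRL	53
	#endif
	#ifndef PR_SPEC_INDIRECT_BRANCH
	#define PR_SPEC_INDIRECT_BRANCH	1
	#endif
	#ifndef PR_SPEC_DISABLE
	#define PR_SPEC_DISABLE		(1UL << 2)
	#endif

	int main(void)
	{
		/* returns PR_SPEC_* flags, e.g. PR_SPEC_PRCTL | PR_SPEC_ENABLE */
		int st = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
			       0, 0, 0);

		if (st < 0) {
			perror("PR_GET_SPECULATION_CTRL");
			return 1;
		}
		printf("ib speculation state: 0x%x\n", st);

		/* Disable indirect branch speculation towards this task. With
		 * the fix above, conditional IBPB combined with always-on
		 * STIBP is treated as user-controllable per task. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
			  PR_SPEC_DISABLE, 0, 0))
			perror("PR_SET_SPECULATION_CTRL");
		return 0;
	}
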
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index a700e525535c..4f643a87f9c7 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -183,7 +183,7 @@ static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
 
 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
 
-static int do_block_io_op(struct xen_blkif_ring *ring);
+static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
@@ -608,6 +608,8 @@ int xen_blkif_schedule(void *arg)
        struct xen_vbd *vbd = &blkif->vbd;
        unsigned long timeout;
        int ret;
+       bool do_eoi;
+       unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
 
        set_freezable();
        while (!kthread_should_stop()) {
@@ -632,16 +634,23 @@ int xen_blkif_schedule(void *arg)
                if (timeout == 0)
                        goto purge_gnt_list;
 
+               do_eoi = ring->waiting_reqs;
+
                ring->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */
 
-               ret = do_block_io_op(ring);
+               ret = do_block_io_op(ring, &eoi_flags);
                if (ret > 0)
                        ring->waiting_reqs = 1;
                if (ret == -EACCES)
                        wait_event_interruptible(ring->shutdown_wq,
                                                 kthread_should_stop());
 
+               if (do_eoi && !ring->waiting_reqs) {
+                       xen_irq_lateeoi(ring->irq, eoi_flags);
+                       eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
+               }
+
 purge_gnt_list:
                if (blkif->vbd.feature_gnt_persistent &&
                    time_after(jiffies, ring->next_lru)) {
@@ -1117,7 +1126,7 @@ static void end_block_io_op(struct bio *bio)
  * and transmute  it to the block API to hand it over to the proper block disk.
  */
 static int
-__do_block_io_op(struct xen_blkif_ring *ring)
+__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
 {
        union blkif_back_rings *blk_rings = &ring->blk_rings;
        struct blkif_request req;
@@ -1140,6 +1149,9 @@ __do_block_io_op(struct xen_blkif_ring *ring)
                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;
 
+               /* We've seen a request, so clear spurious eoi flag. */
+               *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
+
                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
@@ -1198,13 +1210,13 @@ __do_block_io_op(struct xen_blkif_ring *ring)
 }
 
 static int
-do_block_io_op(struct xen_blkif_ring *ring)
+do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
 {
        union blkif_back_rings *blk_rings = &ring->blk_rings;
        int more_to_do;
 
        do {
-               more_to_do = __do_block_io_op(ring);
+               more_to_do = __do_block_io_op(ring, eoi_flags);
                if (more_to_do)
                        break;
 
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 1d1f86657967..702ebfc4face 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -236,9 +236,8 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
                BUG();
        }
 
-       err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
-                                                   xen_blkif_be_int, 0,
-                                                   "blkif-backend", ring);
+       err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
+                       evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
        if (err < 0) {
                xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
                ring->blk_rings.common.sring = NULL;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c417aa19f996..4cbc73173701 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1211,7 +1211,6 @@ void add_interrupt_randomness(int irq, int irq_flags)
 
        fast_mix(fast_pool);
        add_interrupt_bench(cycles);
-       this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
 
        if (unlikely(crng_init == 0)) {
                if ((fast_pool->count >= 64) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cb952acc7133..2934443fbd4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1053,22 +1053,19 @@ static int cik_sdma_soft_reset(void *handle)
 {
        u32 srbm_soft_reset = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       u32 tmp = RREG32(mmSRBM_STATUS2);
+       u32 tmp;
 
-       if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
-               /* sdma0 */
-               tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-               tmp |= SDMA0_F32_CNTL__HALT_MASK;
-               WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
-               srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
-       }
-       if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
-               /* sdma1 */
-               tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-               tmp |= SDMA0_F32_CNTL__HALT_MASK;
-               WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
-               srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
-       }
+       /* sdma0 */
+       tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+       tmp |= SDMA0_F32_CNTL__HALT_MASK;
+       WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+       srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+       /* sdma1 */
+       tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+       tmp |= SDMA0_F32_CNTL__HALT_MASK;
+       WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+       srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
 
        if (srbm_soft_reset) {
                tmp = RREG32(mmSRBM_SOFT_RESET);
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 78eb10902809..076b6da44f46 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -350,6 +350,7 @@ int psb_irq_postinstall(struct drm_device *dev)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
+       unsigned int i;
 
        spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
 
@@ -362,20 +363,12 @@ int psb_irq_postinstall(struct drm_device *dev)
        PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
        PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-       if (dev->vblank[0].enabled)
-               psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-       else
-               psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-
-       if (dev->vblank[1].enabled)
-               psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-       else
-               psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-
-       if (dev->vblank[2].enabled)
-               psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
-       else
-               psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+       for (i = 0; i < dev->num_crtcs; ++i) {
+               if (dev->vblank[i].enabled)
+                       psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+               else
+                       psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+       }
 
        if (dev_priv->ops->hotplug_enable)
                dev_priv->ops->hotplug_enable(dev, true);
@@ -388,6 +381,7 @@ void psb_irq_uninstall(struct drm_device *dev)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
+       unsigned int i;
 
        spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
 
@@ -396,14 +390,10 @@ void psb_irq_uninstall(struct drm_device *dev)
 
        PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-       if (dev->vblank[0].enabled)
-               psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-
-       if (dev->vblank[1].enabled)
-               psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-
-       if (dev->vblank[2].enabled)
-               psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+       for (i = 0; i < dev->num_crtcs; ++i) {
+               if (dev->vblank[i].enabled)
+                       psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+       }
 
        dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
                                  _PSB_IRQ_MSVDX_FLAG |
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index da3fbf82d1cf..e19c05d9e84b 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -383,7 +383,11 @@ extern bool amd_iommu_np_cache;
 /* Only true if all IOMMUs support device IOTLBs */
 extern bool amd_iommu_iotlb_sup;
 
-#define MAX_IRQS_PER_TABLE     256
+/*
+ * AMD IOMMU hardware only supports 512 IRTEs despite
+ * the architectural limit of 2048 entries.
+ */
+#define MAX_IRQS_PER_TABLE     512
 #define IRQ_TABLE_ALIGNMENT    128
 
 struct irq_remap_table {
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index d2bfabecd882..f9d3211ce599 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -152,11 +152,11 @@ static inline u8 mei_cl_me_id(const struct mei_cl *cl)
  *
  * @cl: host client
  *
- * Return: mtu
+ * Return: mtu or 0 if client is not connected
  */
 static inline size_t mei_cl_mtu(const struct mei_cl *cl)
 {
-       return cl->me_cl->props.max_msg_length;
+       return cl->me_cl ? cl->me_cl->props.max_msg_length : 0;
 }
 
 /**
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index ffc5467a1ec2..617eb75c7c0c 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -469,9 +469,13 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
                 */
                struct sk_buff *skb = priv->echo_skb[idx];
                struct canfd_frame *cf = (struct canfd_frame *)skb->data;
-               u8 len = cf->len;
 
-               *len_ptr = len;
+               /* get the real payload length for netdev statistics */
+               if (cf->can_id & CAN_RTR_FLAG)
+                       *len_ptr = 0;
+               else
+                       *len_ptr = cf->len;
+
                priv->echo_skb[idx] = NULL;
 
                return skb;
@@ -496,7 +500,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
        if (!skb)
                return 0;
 
-       netif_rx(skb);
+       skb_get(skb);
+       if (netif_rx(skb) == NET_RX_SUCCESS)
+               dev_consume_skb_any(skb);
+       else
+               dev_kfree_skb_any(skb);
 
        return len;
 }
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 6cd4317fe94d..74b37309efab 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -152,14 +152,55 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
        /* protect from getting timeval before setting now */
        if (time_ref->tv_host.tv_sec > 0) {
                u64 delta_us;
+               s64 delta_ts = 0;
+
+               /* General case: dev_ts_1 < dev_ts_2 < ts, with:
+                *
+                * - dev_ts_1 = previous sync timestamp
+                * - dev_ts_2 = last sync timestamp
+                * - ts = event timestamp
+                * - ts_period = known sync period (theoretical)
+                *             ~ dev_ts2 - dev_ts1
+                * *but*:
+                *
+                * - time counters wrap (see adapter->ts_used_bits)
+                * - sometimes, dev_ts_1 < ts < dev_ts2
+                *
+                * "normal" case (sync time counters increase):
+                * must take into account case when ts wraps (tsw)
+                *
+                *      < ts_period > <          >
+                *     |             |            |
+                *  ---+--------+----+-------0-+--+-->
+                *     ts_dev_1 |    ts_dev_2  |
+                *              ts             tsw
+                */
+               if (time_ref->ts_dev_1 < time_ref->ts_dev_2) {
+                       /* case when event time (tsw) wraps */
+                       if (ts < time_ref->ts_dev_1)
+                               delta_ts = 1 << time_ref->adapter->ts_used_bits;
+
+               /* Otherwise, sync time counter (ts_dev_2) has wrapped:
+                * handle case when event time (tsn) hasn't.
+                *
+                *      < ts_period > <          >
+                *     |             |            |
+                *  ---+--------+--0-+---------+--+-->
+                *     ts_dev_1 |    ts_dev_2  |
+                *              tsn            ts
+                */
+               } else if (time_ref->ts_dev_1 < ts) {
+                       delta_ts = -(1 << time_ref->adapter->ts_used_bits);
+               }
 
-               delta_us = ts - time_ref->ts_dev_2;
-               if (ts < time_ref->ts_dev_2)
-                       delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1;
+               /* add delay between last sync and event timestamps */
+               delta_ts += (signed int)(ts - time_ref->ts_dev_2);
 
-               delta_us += time_ref->ts_total;
+               /* add time from beginning to last sync */
+               delta_ts += time_ref->ts_total;
 
-               delta_us *= time_ref->adapter->us_per_ts_scale;
+               /* convert ticks number into microseconds */
+               delta_us = delta_ts * time_ref->adapter->us_per_ts_scale;
                delta_us >>= time_ref->adapter->us_per_ts_shift;
 
                *tv = time_ref->tv_host_0;
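
For reference, the wrap handling added above can be read as a standalone
helper (hypothetical sketch; ts_dev_1/ts_dev_2/ts play the roles named in
the comment, and the counter is ts_used_bits wide):

	#include <stdint.h>

	static int64_t ts_delta(uint32_t ts_dev_1, uint32_t ts_dev_2,
				uint32_t ts, unsigned int ts_used_bits)
	{
		int64_t delta_ts = 0;

		if (ts_dev_1 < ts_dev_2) {
			/* sync counters increased normally: only the event
			 * timestamp may have wrapped (tsw in the figure) */
			if (ts < ts_dev_1)
				delta_ts = (int64_t)1 << ts_used_bits;
		} else if (ts_dev_1 < ts) {
			/* sync counter wrapped, event timestamp (tsn) did
			 * not: shift it back by one counter period */
			delta_ts = -((int64_t)1 << ts_used_bits);
		}

		/* signed distance from the last sync timestamp */
		return delta_ts + (int32_t)(ts - ts_dev_2);
	}
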
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 40647b837b31..d314e73f3d06 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -475,12 +475,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
                                     struct pucan_msg *rx_msg)
 {
        struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg;
-       struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)];
-       struct net_device *netdev = dev->netdev;
+       struct peak_usb_device *dev;
+       struct net_device *netdev;
        struct canfd_frame *cfd;
        struct sk_buff *skb;
        const u16 rx_msg_flags = le16_to_cpu(rm->flags);
 
+       if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev))
+               return -ENOMEM;
+
+       dev = usb_if->dev[pucan_msg_get_channel(rm)];
+       netdev = dev->netdev;
+
        if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
                /* CANFD frame case */
                skb = alloc_canfd_skb(netdev, &cfd);
@@ -527,15 +533,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
                                     struct pucan_msg *rx_msg)
 {
        struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg;
-       struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
-       struct pcan_usb_fd_device *pdev =
-                       container_of(dev, struct pcan_usb_fd_device, dev);
+       struct pcan_usb_fd_device *pdev;
        enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
        enum can_state rx_state, tx_state;
-       struct net_device *netdev = dev->netdev;
+       struct peak_usb_device *dev;
+       struct net_device *netdev;
        struct can_frame *cf;
        struct sk_buff *skb;
 
+       if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev))
+               return -ENOMEM;
+
+       dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
+       pdev = container_of(dev, struct pcan_usb_fd_device, dev);
+       netdev = dev->netdev;
+
        /* nothing should be sent while in BUS_OFF state */
        if (dev->can.state == CAN_STATE_BUS_OFF)
                return 0;
@@ -588,9 +600,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if,
                                    struct pucan_msg *rx_msg)
 {
        struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
-       struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
-       struct pcan_usb_fd_device *pdev =
-                       container_of(dev, struct pcan_usb_fd_device, dev);
+       struct pcan_usb_fd_device *pdev;
+       struct peak_usb_device *dev;
+
+       if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev))
+               return -EINVAL;
+
+       dev = usb_if->dev[pucan_ermsg_get_channel(er)];
+       pdev = container_of(dev, struct pcan_usb_fd_device, dev);
 
        /* keep a trace of tx and rx error counters for later use */
        pdev->bec.txerr = er->tx_err_cnt;
@@ -604,11 +621,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
                                      struct pucan_msg *rx_msg)
 {
        struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg;
-       struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)];
-       struct net_device *netdev = dev->netdev;
+       struct peak_usb_device *dev;
+       struct net_device *netdev;
        struct can_frame *cf;
        struct sk_buff *skb;
 
+       if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev))
+               return -EINVAL;
+
+       dev = usb_if->dev[pufd_omsg_get_channel(ov)];
+       netdev = dev->netdev;
+
        /* allocate an skb to store the error frame */
        skb = alloc_can_err_skb(netdev, &cf);
        if (!skb)
@@ -726,6 +749,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
        u16 tx_msg_size, tx_msg_flags;
        u8 can_dlc;
 
+       if (cfd->len > CANFD_MAX_DLEN)
+               return -EINVAL;
+
        tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4);
        tx_msg->size = cpu_to_le16(tx_msg_size);
        tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 7484ad3c955d..e3e02ec8f149 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -188,7 +188,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
  * check for the valid queue id
  **/
 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
-                                           u8 qid)
+                                           u16 qid)
 {
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
@@ -203,7 +203,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
  *
  * check for the valid vector id
  **/
-static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
 {
        struct i40e_pf *pf = vf->pf;
 
@@ -417,11 +417,28 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
        u32 v_idx, i, reg_idx, reg;
        u32 next_q_idx, next_q_type;
        u32 msix_vf, size;
+       int ret = 0;
+
+       msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+
+       if (qvlist_info->num_vectors > msix_vf) {
+               dev_warn(&pf->pdev->dev,
+                        "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
+                        qvlist_info->num_vectors,
+                        msix_vf);
+               ret = -EINVAL;
+               goto err_out;
+       }
 
        size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
               (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
                                                (qvlist_info->num_vectors - 1));
+       kfree(vf->qvlist_info);
        vf->qvlist_info = kzalloc(size, GFP_KERNEL);
+       if (!vf->qvlist_info) {
+               ret = -ENOMEM;
+               goto err_out;
+       }
        vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
 
        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -432,8 +449,10 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
                v_idx = qv_info->v_idx;
 
                /* Validate vector id belongs to this vf */
-               if (!i40e_vc_isvalid_vector_id(vf, v_idx))
-                       goto err;
+               if (!i40e_vc_isvalid_vector_id(vf, v_idx)) {
+                       ret = -EINVAL;
+                       goto err_free;
+               }
 
                vf->qvlist_info->qv_info[i] = *qv_info;
 
@@ -475,10 +494,11 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
        }
 
        return 0;
-err:
+err_free:
        kfree(vf->qvlist_info);
        vf->qvlist_info = NULL;
-       return -EINVAL;
+err_out:
+       return ret;
 }
 
 /**
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 35d8c636de12..d89995f4bd43 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -732,7 +732,8 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
 static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
                                       struct net_device *dev,
                                       struct flowi4 *fl4,
-                                      struct ip_tunnel_info *info)
+                                      struct ip_tunnel_info *info,
+                                      __be16 dport, __be16 sport)
 {
        bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
        struct geneve_dev *geneve = netdev_priv(dev);
@@ -746,6 +747,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
        memset(fl4, 0, sizeof(*fl4));
        fl4->flowi4_mark = skb->mark;
        fl4->flowi4_proto = IPPROTO_UDP;
+       fl4->fl4_dport = dport;
+       fl4->fl4_sport = sport;
 
        if (info) {
                fl4->daddr = info->key.u.ipv4.dst;
@@ -791,7 +794,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
 static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
                                           struct net_device *dev,
                                           struct flowi6 *fl6,
-                                          struct ip_tunnel_info *info)
+                                          struct ip_tunnel_info *info,
+                                          __be16 dport, __be16 sport)
 {
        bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
        struct geneve_dev *geneve = netdev_priv(dev);
@@ -807,6 +811,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
        memset(fl6, 0, sizeof(*fl6));
        fl6->flowi6_mark = skb->mark;
        fl6->flowi6_proto = IPPROTO_UDP;
+       fl6->fl6_dport = dport;
+       fl6->fl6_sport = sport;
 
        if (info) {
                fl6->daddr = info->key.u.ipv6.dst;
@@ -894,13 +900,14 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                        goto tx_error;
        }
 
-       rt = geneve_get_v4_rt(skb, dev, &fl4, info);
+       sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+       rt = geneve_get_v4_rt(skb, dev, &fl4, info,
+                             geneve->dst_port, sport);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto tx_error;
        }
 
-       sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        skb_reset_mac_header(skb);
 
        if (info) {
@@ -983,13 +990,14 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                }
        }
 
-       dst = geneve_get_v6_dst(skb, dev, &fl6, info);
+       sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+       dst = geneve_get_v6_dst(skb, dev, &fl6, info,
+                               geneve->dst_port, sport);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                goto tx_error;
        }
 
-       sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        skb_reset_mac_header(skb);
 
        if (info) {
@@ -1114,9 +1122,14 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
        struct dst_entry *dst;
        struct flowi6 fl6;
 #endif
+       __be16 sport;
 
        if (ip_tunnel_info_af(info) == AF_INET) {
-               rt = geneve_get_v4_rt(skb, dev, &fl4, info);
+               sport = udp_flow_src_port(geneve->net, skb,
+                                         1, USHRT_MAX, true);
+
+               rt = geneve_get_v4_rt(skb, dev, &fl4, info,
+                                     geneve->dst_port, sport);
                if (IS_ERR(rt))
                        return PTR_ERR(rt);
 
@@ -1124,7 +1137,11 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
                info->key.u.ipv4.src = fl4.saddr;
 #if IS_ENABLED(CONFIG_IPV6)
        } else if (ip_tunnel_info_af(info) == AF_INET6) {
-               dst = geneve_get_v6_dst(skb, dev, &fl6, info);
+               sport = udp_flow_src_port(geneve->net, skb,
+                                         1, USHRT_MAX, true);
+
+               dst = geneve_get_v6_dst(skb, dev, &fl6, info,
+                                       geneve->dst_port, sport);
                if (IS_ERR(dst))
                        return PTR_ERR(dst);
 
@@ -1135,8 +1152,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
                return -EINVAL;
        }
 
-       info->key.tp_src = udp_flow_src_port(geneve->net, skb,
-                                            1, USHRT_MAX, true);
+       info->key.tp_src = sport;
        info->key.tp_dst = geneve->dst_port;
        return 0;
 }
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index b87fe0a01c69..3c02473a20f2 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -903,6 +903,7 @@ static ssize_t cosa_write(struct file *file,
                        chan->tx_status = 1;
                        spin_unlock_irqrestore(&cosa->lock, flags);
                        up(&chan->wsem);
+                       kfree(kbuf);
                        return -ERESTARTSYS;
                }
        }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 2eb169b204f8..6a9c9b4ef2c9 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -972,7 +972,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
        struct ath_htc_rx_status *rxstatus;
        struct ath_rx_status rx_stats;
        bool decrypt_error = false;
-       __be16 rs_datalen;
+       u16 rs_datalen;
        bool is_phyerr;
 
        if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5b1d2e8402d9..347c796afd4e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -140,6 +140,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
        struct xenvif *vif; /* Parent VIF */
 
+       /*
+        * TX/RX common EOI handling.
+        * When feature-split-event-channels = 0, interrupt handler sets
+        * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set
+        * by the RX and TX interrupt handlers.
+        * RX and TX handler threads will issue an EOI when either
+        * NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or
+        * NETBK_TX_EOI) are set and they will reset those bits.
+        */
+       atomic_t eoi_pending;
+#define NETBK_RX_EOI           0x01
+#define NETBK_TX_EOI           0x02
+#define NETBK_COMMON_EOI       0x04
+
        /* Use NAPI for guest TX */
        struct napi_struct napi;
        /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
@@ -356,6 +370,7 @@ int xenvif_dealloc_kthread(void *data);
 
 irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
 
+bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
 void xenvif_rx_action(struct xenvif_queue *queue);
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 46008f284550..e61073c75142 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -76,12 +76,28 @@ int xenvif_schedulable(struct xenvif *vif)
                !vif->disabled;
 }
 
+static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
+{
+       bool rc;
+
+       rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
+       if (rc)
+               napi_schedule(&queue->napi);
+       return rc;
+}
+
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
        struct xenvif_queue *queue = dev_id;
+       int old;
 
-       if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
-               napi_schedule(&queue->napi);
+       old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
+       WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");
+
+       if (!xenvif_handle_tx_interrupt(queue)) {
+               atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
+               xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+       }
 
        return IRQ_HANDLED;
 }
@@ -115,19 +131,46 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
+static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
+{
+       bool rc;
+
+       rc = xenvif_have_rx_work(queue, false);
+       if (rc)
+               xenvif_kick_thread(queue);
+       return rc;
+}
+
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
        struct xenvif_queue *queue = dev_id;
+       int old;
 
-       xenvif_kick_thread(queue);
+       old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
+       WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");
+
+       if (!xenvif_handle_rx_interrupt(queue)) {
+               atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
+               xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+       }
 
        return IRQ_HANDLED;
 }
 
 irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 {
-       xenvif_tx_interrupt(irq, dev_id);
-       xenvif_rx_interrupt(irq, dev_id);
+       struct xenvif_queue *queue = dev_id;
+       int old;
+
+       old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
+       WARN(old, "Interrupt while EOI pending\n");
+
+       /* Use bitwise or as we need to call both functions. */
+       if ((!xenvif_handle_tx_interrupt(queue) |
+            !xenvif_handle_rx_interrupt(queue))) {
+               atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
+               xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+       }
 
        return IRQ_HANDLED;
 }
@@ -583,7 +626,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
        shared = (struct xen_netif_ctrl_sring *)addr;
        BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
 
-       err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
+       err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
        if (err < 0)
                goto err_unmap;
 
@@ -641,7 +684,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
 
        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
-               err = bind_interdomain_evtchn_to_irqhandler(
+               err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
                        queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        queue->name, queue);
                if (err < 0)
@@ -652,7 +695,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
                /* feature-split-event-channels == 1 */
                snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                         "%s-tx", queue->name);
-               err = bind_interdomain_evtchn_to_irqhandler(
+               err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
                        queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        queue->tx_irq_name, queue);
                if (err < 0)
@@ -662,7 +705,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
 
                snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                         "%s-rx", queue->name);
-               err = bind_interdomain_evtchn_to_irqhandler(
+               err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
                        queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        queue->rx_irq_name, queue);
                if (err < 0)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a469fbe1abaf..fd2ac6cd0c69 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -162,6 +162,10 @@ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
 
        if (more_to_do)
                napi_schedule(&queue->napi);
+       else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
+                                    &queue->eoi_pending) &
+                (NETBK_TX_EOI | NETBK_COMMON_EOI))
+               xen_irq_lateeoi(queue->tx_irq, 0);
 }
 
 static void tx_add_credit(struct xenvif_queue *queue)
@@ -1615,9 +1619,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif)
 irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
 {
        struct xenvif *vif = data;
+       unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
 
-       while (xenvif_ctrl_work_todo(vif))
+       while (xenvif_ctrl_work_todo(vif)) {
                xenvif_ctrl_action(vif);
+               eoi_flag = 0;
+       }
+
+       xen_irq_lateeoi(irq, eoi_flag);
 
        return IRQ_HANDLED;
 }
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index b1cf7c6f407a..f152246c7dfb 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -490,13 +490,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
        return queue->stalled && prod - cons >= 1;
 }
 
-static bool xenvif_have_rx_work(struct xenvif_queue *queue)
+bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
 {
        return xenvif_rx_ring_slots_available(queue) ||
                (queue->vif->stall_timeout &&
                 (xenvif_rx_queue_stalled(queue) ||
                  xenvif_rx_queue_ready(queue))) ||
-               kthread_should_stop() ||
+               (test_kthread && kthread_should_stop()) ||
                queue->vif->disabled;
 }
 
@@ -527,15 +527,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
 {
        DEFINE_WAIT(wait);
 
-       if (xenvif_have_rx_work(queue))
+       if (xenvif_have_rx_work(queue, true))
                return;
 
        for (;;) {
                long ret;
 
                prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
-               if (xenvif_have_rx_work(queue))
+               if (xenvif_have_rx_work(queue, true))
                        break;
+               if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
+                                       &queue->eoi_pending) &
+                   (NETBK_RX_EOI | NETBK_COMMON_EOI))
+                       xen_irq_lateeoi(queue->rx_irq, 0);
+
                ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
                if (!ret)
                        break;
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 37619bb2c97a..d188eacbd3b8 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -901,11 +901,13 @@ EXPORT_SYMBOL_GPL(of_dma_get_range);
  */
 bool of_dma_is_coherent(struct device_node *np)
 {
-       struct device_node *node = of_node_get(np);
+       struct device_node *node;
 
        if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
                return true;
 
+       node = of_node_get(np);
+
        while (node) {
                if (of_property_read_bool(node, "dma-coherent")) {
                        of_node_put(node);
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 49aeba912531..23d2f0ba12db 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -387,13 +387,14 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
 static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
 {
        /*
-        * The signal type is GPIO if the signal name has "GPIO" as a prefix.
+        * The signal type is GPIO if the signal name has "GPI" as a prefix.
         * strncmp (rather than strcmp) is used to implement the prefix
         * requirement.
         *
-        * expr->signal might look like "GPIOT3" in the GPIO case.
+        * expr->signal might look like "GPIOB1" in the GPIO case.
+        * expr->signal might look like "GPIT0" in the GPI case.
         */
-       return strncmp(expr->signal, "GPIO", 4) == 0;
+       return strncmp(expr->signal, "GPI", 3) == 0;
 }
 
 static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 54dad89fc9bf..d32aedfc6dd0 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -40,6 +40,13 @@ struct pinctrl_dt_map {
 static void dt_free_map(struct pinctrl_dev *pctldev,
                     struct pinctrl_map *map, unsigned num_maps)
 {
+       int i;
+
+       for (i = 0; i < num_maps; ++i) {
+               kfree_const(map[i].dev_name);
+               map[i].dev_name = NULL;
+       }
+
        if (pctldev) {
                const struct pinctrl_ops *ops = pctldev->desc->pctlops;
                ops->dt_free_map(pctldev, map, num_maps);
@@ -73,7 +80,13 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
 
        /* Initialize common mapping table entry fields */
        for (i = 0; i < num_maps; i++) {
-               map[i].dev_name = dev_name(p->dev);
+               const char *devname;
+
+               devname = kstrdup_const(dev_name(p->dev), GFP_KERNEL);
+               if (!devname)
+                       goto err_free_map;
+
+               map[i].dev_name = devname;
                map[i].name = statename;
                if (pctldev)
                        map[i].ctrl_dev_name = dev_name(pctldev->dev);
@@ -81,11 +94,8 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
 
        /* Remember the converted mapping table entries */
        dt_map = kzalloc(sizeof(*dt_map), GFP_KERNEL);
-       if (!dt_map) {
-               dev_err(p->dev, "failed to alloc struct pinctrl_dt_map\n");
-               dt_free_map(pctldev, map, num_maps);
-               return -ENOMEM;
-       }
+       if (!dt_map)
+               goto err_free_map;
 
        dt_map->pctldev = pctldev;
        dt_map->map = map;
@@ -93,6 +103,10 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
        list_add_tail(&dt_map->node, &p->dt_maps);
 
        return pinctrl_register_map(map, num_maps, false);
+
+err_free_map:
+       dt_free_map(pctldev, map, num_maps);
+       return -ENOMEM;
 }
 
 struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
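
For context: kstrdup_const() only duplicates strings that do not live in
.rodata, and kfree_const() only frees such real duplicates, which is why
dt_free_map() above may call it unconditionally. A minimal illustration of
the pairing (hypothetical snippet):

	const char *devname = kstrdup_const(dev_name(dev), GFP_KERNEL);

	if (!devname)
		return -ENOMEM;
	/* ... keep devname in a long-lived map entry ... */
	kfree_const(devname);	/* no-op for pointers into .rodata */
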
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index a5b7bd3c9bac..82fac6261efa 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -140,7 +140,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
                        pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
                        pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
                } else if (debounce < 250000) {
-                       time = debounce / 15600;
+                       time = debounce / 15625;
                        pin_reg |= time & DB_TMR_OUT_MASK;
                        pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
                        pin_reg |= BIT(DB_TMR_LARGE_OFF);
@@ -150,14 +150,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
                        pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
                        pin_reg |= BIT(DB_TMR_LARGE_OFF);
                } else {
-                       pin_reg &= ~DB_CNTRl_MASK;
+                       pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
                        ret = -EINVAL;
                }
        } else {
                pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
                pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
                pin_reg &= ~DB_TMR_OUT_MASK;
-               pin_reg &= ~DB_CNTRl_MASK;
+               pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
        }
        writel(pin_reg, gpio_dev->base + offset * 4);
        spin_unlock_irqrestore(&gpio_dev->lock, flags);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 0f730e4bf6bc..0caf751d85de 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3185,6 +3185,8 @@ static int _regulator_get_voltage(struct regulator_dev *rdev)
                ret = rdev->desc->fixed_uV;
        } else if (rdev->supply) {
                ret = _regulator_get_voltage(rdev->supply->rdev);
+       } else if (rdev->supply_name) {
+               return -EPROBE_DEFER;
        } else {
                return -EINVAL;
        }
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 60c288526355..2bc3dc6244a5 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -657,8 +657,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                                        rcu_read_lock();
                                        list_for_each_entry_rcu(h,
                                                &tmp_pg->dh_list, node) {
-                                               /* h->sdev should always be valid */
-                                               BUG_ON(!h->sdev);
+                                               if (!h->sdev)
+                                                       continue;
                                                h->sdev->access_state = desc[0];
                                        }
                                        rcu_read_unlock();
@@ -704,7 +704,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                        pg->expiry = 0;
                        rcu_read_lock();
                        list_for_each_entry_rcu(h, &pg->dh_list, node) {
-                               BUG_ON(!h->sdev);
+                               if (!h->sdev)
+                                       continue;
                                h->sdev->access_state =
                                        (pg->state & SCSI_ACCESS_STATE_MASK);
                                if (pg->pref)
@@ -1149,7 +1150,6 @@ static void alua_bus_detach(struct scsi_device *sdev)
        spin_lock(&h->pg_lock);
        pg = h->pg;
        rcu_assign_pointer(h->pg, NULL);
-       h->sdev = NULL;
        spin_unlock(&h->pg_lock);
        if (pg) {
                spin_lock_irq(&pg->lock);
@@ -1158,6 +1158,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
                kref_put(&pg->kref, release_port_group);
        }
        sdev->handler_data = NULL;
+       synchronize_rcu();
        kfree(h);
 }
 
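The detach path above now follows the standard RCU removal pattern:
unpublish the pointer, wait a grace period, then free, while list readers
tolerate entries that are being torn down. Schematically (a generic sketch,
not the driver code):

	/* writer */
	spin_lock(&lock);
	rcu_assign_pointer(shared, NULL);	/* unpublish */
	spin_unlock(&lock);
	synchronize_rcu();	/* wait out all rcu_read_lock() readers */
	kfree(obj);		/* safe: no reader can still see obj */

	/* reader */
	rcu_read_lock();
	list_for_each_entry_rcu(h, &head, node) {
		if (!h->sdev)	/* skip entries being torn down */
			continue;
		/* ... use h->sdev ... */
	}
	rcu_read_unlock();
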
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index b82df8cdf962..7f1d6d52d48b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -8937,7 +8937,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* hook into SCSI subsystem */
        rc = hpsa_scsi_add_host(h);
        if (rc)
-               goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+               goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
 
        /* Monitor the controller for firmware lockups */
        h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
@@ -8949,6 +8949,8 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                h->heartbeat_sample_interval);
        return 0;
 
+clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
+       kfree(h->lastlogicals);
 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
        hpsa_free_performant_mode(h);
        h->access.set_intr_mask(h, HPSA_INTR_OFF);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 726291c5562d..90f4a5c8012c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1648,6 +1648,15 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
        .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
        },
+       { USB_DEVICE(0x045b, 0x023c),   /* Renesas USB Download mode */
+       .driver_info = DISABLE_ECHO,    /* Don't echo banner */
+       },
+       { USB_DEVICE(0x045b, 0x0248),   /* Renesas USB Download mode */
+       .driver_info = DISABLE_ECHO,    /* Don't echo banner */
+       },
+       { USB_DEVICE(0x045b, 0x024D),   /* Renesas USB Download mode */
+       .driver_info = DISABLE_ECHO,    /* Don't echo banner */
+       },
        { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; [email protected] */
        .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
        },
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index 5107987bd353..d363224dce6f 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1772,6 +1772,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err;
        }
 
+       pci_set_drvdata(pdev, dev);
        spin_lock_init(&dev->lock);
        dev->pdev = pdev;
        dev->gadget.ops = &goku_ops;
@@ -1805,7 +1806,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
        dev->regs = (struct goku_udc_regs __iomem *) base;
 
-       pci_set_drvdata(pdev, dev);
        INFO(dev, "%s\n", driver_desc);
        INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
        INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index bdff01095f54..c31c08a708be 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -90,6 +90,8 @@ static void evtchn_2l_unmask(unsigned port)
 
        BUG_ON(!irqs_disabled());
 
+       smp_wmb();      /* All writes before unmask must be visible. */
+
        if (unlikely((cpu != cpu_from_evtchn(port))))
                do_hypercall = 1;
        else {
@@ -158,7 +160,7 @@ static inline xen_ulong_t active_evtchns(unsigned int cpu,
  * a bitset of words which contain pending event bits.  The second
  * level is a bitset of pending events themselves.
  */
-static void evtchn_2l_handle_events(unsigned cpu)
+static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
 {
        int irq;
        xen_ulong_t pending_words;
@@ -239,10 +241,7 @@ static void evtchn_2l_handle_events(unsigned cpu)
 
                        /* Process port. */
                        port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
-                       irq = get_evtchn_to_irq(port);
-
-                       if (irq != -1)
-                               generic_handle_irq(irq);
+                       handle_irq_for_port(port, ctrl);
 
                        bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
 
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 83bb9fdbadc6..5308bc1e0189 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -32,6 +32,10 @@
 #include <linux/slab.h>
 #include <linux/irqnr.h>
 #include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/cpuhotplug.h>
+#include <linux/atomic.h>
+#include <linux/ktime.h>
 
 #ifdef CONFIG_X86
 #include <asm/desc.h>
@@ -62,6 +66,15 @@
 
 #include "events_internal.h"
 
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "xen."
+
+static uint __read_mostly event_loop_timeout = 2;
+module_param(event_loop_timeout, uint, 0644);
+
+static uint __read_mostly event_eoi_delay = 10;
+module_param(event_eoi_delay, uint, 0644);
+
 const struct evtchn_ops *evtchn_ops;
 
 /*
@@ -70,6 +83,24 @@ const struct evtchn_ops *evtchn_ops;
  */
 static DEFINE_MUTEX(irq_mapping_update_lock);
 
+/*
+ * Lock protecting event handling loop against removing event channels.
+ * Adding of event channels is no issue as the associated IRQ becomes active
+ * only after everything is setup (before request_[threaded_]irq() the handler
+ * can't be entered for an event, as the event channel will be unmasked only
+ * then).
+ */
+static DEFINE_RWLOCK(evtchn_rwlock);
+
+/*
+ * Lock hierarchy:
+ *
+ * irq_mapping_update_lock
+ *   evtchn_rwlock
+ *     IRQ-desc lock
+ *       percpu eoi_list_lock
+ */
+
 static LIST_HEAD(xen_irq_list_head);
 
 /* IRQ <-> VIRQ mapping. */
@@ -94,17 +125,20 @@ static bool (*pirq_needs_eoi)(unsigned irq);
 static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];
 
 static struct irq_chip xen_dynamic_chip;
+static struct irq_chip xen_lateeoi_chip;
 static struct irq_chip xen_percpu_chip;
 static struct irq_chip xen_pirq_chip;
 static void enable_dynirq(struct irq_data *data);
 static void disable_dynirq(struct irq_data *data);
 
+static DEFINE_PER_CPU(unsigned int, irq_epoch);
+
 static void clear_evtchn_to_irq_row(unsigned row)
 {
        unsigned col;
 
        for (col = 0; col < EVTCHN_PER_ROW; col++)
-               evtchn_to_irq[row][col] = -1;
+               WRITE_ONCE(evtchn_to_irq[row][col], -1);
 }
 
 static void clear_evtchn_to_irq_all(void)
@@ -141,7 +175,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
                clear_evtchn_to_irq_row(row);
        }
 
-       evtchn_to_irq[row][col] = irq;
+       WRITE_ONCE(evtchn_to_irq[row][col], irq);
        return 0;
 }
 
@@ -151,7 +185,7 @@ int get_evtchn_to_irq(unsigned evtchn)
                return -1;
        if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
                return -1;
-       return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
+       return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
 }
 
 /* Get info for IRQ */
@@ -260,10 +294,14 @@ static void xen_irq_info_cleanup(struct irq_info *info)
  */
 unsigned int evtchn_from_irq(unsigned irq)
 {
-       if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
+       const struct irq_info *info = NULL;
+
+       if (likely(irq < nr_irqs))
+               info = info_for_irq(irq);
+       if (!info)
                return 0;
 
-       return info_for_irq(irq)->evtchn;
+       return info->evtchn;
 }
 
 unsigned irq_from_evtchn(unsigned int evtchn)
@@ -382,9 +420,157 @@ void notify_remote_via_irq(int irq)
 }
 EXPORT_SYMBOL_GPL(notify_remote_via_irq);
 
+struct lateeoi_work {
+       struct delayed_work delayed;
+       spinlock_t eoi_list_lock;
+       struct list_head eoi_list;
+};
+
+static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);
+
+static void lateeoi_list_del(struct irq_info *info)
+{
+       struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
+       unsigned long flags;
+
+       spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+       list_del_init(&info->eoi_list);
+       spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
+}
+
+static void lateeoi_list_add(struct irq_info *info)
+{
+       struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
+       struct irq_info *elem;
+       u64 now = get_jiffies_64();
+       unsigned long delay;
+       unsigned long flags;
+
+       if (now < info->eoi_time)
+               delay = info->eoi_time - now;
+       else
+               delay = 1;
+
+       spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+
+       if (list_empty(&eoi->eoi_list)) {
+               list_add(&info->eoi_list, &eoi->eoi_list);
+               mod_delayed_work_on(info->eoi_cpu, system_wq,
+                                   &eoi->delayed, delay);
+       } else {
+               list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
+                       if (elem->eoi_time <= info->eoi_time)
+                               break;
+               }
+               list_add(&info->eoi_list, &elem->eoi_list);
+       }
+
+       spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
+}
+
+static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
+{
+       evtchn_port_t evtchn;
+       unsigned int cpu;
+       unsigned int delay = 0;
+
+       evtchn = info->evtchn;
+       if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
+               return;
+
+       if (spurious) {
+               if ((1 << info->spurious_cnt) < (HZ << 2))
+                       info->spurious_cnt++;
+               if (info->spurious_cnt > 1) {
+                       delay = 1 << (info->spurious_cnt - 2);
+                       if (delay > HZ)
+                               delay = HZ;
+                       if (!info->eoi_time)
+                               info->eoi_cpu = smp_processor_id();
+                       info->eoi_time = get_jiffies_64() + delay;
+               }
+       } else {
+               info->spurious_cnt = 0;
+       }
+
+       cpu = info->eoi_cpu;
+       if (info->eoi_time &&
+           (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
+               lateeoi_list_add(info);
+               return;
+       }
+
+       info->eoi_time = 0;
+       unmask_evtchn(evtchn);
+}
+
+static void xen_irq_lateeoi_worker(struct work_struct *work)
+{
+       struct lateeoi_work *eoi;
+       struct irq_info *info;
+       u64 now = get_jiffies_64();
+       unsigned long flags;
+
+       eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
+
+       read_lock_irqsave(&evtchn_rwlock, flags);
+
+       while (true) {
+               spin_lock(&eoi->eoi_list_lock);
+
+               info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
+                                               eoi_list);
+
+               if (info == NULL || now < info->eoi_time) {
+                       spin_unlock(&eoi->eoi_list_lock);
+                       break;
+               }
+
+               list_del_init(&info->eoi_list);
+
+               spin_unlock(&eoi->eoi_list_lock);
+
+               info->eoi_time = 0;
+
+               xen_irq_lateeoi_locked(info, false);
+       }
+
+       if (info)
+               mod_delayed_work_on(info->eoi_cpu, system_wq,
+                                   &eoi->delayed, info->eoi_time - now);
+
+       read_unlock_irqrestore(&evtchn_rwlock, flags);
+}
+
+static void xen_cpu_init_eoi(unsigned int cpu)
+{
+       struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);
+
+       INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
+       spin_lock_init(&eoi->eoi_list_lock);
+       INIT_LIST_HEAD(&eoi->eoi_list);
+}
+
+void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
+{
+       struct irq_info *info;
+       unsigned long flags;
+
+       read_lock_irqsave(&evtchn_rwlock, flags);
+
+       info = info_for_irq(irq);
+
+       if (info)
+               xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
+
+       read_unlock_irqrestore(&evtchn_rwlock, flags);
+}
+EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
+
 static void xen_irq_init(unsigned irq)
 {
        struct irq_info *info;
+
 #ifdef CONFIG_SMP
        /* By default all event channels notify CPU#0. */
        cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
@@ -399,6 +585,7 @@ static void xen_irq_init(unsigned irq)
 
        set_info_for_irq(irq, info);
 
+       INIT_LIST_HEAD(&info->eoi_list);
        list_add_tail(&info->list, &xen_irq_list_head);
 }
 
@@ -447,16 +634,24 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
 static void xen_free_irq(unsigned irq)
 {
        struct irq_info *info = info_for_irq(irq);
+       unsigned long flags;
 
        if (WARN_ON(!info))
                return;
 
+       write_lock_irqsave(&evtchn_rwlock, flags);
+
+       if (!list_empty(&info->eoi_list))
+               lateeoi_list_del(info);
+
        list_del(&info->list);
 
        set_info_for_irq(irq, NULL);
 
        WARN_ON(info->refcnt > 0);
 
+       write_unlock_irqrestore(&evtchn_rwlock, flags);
+
        kfree(info);
 
        /* Legacy IRQ descriptors are managed by the arch. */
@@ -848,7 +1043,7 @@ int xen_pirq_from_irq(unsigned irq)
 }
 EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
 
-int bind_evtchn_to_irq(unsigned int evtchn)
+static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
 {
        int irq;
        int ret;
@@ -865,7 +1060,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                if (irq < 0)
                        goto out;
 
-               irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
+               irq_set_chip_and_handler_name(irq, chip,
                                              handle_edge_irq, "event");
 
                ret = xen_irq_info_evtchn_setup(irq, evtchn);
@@ -886,8 +1081,19 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 
        return irq;
 }
+
+int bind_evtchn_to_irq(evtchn_port_t evtchn)
+{
+       return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip);
+}
 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
 
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
+{
+       return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip);
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
+
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
        struct evtchn_bind_ipi bind_ipi;
@@ -929,8 +1135,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
        return irq;
 }
 
-int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
-                                  unsigned int remote_port)
+static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain,
+                                              evtchn_port_t remote_port,
+                                              struct irq_chip *chip)
 {
        struct evtchn_bind_interdomain bind_interdomain;
        int err;
@@ -941,10 +1148,26 @@ int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);
 
-       return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
+       return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
+                                              chip);
+}
+
+int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
+                                  evtchn_port_t remote_port)
+{
+       return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
+                                                  &xen_dynamic_chip);
 }
 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
 
+int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
+                                          evtchn_port_t remote_port)
+{
+       return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
+                                                  &xen_lateeoi_chip);
+}
+EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
+
 static int find_virq(unsigned int virq, unsigned int cpu)
 {
        struct evtchn_status status;
@@ -1040,14 +1263,15 @@ static void unbind_from_irq(unsigned int irq)
        mutex_unlock(&irq_mapping_update_lock);
 }
 
-int bind_evtchn_to_irqhandler(unsigned int evtchn,
-                             irq_handler_t handler,
-                             unsigned long irqflags,
-                             const char *devname, void *dev_id)
+static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
+                                         irq_handler_t handler,
+                                         unsigned long irqflags,
+                                         const char *devname, void *dev_id,
+                                         struct irq_chip *chip)
 {
        int irq, retval;
 
-       irq = bind_evtchn_to_irq(evtchn);
+       irq = bind_evtchn_to_irq_chip(evtchn, chip);
        if (irq < 0)
                return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
@@ -1058,18 +1282,38 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
 
        return irq;
 }
+
+int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
+                             irq_handler_t handler,
+                             unsigned long irqflags,
+                             const char *devname, void *dev_id)
+{
+       return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
+                                             devname, dev_id,
+                                             &xen_dynamic_chip);
+}
 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
 
-int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
-                                         unsigned int remote_port,
-                                         irq_handler_t handler,
-                                         unsigned long irqflags,
-                                         const char *devname,
-                                         void *dev_id)
+int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
+                                     irq_handler_t handler,
+                                     unsigned long irqflags,
+                                     const char *devname, void *dev_id)
+{
+       return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
+                                             devname, dev_id,
+                                             &xen_lateeoi_chip);
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);
+
+static int bind_interdomain_evtchn_to_irqhandler_chip(
+               unsigned int remote_domain, evtchn_port_t remote_port,
+               irq_handler_t handler, unsigned long irqflags,
+               const char *devname, void *dev_id, struct irq_chip *chip)
 {
        int irq, retval;
 
-       irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
+       irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
+                                                 chip);
        if (irq < 0)
                return irq;
 
@@ -1081,8 +1325,33 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
 
        return irq;
 }
+
+int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
+                                         evtchn_port_t remote_port,
+                                         irq_handler_t handler,
+                                         unsigned long irqflags,
+                                         const char *devname,
+                                         void *dev_id)
+{
+       return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
+                               remote_port, handler, irqflags, devname,
+                               dev_id, &xen_dynamic_chip);
+}
 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
 
+int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
+                                                 evtchn_port_t remote_port,
+                                                 irq_handler_t handler,
+                                                 unsigned long irqflags,
+                                                 const char *devname,
+                                                 void *dev_id)
+{
+       return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
+                               remote_port, handler, irqflags, devname,
+                               dev_id, &xen_lateeoi_chip);
+}
+EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);
+
 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
@@ -1195,7 +1464,7 @@ int evtchn_get(unsigned int evtchn)
                goto done;
 
        err = -EINVAL;
-       if (info->refcnt <= 0)
+       if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
                goto done;
 
        info->refcnt++;
@@ -1234,6 +1503,54 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
        notify_remote_via_irq(irq);
 }
 
+struct evtchn_loop_ctrl {
+       ktime_t timeout;
+       unsigned count;
+       bool defer_eoi;
+};
+
+void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
+{
+       int irq;
+       struct irq_info *info;
+
+       irq = get_evtchn_to_irq(port);
+       if (irq == -1)
+               return;
+
+       /*
+        * Check for timeout every 256 events.
+        * The timeout value is set only after the first 256 events in
+        * order not to hurt the common case of few loop iterations. The
+        * 256 is basically an arbitrary value.
+        *
+        * Once the timeout is hit, all further EOIs have to be deferred
+        * in order to make sure the event handling loop is left rather
+        * sooner than later.
+        */
+       if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
+               ktime_t kt = ktime_get();
+
+               if (!ctrl->timeout.tv64) {
+                       kt = ktime_add_ms(kt,
+                                         jiffies_to_msecs(event_loop_timeout));
+                       ctrl->timeout = kt;
+               } else if (kt.tv64 > ctrl->timeout.tv64) {
+                       ctrl->defer_eoi = true;
+               }
+       }
+
+       info = info_for_irq(irq);
+
+       if (ctrl->defer_eoi) {
+               info->eoi_cpu = smp_processor_id();
+               info->irq_epoch = __this_cpu_read(irq_epoch);
+               info->eoi_time = get_jiffies_64() + event_eoi_delay;
+       }
+
+       generic_handle_irq(irq);
+}
+
 static DEFINE_PER_CPU(unsigned, xed_nesting_count);
 
 static void __xen_evtchn_do_upcall(void)
@@ -1241,6 +1558,9 @@ static void __xen_evtchn_do_upcall(void)
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
        int cpu = get_cpu();
        unsigned count;
+       struct evtchn_loop_ctrl ctrl = { 0 };
+
+       read_lock(&evtchn_rwlock);
 
        do {
                vcpu_info->evtchn_upcall_pending = 0;
@@ -1248,7 +1568,7 @@ static void __xen_evtchn_do_upcall(void)
                if (__this_cpu_inc_return(xed_nesting_count) - 1)
                        goto out;
 
-               xen_evtchn_handle_events(cpu);
+               xen_evtchn_handle_events(cpu, &ctrl);
 
                BUG_ON(!irqs_disabled());
 
@@ -1257,6 +1577,14 @@ static void __xen_evtchn_do_upcall(void)
        } while (count != 1 || vcpu_info->evtchn_upcall_pending);
 
 out:
+       read_unlock(&evtchn_rwlock);
+
+       /*
+        * Increment irq_epoch only now to defer EOIs only for
+        * xen_irq_lateeoi() invocations occurring from inside the loop
+        * above.
+        */
+       __this_cpu_inc(irq_epoch);
 
        put_cpu();
 }
@@ -1613,6 +1941,21 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
        .irq_retrigger          = retrigger_dynirq,
 };
 
+static struct irq_chip xen_lateeoi_chip __read_mostly = {
+       /* The chip name needs to contain "xen-dyn" for irqbalance to work. */
+       .name                   = "xen-dyn-lateeoi",
+
+       .irq_disable            = disable_dynirq,
+       .irq_mask               = disable_dynirq,
+       .irq_unmask             = enable_dynirq,
+
+       .irq_ack                = mask_ack_dynirq,
+       .irq_mask_ack           = mask_ack_dynirq,
+
+       .irq_set_affinity       = set_affinity_irq,
+       .irq_retrigger          = retrigger_dynirq,
+};
+
 static struct irq_chip xen_pirq_chip __read_mostly = {
        .name                   = "xen-pirq",
 
@@ -1680,12 +2023,31 @@ void xen_callback_vector(void)
 void xen_callback_vector(void) {}
 #endif
 
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "xen."
-
 static bool fifo_events = true;
 module_param(fifo_events, bool, 0);
 
+static int xen_evtchn_cpu_prepare(unsigned int cpu)
+{
+       int ret = 0;
+
+       xen_cpu_init_eoi(cpu);
+
+       if (evtchn_ops->percpu_init)
+               ret = evtchn_ops->percpu_init(cpu);
+
+       return ret;
+}
+
+static int xen_evtchn_cpu_dead(unsigned int cpu)
+{
+       int ret = 0;
+
+       if (evtchn_ops->percpu_deinit)
+               ret = evtchn_ops->percpu_deinit(cpu);
+
+       return ret;
+}
+
 void __init xen_init_IRQ(void)
 {
        int ret = -EINVAL;
@@ -1695,6 +2057,12 @@ void __init xen_init_IRQ(void)
        if (ret < 0)
                xen_evtchn_2l_init();
 
+       xen_cpu_init_eoi(smp_processor_id());
+
+       cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
+                                 "CPUHP_XEN_EVTCHN_PREPARE",
+                                 xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
+
        evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
                                sizeof(*evtchn_to_irq), GFP_KERNEL);
        BUG_ON(!evtchn_to_irq);
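
Taken together, the events_base.c additions form the heart of the event-storm mitigation: every lateeoi IRQ gets an eoi_time deadline, the per-CPU eoi_list stays sorted by that deadline, and a delayed work item running on the owning CPU unmasks whatever has come due. A reduced model of the flush side (pending_list and unmask_port() are hypothetical stand-ins for the real per-CPU state and locking):

        static LIST_HEAD(pending_list);         /* sorted by deadline */

        struct deferred_eoi {
                struct list_head list;
                u64 deadline;                   /* jiffies_64 value when to unmask */
                evtchn_port_t port;
        };

        static void flush_due_eois(struct work_struct *work)
        {
                struct deferred_eoi *d, *n;
                u64 now = get_jiffies_64();

                list_for_each_entry_safe(d, n, &pending_list, list) {
                        if (now < d->deadline)
                                break;          /* sorted: the rest is later */
                        list_del_init(&d->list);
                        unmask_port(d->port);   /* hypothetical helper */
                }
                /* the real worker then re-arms the delayed work for the next
                 * deadline and runs everything under evtchn_rwlock */
        }
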
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 7ef27c6ed72f..0a4fece5fd8d 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -227,19 +227,25 @@ static bool evtchn_fifo_is_masked(unsigned port)
        return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }
 /*
- * Clear MASKED, spinning if BUSY is set.
+ * Clear MASKED if not PENDING, spinning if BUSY is set.
+ * Return true if mask was cleared.
  */
-static void clear_masked(volatile event_word_t *word)
+static bool clear_masked_cond(volatile event_word_t *word)
 {
        event_word_t new, old, w;
 
        w = *word;
 
        do {
+               if (w & (1 << EVTCHN_FIFO_PENDING))
+                       return false;
+
                old = w & ~(1 << EVTCHN_FIFO_BUSY);
                new = old & ~(1 << EVTCHN_FIFO_MASKED);
                w = sync_cmpxchg(word, old, new);
        } while (w != old);
+
+       return true;
 }
 
 static void evtchn_fifo_unmask(unsigned port)
@@ -248,8 +254,7 @@ static void evtchn_fifo_unmask(unsigned port)
 
        BUG_ON(!irqs_disabled());
 
-       clear_masked(word);
-       if (evtchn_fifo_is_pending(port)) {
+       if (!clear_masked_cond(word)) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        }
@@ -270,19 +275,9 @@ static uint32_t clear_linked(volatile event_word_t *word)
        return w & EVTCHN_FIFO_LINK_MASK;
 }
 
-static void handle_irq_for_port(unsigned port)
-{
-       int irq;
-
-       irq = get_evtchn_to_irq(port);
-       if (irq != -1)
-               generic_handle_irq(irq);
-}
-
-static void consume_one_event(unsigned cpu,
+static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl,
                              struct evtchn_fifo_control_block *control_block,
-                             unsigned priority, unsigned long *ready,
-                             bool drop)
+                             unsigned priority, unsigned long *ready)
 {
        struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
        uint32_t head;
@@ -315,16 +310,17 @@ static void consume_one_event(unsigned cpu,
                clear_bit(priority, ready);
 
        if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
-               if (unlikely(drop))
+               if (unlikely(!ctrl))
                        pr_warn("Dropping pending event for port %u\n", port);
                else
-                       handle_irq_for_port(port);
+                       handle_irq_for_port(port, ctrl);
        }
 
        q->head[priority] = head;
 }
 
-static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
+static void __evtchn_fifo_handle_events(unsigned cpu,
+                                       struct evtchn_loop_ctrl *ctrl)
 {
        struct evtchn_fifo_control_block *control_block;
        unsigned long ready;
@@ -336,14 +332,15 @@ static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
 
        while (ready) {
                q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
-               consume_one_event(cpu, control_block, q, &ready, drop);
+               consume_one_event(cpu, ctrl, control_block, q, &ready);
                ready |= xchg(&control_block->ready, 0);
        }
 }
 
-static void evtchn_fifo_handle_events(unsigned cpu)
+static void evtchn_fifo_handle_events(unsigned cpu,
+                                     struct evtchn_loop_ctrl *ctrl)
 {
-       __evtchn_fifo_handle_events(cpu, false);
+       __evtchn_fifo_handle_events(cpu, ctrl);
 }
 
 static void evtchn_fifo_resume(void)
@@ -381,21 +378,6 @@ static void evtchn_fifo_resume(void)
        event_array_pages = 0;
 }
 
-static const struct evtchn_ops evtchn_ops_fifo = {
-       .max_channels      = evtchn_fifo_max_channels,
-       .nr_channels       = evtchn_fifo_nr_channels,
-       .setup             = evtchn_fifo_setup,
-       .bind_to_cpu       = evtchn_fifo_bind_to_cpu,
-       .clear_pending     = evtchn_fifo_clear_pending,
-       .set_pending       = evtchn_fifo_set_pending,
-       .is_pending        = evtchn_fifo_is_pending,
-       .test_and_set_mask = evtchn_fifo_test_and_set_mask,
-       .mask              = evtchn_fifo_mask,
-       .unmask            = evtchn_fifo_unmask,
-       .handle_events     = evtchn_fifo_handle_events,
-       .resume            = evtchn_fifo_resume,
-};
-
 static int evtchn_fifo_alloc_control_block(unsigned cpu)
 {
        void *control_block = NULL;
@@ -418,19 +400,36 @@ static int evtchn_fifo_alloc_control_block(unsigned cpu)
        return ret;
 }
 
-static int xen_evtchn_cpu_prepare(unsigned int cpu)
+static int evtchn_fifo_percpu_init(unsigned int cpu)
 {
        if (!per_cpu(cpu_control_block, cpu))
                return evtchn_fifo_alloc_control_block(cpu);
        return 0;
 }
 
-static int xen_evtchn_cpu_dead(unsigned int cpu)
+static int evtchn_fifo_percpu_deinit(unsigned int cpu)
 {
-       __evtchn_fifo_handle_events(cpu, true);
+       __evtchn_fifo_handle_events(cpu, NULL);
        return 0;
 }
 
+static const struct evtchn_ops evtchn_ops_fifo = {
+       .max_channels      = evtchn_fifo_max_channels,
+       .nr_channels       = evtchn_fifo_nr_channels,
+       .setup             = evtchn_fifo_setup,
+       .bind_to_cpu       = evtchn_fifo_bind_to_cpu,
+       .clear_pending     = evtchn_fifo_clear_pending,
+       .set_pending       = evtchn_fifo_set_pending,
+       .is_pending        = evtchn_fifo_is_pending,
+       .test_and_set_mask = evtchn_fifo_test_and_set_mask,
+       .mask              = evtchn_fifo_mask,
+       .unmask            = evtchn_fifo_unmask,
+       .handle_events     = evtchn_fifo_handle_events,
+       .resume            = evtchn_fifo_resume,
+       .percpu_init       = evtchn_fifo_percpu_init,
+       .percpu_deinit     = evtchn_fifo_percpu_deinit,
+};
+
 int __init xen_evtchn_fifo_init(void)
 {
        int cpu = get_cpu();
@@ -444,9 +443,6 @@ int __init xen_evtchn_fifo_init(void)
 
        evtchn_ops = &evtchn_ops_fifo;
 
-       cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
-                                 "CPUHP_XEN_EVTCHN_PREPARE",
-                                 xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
 out:
        put_cpu();
        return ret;
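
clear_masked_cond() closes a delivery race in the FIFO ABI: clearing MASKED while PENDING is set used to let the event be lost, so the helper now refuses in that case and evtchn_fifo_unmask() falls back to the EVTCHNOP_unmask hypercall, which re-injects the pending event. The loop itself is the standard lock-free read/modify/compare-exchange retry; in sketch form (a generic helper, not the driver code):

        static bool clear_flag_unless(unsigned int *word, unsigned int clear_mask,
                                      unsigned int veto_mask)
        {
                unsigned int old, new, w = READ_ONCE(*word);

                do {
                        if (w & veto_mask)
                                return false;   /* veto bit set: don't clear */
                        old = w;
                        new = old & ~clear_mask;
                        w = cmpxchg(word, old, new);    /* retry if it changed */
                } while (w != old);

                return true;
        }
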
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index 50c2050a1e32..b9b4f5919893 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -32,11 +32,16 @@ enum xen_irq_type {
  */
 struct irq_info {
        struct list_head list;
-       int refcnt;
+       struct list_head eoi_list;
+       short refcnt;
+       short spurious_cnt;
        enum xen_irq_type type; /* type */
        unsigned irq;
        unsigned int evtchn;    /* event channel */
        unsigned short cpu;     /* cpu bound */
+       unsigned short eoi_cpu; /* EOI must happen on this cpu */
+       unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
+       u64 eoi_time;           /* Time in jiffies when to EOI. */
 
        union {
                unsigned short virq;
@@ -55,6 +60,8 @@ struct irq_info {
 #define PIRQ_SHAREABLE (1 << 1)
 #define PIRQ_MSI_GROUP (1 << 2)
 
+struct evtchn_loop_ctrl;
+
 struct evtchn_ops {
        unsigned (*max_channels)(void);
        unsigned (*nr_channels)(void);
@@ -69,14 +76,18 @@ struct evtchn_ops {
        void (*mask)(unsigned port);
        void (*unmask)(unsigned port);
 
-       void (*handle_events)(unsigned cpu);
+       void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl);
        void (*resume)(void);
+
+       int (*percpu_init)(unsigned int cpu);
+       int (*percpu_deinit)(unsigned int cpu);
 };
 
 extern const struct evtchn_ops *evtchn_ops;
 
 extern int **evtchn_to_irq;
 int get_evtchn_to_irq(unsigned int evtchn);
+void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);
 
 struct irq_info *info_for_irq(unsigned irq);
 unsigned cpu_from_irq(unsigned irq);
@@ -134,9 +145,10 @@ static inline void unmask_evtchn(unsigned port)
        return evtchn_ops->unmask(port);
 }
 
-static inline void xen_evtchn_handle_events(unsigned cpu)
+static inline void xen_evtchn_handle_events(unsigned cpu,
+                                           struct evtchn_loop_ctrl *ctrl)
 {
-       return evtchn_ops->handle_events(cpu);
+       return evtchn_ops->handle_events(cpu, ctrl);
 }
 
 static inline void xen_evtchn_resume(void)
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index e8c7f09d01be..bcf0b1e60f2c 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -178,7 +178,6 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
             "Interrupt for port %d, but apparently not enabled; per-user %p\n",
             evtchn->port, u);
 
-       disable_irq_nosync(irq);
        evtchn->enabled = false;
 
        spin_lock(&u->ring_prod_lock);
@@ -304,7 +303,7 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
                evtchn = find_evtchn(u, port);
                if (evtchn && !evtchn->enabled) {
                        evtchn->enabled = true;
-                       enable_irq(irq_from_evtchn(port));
+                       xen_irq_lateeoi(irq_from_evtchn(port), 0);
                }
        }
 
@@ -404,8 +403,8 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
        if (rc < 0)
                goto err;
 
-       rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
-                                      u->name, evtchn);
+       rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
+                                              u->name, evtchn);
        if (rc < 0)
                goto err;
 
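For /dev/xen/evtchn the conversion means the interrupt handler no longer has to disable the IRQ while userspace catches up: the event simply stays unacknowledged until the port is written back, and the write path now issues the late EOI. The userspace protocol is unchanged; roughly (hypothetical snippet, error handling omitted):

        evtchn_port_t port;

        read(fd, &port, sizeof(port));  /* event delivered; EOI still pending */
        /* ... handle the event ... */
        write(fd, &port, sizeof(port)); /* re-enable: kernel issues the lateeoi */
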
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index ee5ce9286d61..83d798d12400 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -733,10 +733,17 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
        wmb();
        notify_remote_via_irq(pdev->evtchn_irq);
 
+       /* Enable IRQ to signal "request done". */
+       xen_pcibk_lateeoi(pdev, 0);
+
        ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
                                 !(test_bit(_XEN_PCIB_active, (unsigned long *)
                                 &sh_info->flags)), 300*HZ);
 
+       /* Enable IRQ for pcifront request if not already active. */
+       if (!test_bit(_PDEVF_op_active, &pdev->flags))
+               xen_pcibk_lateeoi(pdev, 0);
+
        if (!ret) {
                if (test_bit(_XEN_PCIB_active,
                        (unsigned long *)&sh_info->flags)) {
@@ -750,13 +757,6 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
        }
        clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
 
-       if (test_bit(_XEN_PCIF_active,
-               (unsigned long *)&sh_info->flags)) {
-               dev_dbg(&psdev->dev->dev,
-                       "schedule pci_conf service in " DRV_NAME "\n");
-               xen_pcibk_test_and_schedule_op(psdev->pdev);
-       }
-
        res = (pci_ers_result_t)aer_op->err;
        return res;
 }
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 7af369b6aaa2..b97cf348cdc1 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/atomic.h>
+#include <xen/events.h>
 #include <xen/interface/io/pciif.h>
 
 #define DRV_NAME       "xen-pciback"
@@ -26,6 +27,8 @@ struct pci_dev_entry {
 #define PDEVF_op_active                (1<<(_PDEVF_op_active))
 #define _PCIB_op_pending       (1)
 #define PCIB_op_pending                (1<<(_PCIB_op_pending))
+#define _EOI_pending           (2)
+#define EOI_pending            (1<<(_EOI_pending))
 
 struct xen_pcibk_device {
        void *pci_dev_data;
@@ -181,12 +184,17 @@ static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
 irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
 void xen_pcibk_do_op(struct work_struct *data);
 
+static inline void xen_pcibk_lateeoi(struct xen_pcibk_device *pdev,
+                                    unsigned int eoi_flag)
+{
+       if (test_and_clear_bit(_EOI_pending, &pdev->flags))
+               xen_irq_lateeoi(pdev->evtchn_irq, eoi_flag);
+}
+
 int xen_pcibk_xenbus_register(void);
 void xen_pcibk_xenbus_unregister(void);
 
 extern int verbose_request;
-
-void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev);
 #endif
 
 /* Handles shared IRQs that can to device domain and control domain. */
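
xen_pcibk_lateeoi() pairs a test_and_clear_bit() with the test_and_set_bit() done in the interrupt handler, so that however many completion paths race, exactly one EOI is sent per received event. The handshake in miniature (a hedged model built from the pieces above, not the driver code verbatim):

        static irqreturn_t model_handler(int irq, void *dev_id)
        {
                struct xen_pcibk_device *pdev = dev_id;

                /* mark that an EOI is now owed for this event */
                WARN(test_and_set_bit(_EOI_pending, &pdev->flags),
                     "IRQ while EOI pending\n");
                schedule_work(&pdev->op_work);
                return IRQ_HANDLED;
        }

        static void model_complete(struct xen_pcibk_device *pdev)
        {
                /* whoever clears the bit pays the single EOI */
                if (test_and_clear_bit(_EOI_pending, &pdev->flags))
                        xen_irq_lateeoi(pdev->evtchn_irq, 0);
        }
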
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index e7fbed56c044..eb5dd80530e7 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -296,26 +296,41 @@ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
        return 0;
 }
 #endif
+
+static inline bool xen_pcibk_test_op_pending(struct xen_pcibk_device *pdev)
+{
+       return test_bit(_XEN_PCIF_active,
+                       (unsigned long *)&pdev->sh_info->flags) &&
+              !test_and_set_bit(_PDEVF_op_active, &pdev->flags);
+}
+
 /*
 * Now the same evtchn is used for both pcifront conf_read_write request
 * as well as pcie aer front end ack. We use a new work_queue to schedule
 * xen_pcibk conf_read_write service for avoiding confict with aer_core
 * do_recovery job which also use the system default work_queue
 */
-void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
+static void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
 {
+       bool eoi = true;
+
        /* Check that frontend is requesting an operation and that we are not
         * already processing a request */
-       if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
-           && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
+       if (xen_pcibk_test_op_pending(pdev)) {
                schedule_work(&pdev->op_work);
+               eoi = false;
        }
        /*_XEN_PCIB_active should have been cleared by pcifront. And also make
        sure xen_pcibk is waiting for ack by checking _PCIB_op_pending*/
        if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
            && test_bit(_PCIB_op_pending, &pdev->flags)) {
                wake_up(&xen_pcibk_aer_wait_queue);
+               eoi = false;
        }
+
+       /* EOI if there was nothing to do. */
+       if (eoi)
+               xen_pcibk_lateeoi(pdev, XEN_EOI_FLAG_SPURIOUS);
 }
 
 /* Performing the configuration space reads/writes must not be done in atomic
@@ -323,10 +338,8 @@ void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
  * use of semaphores). This function is intended to be called from a work
  * queue in process context taking a struct xen_pcibk_device as a parameter */
 
-void xen_pcibk_do_op(struct work_struct *data)
+static void xen_pcibk_do_one_op(struct xen_pcibk_device *pdev)
 {
-       struct xen_pcibk_device *pdev =
-               container_of(data, struct xen_pcibk_device, op_work);
        struct pci_dev *dev;
        struct xen_pcibk_dev_data *dev_data = NULL;
        struct xen_pci_op *op = &pdev->op;
@@ -399,16 +412,31 @@ void xen_pcibk_do_op(struct work_struct *data)
        smp_mb__before_atomic(); /* /after/ clearing PCIF_active */
        clear_bit(_PDEVF_op_active, &pdev->flags);
        smp_mb__after_atomic(); /* /before/ final check for work */
+}
 
-       /* Check to see if the driver domain tried to start another request in
-        * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
-       */
-       xen_pcibk_test_and_schedule_op(pdev);
+void xen_pcibk_do_op(struct work_struct *data)
+{
+       struct xen_pcibk_device *pdev =
+               container_of(data, struct xen_pcibk_device, op_work);
+
+       do {
+               xen_pcibk_do_one_op(pdev);
+       } while (xen_pcibk_test_op_pending(pdev));
+
+       xen_pcibk_lateeoi(pdev, 0);
 }
 
 irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
 {
        struct xen_pcibk_device *pdev = dev_id;
+       bool eoi;
+
+       /* IRQs might come in before pdev->evtchn_irq is written. */
+       if (unlikely(pdev->evtchn_irq != irq))
+               pdev->evtchn_irq = irq;
+
+       eoi = test_and_set_bit(_EOI_pending, &pdev->flags);
+       WARN(eoi, "IRQ while EOI pending\n");
 
        xen_pcibk_test_and_schedule_op(pdev);
 
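Replacing the tail call to xen_pcibk_test_and_schedule_op() with a do/while in xen_pcibk_do_op() keeps the classic lost-wakeup pattern intact: clear the busy flag, order that clear against the recheck, and only go idle (and EOI) when no request sneaked in meanwhile. Stripped to its skeleton (process_one_request() is a hypothetical stand-in):

        static void model_op_work(struct xen_pcibk_device *pdev)
        {
                do {
                        process_one_request(pdev);      /* hypothetical */
                        clear_bit(_PDEVF_op_active, &pdev->flags);
                        smp_mb__after_atomic();         /* clear before recheck */
                } while (test_bit(_XEN_PCIF_active,
                                  (unsigned long *)&pdev->sh_info->flags) &&
                         !test_and_set_bit(_PDEVF_op_active, &pdev->flags));

                xen_pcibk_lateeoi(pdev, 0);             /* one EOI, at the very end */
        }
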
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 5ce878c51d03..f33eb40cb414 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -122,7 +122,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
 
        pdev->sh_info = vaddr;
 
-       err = bind_interdomain_evtchn_to_irqhandler(
+       err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
                pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
                0, DRV_NAME, pdev);
        if (err < 0) {
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 992cb8fa272c..3243d917651a 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -91,7 +91,6 @@ struct vscsibk_info {
        unsigned int irq;
 
        struct vscsiif_back_ring ring;
-       int ring_error;
 
        spinlock_t ring_lock;
        atomic_t nr_unreplied_reqs;
@@ -723,7 +722,8 @@ static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
        return pending_req;
 }
 
-static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+static int scsiback_do_cmd_fn(struct vscsibk_info *info,
+                             unsigned int *eoi_flags)
 {
        struct vscsiif_back_ring *ring = &info->ring;
        struct vscsiif_request ring_req;
@@ -740,11 +740,12 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
                rc = ring->rsp_prod_pvt;
                pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
                           info->domid, rp, rc, rp - rc);
-               info->ring_error = 1;
-               return 0;
+               return -EINVAL;
        }
 
        while ((rc != rp)) {
+               *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
+
                if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
                        break;
 
@@ -803,13 +804,16 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
 static irqreturn_t scsiback_irq_fn(int irq, void *dev_id)
 {
        struct vscsibk_info *info = dev_id;
+       int rc;
+       unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
 
-       if (info->ring_error)
-               return IRQ_HANDLED;
-
-       while (scsiback_do_cmd_fn(info))
+       while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0)
                cond_resched();
 
+       /* In case of a ring error we keep the event channel masked. */
+       if (!rc)
+               xen_irq_lateeoi(irq, eoi_flags);
+
        return IRQ_HANDLED;
 }
 
@@ -830,7 +834,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
        sring = (struct vscsiif_sring *)area;
        BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
 
-       err = bind_interdomain_evtchn_to_irq(info->domid, evtchn);
+       err = bind_interdomain_evtchn_to_irq_lateeoi(info->domid, evtchn);
        if (err < 0)
                goto unmap_page;
 
@@ -1253,7 +1257,6 @@ static int scsiback_probe(struct xenbus_device *dev,
 
        info->domid = dev->otherend_id;
        spin_lock_init(&info->ring_lock);
-       info->ring_error = 0;
        atomic_set(&info->nr_unreplied_reqs, 0);
        init_waitqueue_head(&info->waiting_to_free);
        info->dev = dev;
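
xen-scsiback shows the intended shape of a lateeoi-aware handler: start from XEN_EOI_FLAG_SPURIOUS, drop the flag as soon as a real request is seen, and on a fatal ring error skip the EOI entirely so the event channel stays masked. As a hedged template (model_info and its helpers are hypothetical):

        static irqreturn_t model_irq(int irq, void *dev_id)
        {
                struct model_info *info = dev_id;
                unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

                while (consume_one_request(info))       /* hypothetical */
                        eoi_flags = 0;                  /* real work was found */

                if (!info->ring_broken)                 /* hypothetical flag */
                        xen_irq_lateeoi(irq, eoi_flags);

                return IRQ_HANDLED;
        }
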
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d6c827a9ebc5..5c2f4f58da8f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3879,6 +3879,10 @@ int btree_write_cache_pages(struct address_space *mapping,
                        if (!ret) {
                                free_extent_buffer(eb);
                                continue;
+                       } else if (ret < 0) {
+                               done = 1;
+                               free_extent_buffer(eb);
+                               break;
                        }
 
                        ret = write_one_eb(eb, fs_info, wbc, &epd);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 981091bd6c3c..ebca009030c3 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3854,6 +3854,8 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
                        ret = -EINTR;
                        goto out;
                }
+
+               cond_resched();
        }
        ret = 0;
 
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 211ac472cb9d..942874257a09 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -493,7 +493,13 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
                else if (map_chars == SFM_MAP_UNI_RSVD) {
                        bool end_of_string;
 
-                       if (i == srclen - 1)
+                       /*
+                        * Remap spaces and periods found at the end of every
+                        * component of the path. The special cases of '.' and
+                        * '..' do not need to be dealt with explicitly because
+                        * they are addressed in namei.c:link_path_walk().
+                        */
+                       if ((i == srclen - 1) || (source[i+1] == '\\'))
                                end_of_string = true;
                        else
                                end_of_string = false;
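
The effect of the widened condition: with SFM mapping active, reserved trailing characters are now remapped at every path separator rather than only at the very end of the whole string. In a path like "dir.\file.", the '.' ending the "dir." component (it is followed by '\') now receives the same SFM remapping as the final '.' of "file.", so both components survive a round trip to servers that reject trailing periods and spaces.
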
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 4df4d31057b3..5bf62bdd84b8 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1890,6 +1890,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
 
        ext4_write_lock_xattr(inode, &no_expand);
        if (!ext4_has_inline_data(inode)) {
+               ext4_write_unlock_xattr(inode, &no_expand);
                *has_inline = 0;
                ext4_journal_stop(handle);
                return;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2527eb304949..84d28d3efc60 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1571,8 +1571,8 @@ static const struct mount_opts {
        {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
                       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
                                                        MOPT_CLEAR | MOPT_Q},
-       {Opt_usrjquota, 0, MOPT_Q},
-       {Opt_grpjquota, 0, MOPT_Q},
+       {Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
+       {Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
        {Opt_offusrjquota, 0, MOPT_Q},
        {Opt_offgrpjquota, 0, MOPT_Q},
        {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
@@ -4336,6 +4336,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 #ifdef CONFIG_QUOTA
 failed_mount8:
        ext4_unregister_sysfs(sb);
+       kobject_put(&sbi->s_kobj);
 #endif
 failed_mount7:
        ext4_unregister_li_request(sb);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index efd44d5645d8..f19e49a5d032 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -758,7 +758,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
        }
        kfree(gl->gl_lksb.sb_lvbptr);
        kmem_cache_free(cachep, gl);
-       atomic_dec(&sdp->sd_glock_disposal);
+       if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+               wake_up(&sdp->sd_glock_wait);
        *glp = tmp;
 
        return ret;
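
The gfs2_glock_get() change turns a plain atomic_dec() into atomic_dec_and_test() plus wake_up(), because sd_glock_disposal is a counter a teardown path sleeps on: any decrement that can be the final one must also be a potential wakeup, or the waiter can sleep forever. The pair, sketched roughly:

        /* decrement side: the last one out must wake the waiter */
        if (atomic_dec_and_test(&sdp->sd_glock_disposal))
                wake_up(&sdp->sd_glock_wait);

        /* teardown side (roughly what gfs2 does while unmounting) */
        wait_event(sdp->sd_glock_wait,
                   atomic_read(&sdp->sd_glock_disposal) == 0);
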
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 0a80f6636549..9621badb9599 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -730,9 +730,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
                }
 
                gfs2_free_clones(rgd);
+               return_all_reservations(rgd);
                kfree(rgd->rd_bits);
                rgd->rd_bits = NULL;
-               return_all_reservations(rgd);
                kmem_cache_free(gfs2_rgrpd_cachep, rgd);
        }
 }
@@ -1371,6 +1371,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+               return -EROFS;
+
        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
 
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 4d6e99ea37b8..e0fb62f5cf63 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1733,6 +1733,7 @@ static void ocfs2_inode_init_once(void *data)
 
        oi->ip_blkno = 0ULL;
        oi->ip_clusters = 0;
+       oi->ip_next_orphan = NULL;
 
        ocfs2_resv_init_once(&oi->ip_la_data_resv);
 
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 3a8cc7139912..89fdcc641715 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -1318,7 +1318,7 @@ xfs_rmap_convert_shared(
         * record for our insertion point. This will also give us the record for
         * start block contiguity tests.
         */
-       error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
+       error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext,
                        &PREV, &i);
        XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 33a28efc3085..c5a24b80c7f7 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -262,8 +262,8 @@ xfs_rmapbt_key_diff(
        else if (y > x)
                return -1;
 
-       x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
-       y = rec->rm_offset;
+       x = be64_to_cpu(kp->rm_offset);
+       y = xfs_rmap_irec_offset_pack(rec);
        if (x > y)
                return 1;
        else if (y > x)
@@ -294,8 +294,8 @@ xfs_rmapbt_diff_two_keys(
        else if (y > x)
                return -1;
 
-       x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
-       y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
+       x = be64_to_cpu(kp1->rm_offset);
+       y = be64_to_cpu(kp2->rm_offset);
        if (x > y)
                return 1;
        else if (y > x)
@@ -401,8 +401,8 @@ xfs_rmapbt_keys_inorder(
                return 1;
        else if (a > b)
                return 0;
-       a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
-       b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
+       a = be64_to_cpu(k1->rmap.rm_offset);
+       b = be64_to_cpu(k2->rmap.rm_offset);
        if (a <= b)
                return 1;
        return 0;
@@ -431,8 +431,8 @@ xfs_rmapbt_recs_inorder(
                return 1;
        else if (a > b)
                return 0;
-       a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
-       b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
+       a = be64_to_cpu(r1->rmap.rm_offset);
+       b = be64_to_cpu(r2->rmap.rm_offset);
        if (a <= b)
                return 1;
        return 0;
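
The rmapbt comparison fixes matter because rm_offset packs flag bits (attr fork, bmbt block, unwritten) alongside the logical offset. Masking them off with XFS_RMAP_OFF() made two keys that differ only in those flags compare as equal, so records could land out of the btree's true sort order; comparing the full 64-bit packed value (via xfs_rmap_irec_offset_pack() for the in-core record) restores a total ordering.
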
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 7bfddcd32d73..0d587657056d 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -864,6 +864,16 @@ xfs_setattr_size(
        if (newsize > oldsize) {
                error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing);
        } else {
+               /*
+                * iomap won't detect a dirty page over an unwritten block (or a
+                * cow block over a hole) and subsequently skips zeroing the
+                * newly post-EOF portion of the page. Flush the new EOF to
+                * convert the block before the pagecache truncate.
+                */
+               error = filemap_write_and_wait_range(inode->i_mapping, newsize,
+                                                    newsize);
+               if (error)
+                       return error;
                error = iomap_truncate_page(inode, newsize, &did_zeroing,
                                &xfs_iomap_ops);
        }
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index cecd37569ddb..353bfe9c5cdd 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -144,7 +144,7 @@ xfs_fs_map_blocks(
                goto out_unlock;
        error = invalidate_inode_pages2(inode->i_mapping);
        if (WARN_ON_ONCE(error))
-               return error;
+               goto out_unlock;
 
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 51bb6532785c..1a2111c775ae 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -60,21 +60,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
  */
 static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
 {
-       if (skb_shared(skb)) {
-               struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+       struct sk_buff *nskb;
 
-               if (likely(nskb)) {
-                       can_skb_set_owner(nskb, skb->sk);
-                       consume_skb(skb);
-                       return nskb;
-               } else {
-                       kfree_skb(skb);
-                       return NULL;
-               }
+       nskb = skb_clone(skb, GFP_ATOMIC);
+       if (unlikely(!nskb)) {
+               kfree_skb(skb);
+               return NULL;
        }
 
-       /* we can assume to have an unshared skb with proper owner */
-       return skb;
+       can_skb_set_owner(nskb, skb->sk);
+       consume_skb(skb);
+       return nskb;
 }
 
 #endif /* !_CAN_SKB_H */
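
can_create_echo_skb() now clones unconditionally instead of only when the skb is shared, which guarantees the echo path always owns a private copy with the proper owner set. A typical call site would look roughly like this (hypothetical driver, not from the tree):

        static netdev_tx_t model_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                skb = can_create_echo_skb(skb);
                if (!skb)
                        return NETDEV_TX_OK;    /* original skb already freed */

                /* store skb for TX echo and hand it to the hardware ... */
                return NETDEV_TX_OK;
        }
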
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ae8ecf821019..a7057b772612 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -475,7 +475,7 @@ struct pmu {
  */
 struct perf_addr_filter {
        struct list_head        entry;
-       struct inode            *inode;
+       struct path             path;
        unsigned long           offset;
        unsigned long           size;
        unsigned int            range   : 1,
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index aa16e6468f91..cc1e71334e53 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -16,12 +16,44 @@ void prandom_bytes(void *buf, size_t nbytes);
 void prandom_seed(u32 seed);
 void prandom_reseed_late(void);
 
+#if BITS_PER_LONG == 64
+/*
+ * The core SipHash round function.  Each line can be executed in
+ * parallel given enough CPU resources.
+ */
+#define PRND_SIPROUND(v0, v1, v2, v3) ( \
+       v0 += v1, v1 = rol64(v1, 13),  v2 += v3, v3 = rol64(v3, 16), \
+       v1 ^= v0, v0 = rol64(v0, 32),  v3 ^= v2,                     \
+       v0 += v3, v3 = rol64(v3, 21),  v2 += v1, v1 = rol64(v1, 17), \
+       v3 ^= v0,                      v1 ^= v2, v2 = rol64(v2, 32)  \
+)
+
+#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
+#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
+
+#elif BITS_PER_LONG == 32
+/*
+ * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash.
+ * This is weaker, but 32-bit machines are not used for high-traffic
+ * applications, so there is less output for an attacker to analyze.
+ */
+#define PRND_SIPROUND(v0, v1, v2, v3) ( \
+       v0 += v1, v1 = rol32(v1,  5),  v2 += v3, v3 = rol32(v3,  8), \
+       v1 ^= v0, v0 = rol32(v0, 16),  v3 ^= v2,                     \
+       v0 += v3, v3 = rol32(v3,  7),  v2 += v1, v1 = rol32(v1, 13), \
+       v3 ^= v0,                      v1 ^= v2, v2 = rol32(v2, 16)  \
+)
+#define PRND_K0 0x6c796765
+#define PRND_K1 0x74656462
+
+#else
+#error Unsupported BITS_PER_LONG
+#endif
+
 struct rnd_state {
        __u32 s1, s2, s3, s4;
 };
 
-DECLARE_PER_CPU(struct rnd_state, net_rand_state);
-
 u32 prandom_u32_state(struct rnd_state *state);
 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
 void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
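
These macros feed the reworked reseeding in lib/random32.c; the constants are derived from the standard SipHash/HSipHash initialization strings. As a hedged illustration of how such a permutation is typically driven on a 64-bit build (this is an invented mixing function, not the lib/random32.c code):

        static u64 model_mix(u64 key, u64 input)
        {
                u64 v0 = key ^ PRND_K0;
                u64 v1 = key ^ PRND_K1;
                u64 v2 = key ^ PRND_K0;
                u64 v3 = key ^ PRND_K1 ^ input;

                PRND_SIPROUND(v0, v1, v2, v3);
                PRND_SIPROUND(v0, v1, v2, v3);

                return v0 ^ v1 ^ v2 ^ v3;       /* fold the state down */
        }
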
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 980c71b3001a..2a45b8c87edb 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -188,6 +188,10 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts)
  */
 static inline s64 timespec64_to_ns(const struct timespec64 *ts)
 {
+       /* Prevent multiplication overflow */
+       if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
+               return KTIME_MAX;
+
        return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
 }
 
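A quick worked case for the time64.h guard: KTIME_SEC_MAX is KTIME_MAX / NSEC_PER_SEC, so any tv_sec at or above it would overflow the signed multiplication by NSEC_PER_SEC, which is undefined behaviour; the check saturates to KTIME_MAX before the multiply can happen. Because the comparison casts tv_sec to unsigned long long, negative values compare as huge unsigned numbers and are swept into the same saturating path.
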
diff --git a/include/xen/events.h b/include/xen/events.h
index 88da2abaf535..ad0c61cf399b 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -12,11 +12,16 @@
 
 unsigned xen_evtchn_nr_channels(void);
 
-int bind_evtchn_to_irq(unsigned int evtchn);
-int bind_evtchn_to_irqhandler(unsigned int evtchn,
+int bind_evtchn_to_irq(evtchn_port_t evtchn);
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn);
+int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags, const char *devname,
                              void *dev_id);
+int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
+                                     irq_handler_t handler,
+                                     unsigned long irqflags, const char *devname,
+                                     void *dev_id);
 int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
@@ -29,13 +34,21 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           const char *devname,
                           void *dev_id);
 int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
-                                  unsigned int remote_port);
+                                  evtchn_port_t remote_port);
+int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
+                                          evtchn_port_t remote_port);
 int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
-                                         unsigned int remote_port,
+                                         evtchn_port_t remote_port,
                                          irq_handler_t handler,
                                          unsigned long irqflags,
                                          const char *devname,
                                          void *dev_id);
+int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
+                                                 evtchn_port_t remote_port,
+                                                 irq_handler_t handler,
+                                                 unsigned long irqflags,
+                                                 const char *devname,
+                                                 void *dev_id);
 
 /*
  * Common unbind function for all event sources. Takes IRQ to unbind from.
@@ -44,6 +57,14 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
  */
 void unbind_from_irqhandler(unsigned int irq, void *dev_id);
 
+/*
+ * Send late EOI for an IRQ bound to an event channel via one of the *_lateeoi
+ * functions above.
+ */
+void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags);
+/* Signal an event was spurious, i.e. there was no action resulting from it. */
+#define XEN_EOI_FLAG_SPURIOUS  0x00000001
+
 #define XEN_IRQ_PRIORITY_MAX     EVTCHN_FIFO_PRIORITY_MAX
 #define XEN_IRQ_PRIORITY_DEFAULT EVTCHN_FIFO_PRIORITY_DEFAULT
 #define XEN_IRQ_PRIORITY_MIN     EVTCHN_FIFO_PRIORITY_MIN
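
Putting the new API together: a backend opts in by binding with one of the *_lateeoi variants and then owes an explicit xen_irq_lateeoi() once each event has really been handled. A hedged sketch (everything except the xen_* calls and types is made up):

        static irqreturn_t model_backend_irq(int irq, void *dev_id)
        {
                struct model_backend *be = dev_id;

                if (!fetch_request(be)) {       /* hypothetical */
                        /* nothing there: let the core count it as spurious */
                        xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
                        return IRQ_HANDLED;
                }

                schedule_work(&be->work);       /* worker sends the EOI when done */
                return IRQ_HANDLED;
        }

        static int model_connect(struct model_backend *be, domid_t domid,
                                 evtchn_port_t port)
        {
                int irq;

                irq = bind_interdomain_evtchn_to_irqhandler_lateeoi(domid, port,
                                model_backend_irq, 0, "model-backend", be);
                if (irq < 0)
                        return irq;
                be->irq = irq;
                return 0;
        }
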
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7aad4d22b422..9d4a71deac99 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5069,11 +5069,11 @@ static void perf_pmu_output_stop(struct perf_event *event);
 static void perf_mmap_close(struct vm_area_struct *vma)
 {
        struct perf_event *event = vma->vm_file->private_data;
-
        struct ring_buffer *rb = ring_buffer_get(event);
        struct user_struct *mmap_user = rb->mmap_user;
        int mmap_locked = rb->mmap_locked;
        unsigned long size = perf_data_size(rb);
+       bool detach_rest = false;
 
        if (event->pmu->event_unmapped)
                event->pmu->event_unmapped(event);
@@ -5104,7 +5104,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                mutex_unlock(&event->mmap_mutex);
        }
 
-       atomic_dec(&rb->mmap_count);
+       if (atomic_dec_and_test(&rb->mmap_count))
+               detach_rest = true;
 
        if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
                goto out_put;
@@ -5113,7 +5114,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
        mutex_unlock(&event->mmap_mutex);
 
        /* If there's still other mmap()s of this buffer, we're done. */
-       if (atomic_read(&rb->mmap_count))
+       if (!detach_rest)
                goto out_put;
 
        /*
@@ -6271,7 +6272,7 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
 
        raw_spin_lock_irqsave(&ifh->lock, flags);
        list_for_each_entry(filter, &ifh->list, entry) {
-               if (filter->inode) {
+               if (filter->path.dentry) {
                        event->addr_filters_offs[count] = 0;
                        restart++;
                }
@@ -6814,7 +6815,11 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
                                     struct file *file, unsigned long offset,
                                     unsigned long size)
 {
-       if (filter->inode != file->f_inode)
+       /* d_inode(NULL) won't be equal to any mapped user-space file */
+       if (!filter->path.dentry)
+               return false;
+
+       if (d_inode(filter->path.dentry) != file_inode(file))
                return false;
 
        if (filter->offset > offset + size)
@@ -8028,8 +8033,7 @@ static void free_filters_list(struct list_head *filters)
        struct perf_addr_filter *filter, *iter;
 
        list_for_each_entry_safe(filter, iter, filters, entry) {
-               if (filter->inode)
-                       iput(filter->inode);
+               path_put(&filter->path);
                list_del(&filter->entry);
                kfree(filter);
        }
@@ -8123,7 +8127,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
                 * Adjust base offset if the filter is associated to a binary
                 * that needs to be mapped:
                 */
-               if (filter->inode)
+               if (filter->path.dentry)
                        event->addr_filters_offs[count] =
                                perf_addr_filter_apply(filter, mm);
 
@@ -8196,7 +8200,6 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
 {
        struct perf_addr_filter *filter = NULL;
        char *start, *orig, *filename = NULL;
-       struct path path;
        substring_t args[MAX_OPT_ARGS];
        int state = IF_STATE_ACTION, token;
        unsigned int kernel = 0;
@@ -8259,6 +8262,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
                        if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
                                int fpos = filter->range ? 2 : 1;
 
+                               kfree(filename);
                                filename = match_strdup(&args[fpos]);
                                if (!filename) {
                                        ret = -ENOMEM;
@@ -8287,19 +8291,15 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
                                        goto fail;
 
                                /* look up the path and grab its inode */
-                               ret = kern_path(filename, LOOKUP_FOLLOW, &path);
+                               ret = kern_path(filename, LOOKUP_FOLLOW,
+                                               &filter->path);
                                if (ret)
-                                       goto fail_free_name;
-
-                               filter->inode = igrab(d_inode(path.dentry));
-                               path_put(&path);
-                               kfree(filename);
-                               filename = NULL;
+                                       goto fail;
 
                                ret = -EINVAL;
-                               if (!filter->inode ||
-                                   !S_ISREG(filter->inode->i_mode))
-                                       /* free_filters_list() will iput() */
+                               if (!filter->path.dentry ||
+                                   !S_ISREG(d_inode(filter->path.dentry)
+                                            ->i_mode))
                                        goto fail;
                        }
 
@@ -8312,13 +8312,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
        if (state != IF_STATE_ACTION)
                goto fail;
 
+       kfree(filename);
        kfree(orig);
 
        return 0;
 
-fail_free_name:
-       kfree(filename);
 fail:
+       kfree(filename);
        free_filters_list(filters);
        kfree(orig);
 
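Two fixes meet in perf_event_parse_addr_filter() above: the filter now pins
a struct path (dropped with path_put() in free_filters_list()) instead of an
inode reference, and the duplicated filename is freed both when the token
repeats inside the loop and on every exit path. Reduced to its shape, the
leak-free loop looks roughly like this (parse_one() and the surrounding
names are hypothetical):

        /* Sketch of the leak-free parse-loop shape (names hypothetical). */
        static int parse_filters(const char *fstr)
        {
                char *orig, *start, *filename = NULL;
                int ret = -EINVAL;

                orig = start = kstrdup(fstr, GFP_KERNEL);
                if (!orig)
                        return -ENOMEM;

                while (parse_one(&start)) {     /* hypothetical tokenizer */
                        kfree(filename);        /* token may repeat: drop old copy */
                        filename = kstrdup(start, GFP_KERNEL);
                        if (!filename) {
                                ret = -ENOMEM;
                                goto fail;
                        }
                }
                ret = 0;
        fail:
                kfree(filename);                /* one exit path frees everything */
                kfree(orig);
                return ret;
        }
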
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 486fd78eb8d5..c8c1c3db5d06 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -212,7 +212,7 @@ static inline int get_recursion_context(int *recursion)
                rctx = 3;
        else if (in_irq())
                rctx = 2;
-       else if (in_softirq())
+       else if (in_serving_softirq())
                rctx = 1;
        else
                rctx = 0;
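
The one-liner above matters because in_softirq() is also true when a task has
merely disabled bottom halves with local_bh_disable(); only
in_serving_softirq() is limited to code actually running a softirq handler.
With the old test, a task-context event under local_bh_disable() would claim
the softirq recursion slot and could collide with a real softirq on the same
CPU. Roughly (a simplified sketch, not the kernel's code):

        /* Simplified sketch of the context-slot choice. */
        int pick_recursion_slot(void)
        {
                if (in_nmi())
                        return 3;
                if (in_irq())
                        return 2;
                if (in_serving_softirq())       /* inside a softirq handler */
                        return 1;
                return 0;       /* task context, even under local_bh_disable() */
        }
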
diff --git a/kernel/exit.c b/kernel/exit.c
index 27f4168eaeb1..f9943ef23fa8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -483,7 +483,10 @@ static void exit_mm(struct task_struct *tsk)
                up_read(&mm->mmap_sem);
 
                self.task = tsk;
-               self.next = xchg(&core_state->dumper.next, &self);
+               if (self.task->flags & PF_SIGNALED)
+                       self.next = xchg(&core_state->dumper.next, &self);
+               else
+                       self.task = NULL;
                /*
                 * Implies mb(), the result of xchg() must be visible
                 * to core_state->dumper.
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 3bbfd6a9c475..bb3a46cbe034 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -67,6 +67,7 @@ config IRQ_DOMAIN_HIERARCHY
 # Generic IRQ IPI support
 config GENERIC_IRQ_IPI
        bool
+       select IRQ_DOMAIN_HIERARCHY
 
 # Generic MSI interrupt support
 config GENERIC_MSI_IRQ
diff --git a/kernel/reboot.c b/kernel/reboot.c
index bd30a973fe94..2946ed1d99d4 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -512,22 +512,22 @@ static int __init reboot_setup(char *str)
                        break;
 
                case 's':
-               {
-                       int rc;
-
-                       if (isdigit(*(str+1))) {
-                               rc = kstrtoint(str+1, 0, &reboot_cpu);
-                               if (rc)
-                                       return rc;
-                       } else if (str[1] == 'm' && str[2] == 'p' &&
-                                  isdigit(*(str+3))) {
-                               rc = kstrtoint(str+3, 0, &reboot_cpu);
-                               if (rc)
-                                       return rc;
-                       } else
+                       if (isdigit(*(str+1)))
+                               reboot_cpu = simple_strtoul(str+1, NULL, 0);
+                       else if (str[1] == 'm' && str[2] == 'p' &&
+                                                       isdigit(*(str+3)))
+                               reboot_cpu = simple_strtoul(str+3, NULL, 0);
+                       else
                                reboot_mode = REBOOT_SOFT;
+                       if (reboot_cpu >= num_possible_cpus()) {
+                               pr_err("Ignoring the CPU number in reboot= option. "
+                                      "CPU %d exceeds possible cpu number %d\n",
+                                      reboot_cpu, num_possible_cpus());
+                               reboot_cpu = 0;
+                               break;
+                       }
                        break;
-               }
+
                case 'g':
                        reboot_mode = REBOOT_GPIO;
                        break;
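
The rewritten 's' case also validates the CPU number handed in via the
reboot= parameter, so an out-of-range value is reported and dropped instead
of being stored and used later as an out-of-bounds CPU index. For example
(values are illustrative):

        reboot=s2       reboot using CPU 2
        reboot=smp1     same thing, older "smp" spelling
        reboot=s        no digit: select soft reboot mode

On a machine with four possible CPUs, reboot=s64 now triggers the pr_err()
above and falls back to CPU 0 rather than remembering CPU 64.
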
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index d2e4698d43fe..c9325a1e30d0 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1636,13 +1636,6 @@ void update_process_times(int user_tick)
 #endif
        scheduler_tick();
        run_posix_cpu_timers(p);
-
-       /* The current CPU might make use of net randoms without receiving IRQs
-        * to renew them often enough. Let's update the net_rand_state from a
-        * non-constant value that's not affine to the number of calls to make
-        * sure it's updated when there's some activity (we don't care in idle).
-        */
-       this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
 }
 
 /**
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index fb2aa2430edc..55f60d2edc3f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -416,14 +416,16 @@ struct rb_event_info {
 
 /*
  * Used for which event context the event is in.
- *  NMI     = 0
- *  IRQ     = 1
- *  SOFTIRQ = 2
- *  NORMAL  = 3
+ *  TRANSITION = 0
+ *  NMI     = 1
+ *  IRQ     = 2
+ *  SOFTIRQ = 3
+ *  NORMAL  = 4
  *
  * See trace_recursive_lock() comment below for more details.
  */
 enum {
+       RB_CTX_TRANSITION,
        RB_CTX_NMI,
        RB_CTX_IRQ,
        RB_CTX_SOFTIRQ,
@@ -2579,10 +2581,10 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * a bit of overhead in something as critical as function tracing,
  * we use a bitmask trick.
  *
- *  bit 0 =  NMI context
- *  bit 1 =  IRQ context
- *  bit 2 =  SoftIRQ context
- *  bit 3 =  normal context.
+ *  bit 1 =  NMI context
+ *  bit 2 =  IRQ context
+ *  bit 3 =  SoftIRQ context
+ *  bit 4 =  normal context.
  *
  * This works because this is the order of contexts that can
  * preempt other contexts. A SoftIRQ never preempts an IRQ
@@ -2605,6 +2607,30 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * The least significant bit can be cleared this way, and it
  * just so happens that it is the same bit corresponding to
  * the current context.
+ *
+ * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
+ * is set when a recursion is detected at the current context, and if
+ * the TRANSITION bit is already set, it will fail the recursion.
+ * This is needed because there's a lag between the changing of
+ * interrupt context and updating the preempt count. In this case,
+ * a false positive will be found. To handle this, one extra recursion
+ * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
+ * bit is already set, then it is considered a recursion and the function
+ * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
+ *
+ * On the trace_recursive_unlock(), the TRANSITION bit will be the first
+ * to be cleared. Even if it wasn't the context that set it. That is,
+ * if an interrupt comes in while NORMAL bit is set and the ring buffer
+ * is called before preempt_count() is updated, since the check will
+ * be on the NORMAL bit, the TRANSITION bit will then be set. If an
+ * NMI then comes in, it will set the NMI bit, but when the NMI code
+ * does the trace_recursive_unlock() it will clear the TRANSITION bit
+ * and leave the NMI bit set. But this is fine, because the interrupt
+ * code that set the TRANSITION bit will then clear the NMI bit when it
+ * calls trace_recursive_unlock(). If another NMI comes in, it will
+ * set the TRANSITION bit and continue.
+ *
+ * Note: The TRANSITION bit only handles a single transition between contexts.
  */
 
 static __always_inline int
@@ -2623,8 +2649,16 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
        } else
                bit = RB_CTX_NORMAL;
 
-       if (unlikely(val & (1 << bit)))
-               return 1;
+       if (unlikely(val & (1 << bit))) {
+               /*
+                * It is possible that this was called by transitioning
+                * between interrupt context, and preempt_count() has not
+                * been updated yet. In this case, use the TRANSITION bit.
+                */
+               bit = RB_CTX_TRANSITION;
+               if (val & (1 << bit))
+                       return 1;
+       }
 
        val |= (1 << bit);
        cpu_buffer->current_context = val;
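
Condensed into plain C, the scheme the comment describes looks roughly like
this (a standalone sketch of the bitmask trick, not a copy of the kernel's
per-CPU code):

        /* Standalone sketch: recursion bitmask with a transition bit. */
        enum { CTX_TRANSITION, CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

        static unsigned int current_context;    /* per-CPU in the real code */

        static int recursive_lock(int bit)      /* bit = context we appear to be in */
        {
                unsigned int val = current_context;

                if (val & (1 << bit)) {
                        /*
                         * Possibly an interrupt arrived before preempt_count()
                         * was updated: allow exactly one extra level.
                         */
                        bit = CTX_TRANSITION;
                        if (val & (1 << bit))
                                return -1;      /* genuine recursion */
                }
                current_context = val | (1 << bit);
                return 0;
        }

        static void recursive_unlock(void)
        {
                /*
                 * Clear the least significant set bit: the transition bit
                 * if it is set, otherwise the bit of the exiting context.
                 */
                current_context &= current_context - 1;
        }
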
diff --git a/lib/random32.c b/lib/random32.c
index d5c3137d93f4..3c5b67b69cba 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -39,16 +39,6 @@
 #include <linux/sched.h>
 #include <asm/unaligned.h>
 
-#ifdef CONFIG_RANDOM32_SELFTEST
-static void __init prandom_state_selftest(void);
-#else
-static inline void prandom_state_selftest(void)
-{
-}
-#endif
-
-DEFINE_PER_CPU(struct rnd_state, net_rand_state)  __latent_entropy;
-
 /**
  *     prandom_u32_state - seeded pseudo-random number generator.
  *     @state: pointer to state structure holding seeded state.
@@ -68,25 +58,6 @@ u32 prandom_u32_state(struct rnd_state *state)
 }
 EXPORT_SYMBOL(prandom_u32_state);
 
-/**
- *     prandom_u32 - pseudo random number generator
- *
- *     A 32 bit pseudo-random number is generated using a fast
- *     algorithm suitable for simulation. This algorithm is NOT
- *     considered safe for cryptographic use.
- */
-u32 prandom_u32(void)
-{
-       struct rnd_state *state = &get_cpu_var(net_rand_state);
-       u32 res;
-
-       res = prandom_u32_state(state);
-       put_cpu_var(net_rand_state);
-
-       return res;
-}
-EXPORT_SYMBOL(prandom_u32);
-
 /**
  *     prandom_bytes_state - get the requested number of pseudo-random bytes
  *
@@ -118,20 +89,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
 }
 EXPORT_SYMBOL(prandom_bytes_state);
 
-/**
- *     prandom_bytes - get the requested number of pseudo-random bytes
- *     @buf: where to copy the pseudo-random bytes to
- *     @bytes: the requested number of bytes
- */
-void prandom_bytes(void *buf, size_t bytes)
-{
-       struct rnd_state *state = &get_cpu_var(net_rand_state);
-
-       prandom_bytes_state(state, buf, bytes);
-       put_cpu_var(net_rand_state);
-}
-EXPORT_SYMBOL(prandom_bytes);
-
 static void prandom_warmup(struct rnd_state *state)
 {
        /* Calling RNG ten times to satisfy recurrence condition */
@@ -147,96 +104,6 @@ static void prandom_warmup(struct rnd_state *state)
        prandom_u32_state(state);
 }
 
-static u32 __extract_hwseed(void)
-{
-       unsigned int val = 0;
-
-       (void)(arch_get_random_seed_int(&val) ||
-              arch_get_random_int(&val));
-
-       return val;
-}
-
-static void prandom_seed_early(struct rnd_state *state, u32 seed,
-                              bool mix_with_hwseed)
-{
-#define LCG(x)  ((x) * 69069U) /* super-duper LCG */
-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
-       state->s1 = __seed(HWSEED() ^ LCG(seed),        2U);
-       state->s2 = __seed(HWSEED() ^ LCG(state->s1),   8U);
-       state->s3 = __seed(HWSEED() ^ LCG(state->s2),  16U);
-       state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
-}
-
-/**
- *     prandom_seed - add entropy to pseudo random number generator
- *     @seed: seed value
- *
- *     Add some additional seeding to the prandom pool.
- */
-void prandom_seed(u32 entropy)
-{
-       int i;
-       /*
-        * No locking on the CPUs, but then somewhat random results are, well,
-        * expected.
-        */
-       for_each_possible_cpu(i) {
-               struct rnd_state *state = &per_cpu(net_rand_state, i);
-
-               state->s1 = __seed(state->s1 ^ entropy, 2U);
-               prandom_warmup(state);
-       }
-}
-EXPORT_SYMBOL(prandom_seed);
-
-/*
- *     Generate some initially weak seeding values to allow
- *     to start the prandom_u32() engine.
- */
-static int __init prandom_init(void)
-{
-       int i;
-
-       prandom_state_selftest();
-
-       for_each_possible_cpu(i) {
-               struct rnd_state *state = &per_cpu(net_rand_state, i);
-               u32 weak_seed = (i + jiffies) ^ random_get_entropy();
-
-               prandom_seed_early(state, weak_seed, true);
-               prandom_warmup(state);
-       }
-
-       return 0;
-}
-core_initcall(prandom_init);
-
-static void __prandom_timer(unsigned long dontcare);
-
-static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
-
-static void __prandom_timer(unsigned long dontcare)
-{
-       u32 entropy;
-       unsigned long expires;
-
-       get_random_bytes(&entropy, sizeof(entropy));
-       prandom_seed(entropy);
-
-       /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
-       expires = 40 + prandom_u32_max(40);
-       seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
-
-       add_timer(&seed_timer);
-}
-
-static void __init __prandom_start_seed_timer(void)
-{
-       seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
-       add_timer(&seed_timer);
-}
-
 void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
 {
        int i;
@@ -256,51 +123,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
 }
 EXPORT_SYMBOL(prandom_seed_full_state);
 
-/*
- *     Generate better values after random number generator
- *     is fully initialized.
- */
-static void __prandom_reseed(bool late)
-{
-       unsigned long flags;
-       static bool latch = false;
-       static DEFINE_SPINLOCK(lock);
-
-       /* Asking for random bytes might result in bytes getting
-        * moved into the nonblocking pool and thus marking it
-        * as initialized. In this case we would double back into
-        * this function and attempt to do a late reseed.
-        * Ignore the pointless attempt to reseed again if we're
-        * already waiting for bytes when the nonblocking pool
-        * got initialized.
-        */
-
-       /* only allow initial seeding (late == false) once */
-       if (!spin_trylock_irqsave(&lock, flags))
-               return;
-
-       if (latch && !late)
-               goto out;
-
-       latch = true;
-       prandom_seed_full_state(&net_rand_state);
-out:
-       spin_unlock_irqrestore(&lock, flags);
-}
-
-void prandom_reseed_late(void)
-{
-       __prandom_reseed(true);
-}
-
-static int __init prandom_reseed(void)
-{
-       __prandom_reseed(false);
-       __prandom_start_seed_timer();
-       return 0;
-}
-late_initcall(prandom_reseed);
-
 #ifdef CONFIG_RANDOM32_SELFTEST
 static struct prandom_test1 {
        u32 seed;
@@ -420,7 +242,28 @@ static struct prandom_test2 {
        {  407983964U, 921U,  728767059U },
 };
 
-static void __init prandom_state_selftest(void)
+static u32 __extract_hwseed(void)
+{
+       unsigned int val = 0;
+
+       (void)(arch_get_random_seed_int(&val) ||
+              arch_get_random_int(&val));
+
+       return val;
+}
+
+static void prandom_seed_early(struct rnd_state *state, u32 seed,
+                              bool mix_with_hwseed)
+{
+#define LCG(x)  ((x) * 69069U) /* super-duper LCG */
+#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+       state->s1 = __seed(HWSEED() ^ LCG(seed),        2U);
+       state->s2 = __seed(HWSEED() ^ LCG(state->s1),   8U);
+       state->s3 = __seed(HWSEED() ^ LCG(state->s2),  16U);
+       state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
+}
+
+static int __init prandom_state_selftest(void)
 {
        int i, j, errors = 0, runs = 0;
        bool error = false;
@@ -460,5 +303,266 @@ static void __init prandom_state_selftest(void)
                pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
        else
                pr_info("prandom: %d self tests passed\n", runs);
+       return 0;
 }
+core_initcall(prandom_state_selftest);
 #endif
+
+/*
+ * The prandom_u32() implementation is now completely separate from the
+ * prandom_state() functions, which are retained (for now) for compatibility.
+ *
+ * Because of (ab)use in the networking code for choosing random TCP/UDP port
+ * numbers, which open DoS possibilities if guessable, we want something
+ * stronger than a standard PRNG.  But the performance requirements of
+ * the network code do not allow robust crypto for this application.
+ *
+ * So this is a homebrew Junior Spaceman implementation, based on the
+ * lowest-latency trustworthy crypto primitive available, SipHash.
+ * (The authors of SipHash have not been consulted about this abuse of
+ * their work.)
+ *
+ * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
+ * one word of output.  This abbreviated version uses 2 rounds per word
+ * of output.
+ */
+
+struct siprand_state {
+       unsigned long v0;
+       unsigned long v1;
+       unsigned long v2;
+       unsigned long v3;
+};
+
+static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
+
+/*
+ * This is the core CPRNG function.  As "pseudorandom", this is not used
+ * for truly valuable things, just intended to be a PITA to guess.
+ * For maximum speed, we do just two SipHash rounds per word.  This is
+ * the same rate as 4 rounds per 64 bits that SipHash normally uses,
+ * so hopefully it's reasonably secure.
+ *
+ * There are two changes from the official SipHash finalization:
+ * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
+ *   they are there only to make the output rounds distinct from the input
+ *   rounds, and this application has no input rounds.
+ * - Rather than returning v0^v1^v2^v3, return v1+v3.
+ *   If you look at the SipHash round, the last operation on v3 is
+ *   "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
+ *   Likewise "v1 ^= v2".  (The rotate of v2 makes a difference, but
+ *   it still cancels out half of the bits in v2 for no benefit.)
+ *   Second, since the last combining operation was xor, continue the
+ *   pattern of alternating xor/add for a tiny bit of extra non-linearity.
+ */
+static inline u32 siprand_u32(struct siprand_state *s)
+{
+       unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
+
+       PRND_SIPROUND(v0, v1, v2, v3);
+       PRND_SIPROUND(v0, v1, v2, v3);
+       s->v0 = v0;  s->v1 = v1;  s->v2 = v2;  s->v3 = v3;
+       return v1 + v3;
+}
+
+
+/**
+ *     prandom_u32 - pseudo random number generator
+ *
+ *     A 32 bit pseudo-random number is generated using a fast
+ *     algorithm suitable for simulation. This algorithm is NOT
+ *     considered safe for cryptographic use.
+ */
+u32 prandom_u32(void)
+{
+       struct siprand_state *state = get_cpu_ptr(&net_rand_state);
+       u32 res = siprand_u32(state);
+
+       put_cpu_ptr(&net_rand_state);
+       return res;
+}
+EXPORT_SYMBOL(prandom_u32);
+
+/**
+ *     prandom_bytes - get the requested number of pseudo-random bytes
+ *     @buf: where to copy the pseudo-random bytes to
+ *     @bytes: the requested number of bytes
+ */
+void prandom_bytes(void *buf, size_t bytes)
+{
+       struct siprand_state *state = get_cpu_ptr(&net_rand_state);
+       u8 *ptr = buf;
+
+       while (bytes >= sizeof(u32)) {
+               put_unaligned(siprand_u32(state), (u32 *)ptr);
+               ptr += sizeof(u32);
+               bytes -= sizeof(u32);
+       }
+
+       if (bytes > 0) {
+               u32 rem = siprand_u32(state);
+
+               do {
+                       *ptr++ = (u8)rem;
+                       rem >>= BITS_PER_BYTE;
+               } while (--bytes > 0);
+       }
+       put_cpu_ptr(&net_rand_state);
+}
+EXPORT_SYMBOL(prandom_bytes);
+
+/**
+ *     prandom_seed - add entropy to pseudo random number generator
+ *     @entropy: entropy value
+ *
+ *     Add some additional seed material to the prandom pool.
+ *     The "entropy" is actually our IP address (the only caller is
+ *     the network code), not for unpredictability, but to ensure that
+ *     different machines are initialized differently.
+ */
+void prandom_seed(u32 entropy)
+{
+       int i;
+
+       add_device_randomness(&entropy, sizeof(entropy));
+
+       for_each_possible_cpu(i) {
+               struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
+               unsigned long v0 = state->v0, v1 = state->v1;
+               unsigned long v2 = state->v2, v3 = state->v3;
+
+               do {
+                       v3 ^= entropy;
+                       PRND_SIPROUND(v0, v1, v2, v3);
+                       PRND_SIPROUND(v0, v1, v2, v3);
+                       v0 ^= entropy;
+               } while (unlikely(!v0 || !v1 || !v2 || !v3));
+
+               WRITE_ONCE(state->v0, v0);
+               WRITE_ONCE(state->v1, v1);
+               WRITE_ONCE(state->v2, v2);
+               WRITE_ONCE(state->v3, v3);
+       }
+}
+EXPORT_SYMBOL(prandom_seed);
+
+/*
+ *     Generate some initially weak seeding values to allow
+ *     the prandom_u32() engine to be started.
+ */
+static int __init prandom_init_early(void)
+{
+       int i;
+       unsigned long v0, v1, v2, v3;
+
+       if (!arch_get_random_long(&v0))
+               v0 = jiffies;
+       if (!arch_get_random_long(&v1))
+               v1 = random_get_entropy();
+       v2 = v0 ^ PRND_K0;
+       v3 = v1 ^ PRND_K1;
+
+       for_each_possible_cpu(i) {
+               struct siprand_state *state;
+
+               v3 ^= i;
+               PRND_SIPROUND(v0, v1, v2, v3);
+               PRND_SIPROUND(v0, v1, v2, v3);
+               v0 ^= i;
+
+               state = per_cpu_ptr(&net_rand_state, i);
+               state->v0 = v0;  state->v1 = v1;
+               state->v2 = v2;  state->v3 = v3;
+       }
+
+       return 0;
+}
+core_initcall(prandom_init_early);
+
+
+/* Stronger reseeding when available, and periodically thereafter. */
+static void prandom_reseed(unsigned long dontcare);
+
+static DEFINE_TIMER(seed_timer, prandom_reseed, 0, 0);
+
+static void prandom_reseed(unsigned long dontcare)
+{
+       unsigned long expires;
+       int i;
+
+       /*
+        * Reinitialize each CPU's PRNG with 128 bits of key.
+        * No locking on the CPUs, but then somewhat random results are,
+        * well, expected.
+        */
+       for_each_possible_cpu(i) {
+               struct siprand_state *state;
+               unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
+               unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
+#if BITS_PER_LONG == 32
+               int j;
+
+               /*
+                * On 32-bit machines, hash in two extra words to
+                * approximate 128-bit key length.  Not that the hash
+                * has that much security, but this prevents a trivial
+                * 64-bit brute force.
+                */
+               for (j = 0; j < 2; j++) {
+                       unsigned long m = get_random_long();
+
+                       v3 ^= m;
+                       PRND_SIPROUND(v0, v1, v2, v3);
+                       PRND_SIPROUND(v0, v1, v2, v3);
+                       v0 ^= m;
+               }
+#endif
+               /*
+                * Probably impossible in practice, but there is a
+                * theoretical risk that a race between this reseeding
+                * and the target CPU writing its state back could
+                * create the all-zero SipHash fixed point.
+                *
+                * To ensure that never happens, ensure the state
+                * we write contains no zero words.
+                */
+               state = per_cpu_ptr(&net_rand_state, i);
+               WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
+               WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
+               WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
+               WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
+       }
+
+       /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
+       expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
+       mod_timer(&seed_timer, expires);
+}
+
+/*
+ * The random ready callback can be called from almost any interrupt.
+ * To avoid worrying about whether it's safe to delay that interrupt
+ * long enough to seed all CPUs, just schedule an immediate timer event.
+ */
+static void prandom_timer_start(struct random_ready_callback *unused)
+{
+       mod_timer(&seed_timer, jiffies);
+}
+
+/*
+ * Start periodic full reseeding as soon as strong
+ * random numbers are available.
+ */
+static int __init prandom_init_late(void)
+{
+       static struct random_ready_callback random_ready = {
+               .func = prandom_timer_start
+       };
+       int ret = add_random_ready_callback(&random_ready);
+
+       if (ret == -EALREADY) {
+               prandom_timer_start(&random_ready);
+               ret = 0;
+       }
+       return ret;
+}
+late_initcall(prandom_init_late);
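
For reference, the generator above is nothing more than the SipHash state
update run twice per 32-bit output. A self-contained user-space rendering,
assuming PRND_SIPROUND is the standard 64-bit SipHash round (the macro
itself is defined in a header outside this hunk):

        /* User-space sketch of siprand_u32(); assumes the standard SipHash round. */
        #include <stdint.h>

        #define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
        #define SIPROUND(v0, v1, v2, v3) do {                              \
                v0 += v1; v1 = ROTL(v1, 13); v1 ^= v0; v0 = ROTL(v0, 32); \
                v2 += v3; v3 = ROTL(v3, 16); v3 ^= v2;                     \
                v0 += v3; v3 = ROTL(v3, 21); v3 ^= v0;                     \
                v2 += v1; v1 = ROTL(v1, 17); v1 ^= v2; v2 = ROTL(v2, 32); \
        } while (0)

        struct siprand_state { uint64_t v0, v1, v2, v3; };

        static uint32_t siprand_u32(struct siprand_state *s)
        {
                uint64_t v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;

                SIPROUND(v0, v1, v2, v3);
                SIPROUND(v0, v1, v2, v3);
                s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
                return (uint32_t)(v1 + v3);
        }

As the reseed code above is careful to note, the state must never be seeded
all-zero: that is a fixed point of the round function, and the generator
would emit zeros forever.
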
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 7ff9dc36c2f8..74b5b8862198 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -199,6 +199,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
                io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
        }
        io_tlb_index = 0;
+       no_iotlb_memory = false;
 
        if (verbose)
                swiotlb_print_info();
@@ -229,9 +230,11 @@ swiotlb_init(int verbose)
        if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
                return;
 
-       if (io_tlb_start)
+       if (io_tlb_start) {
                memblock_free_early(io_tlb_start,
                                    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+               io_tlb_start = 0;
+       }
        pr_warn("Cannot allocate buffer");
        no_iotlb_memory = true;
 }
@@ -330,6 +333,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
                io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
        }
        io_tlb_index = 0;
+       no_iotlb_memory = false;
 
        swiotlb_print_info();
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a2be65bf5d8c..2f443767fd1b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -487,7 +487,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
        struct queue_pages *qp = walk->private;
        unsigned long flags = qp->flags;
        int nid, ret;
-       pte_t *pte;
+       pte_t *pte, *mapped_pte;
        spinlock_t *ptl;
 
        if (pmd_trans_huge(*pmd)) {
@@ -515,7 +515,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
        if (pmd_trans_unstable(pmd))
                return 0;
 retry:
-       pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+       mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;
@@ -554,7 +554,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                } else
                        break;
        }
-       pte_unmap_unlock(pte - 1, ptl);
+       pte_unmap_unlock(mapped_pte, ptl);
        cond_resched();
        return addr != end ? -EIO : 0;
 }
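
The bug here is in the cleanup pairing: pte_offset_map_lock() returns the
pointer that must later be handed back to pte_unmap_unlock(), but the loop
advances pte and can break out early, so pte - 1 is not guaranteed to point
into the mapping that was taken. Keeping the original cursor is the whole
fix; as a fragment (should_stop() is hypothetical):

        /* Sketch of the save-the-mapping-cookie pattern (fragment). */
        pte_t *pte, *mapped_pte;

        mapped_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                if (should_stop(pte))   /* hypothetical early exit */
                        break;          /* pte may no longer equal the cookie */
        }
        pte_unmap_unlock(mapped_pte, ptl);      /* always unmap what was mapped */
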
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b596c413d297..bcf743432c8d 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -304,7 +304,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
        __u32 cookie = ntohl(th->ack_seq) - 1;
        struct sock *ret = sk;
        struct request_sock *req;
-       int mss;
+       int full_space, mss;
        struct rtable *rt;
        __u8 rcv_wscale;
        struct flowi4 fl4;
@@ -388,8 +388,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 
        /* Try to redo what tcp_v4_send_synack did. */
        req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
+       /* limit the window selection if the user enforces a smaller rx buffer */
+       full_space = tcp_full_space(sk);
+       if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+           (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
+               req->rsk_window_clamp = full_space;
 
-       tcp_select_initial_window(tcp_full_space(sk), req->mss,
+       tcp_select_initial_window(full_space, req->mss,
                                  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
                                  ireq->wscale_ok, &rcv_wscale,
                                  dst_metric(&rt->dst, RTAX_INITRWND));
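
The guard added here (and repeated for IPv6 in the net/ipv6/syncookies.c
hunk below) only fires when the application pinned its receive buffer with
SO_RCVBUF: the window clamp reconstructed from the cookie must not promise
more than that buffer can back, and a zero clamp is normalized to the
buffer size as well. As a pure function, the rule is simply (sketch):

        /* The clamp rule both cookie paths now apply (sketch). */
        static int clamp_cookie_window(int window_clamp, int full_space,
                                       int rcvbuf_locked)
        {
                if (rcvbuf_locked &&
                    (window_clamp > full_space || window_clamp == 0))
                        return full_space;
                return window_clamp;
        }

so each call site reduces to req->rsk_window_clamp =
clamp_cookie_window(req->rsk_window_clamp, tcp_full_space(sk),
sk->sk_userlocks & SOCK_RCVBUF_LOCK).
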
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 16eba7b5f1a9..df705303992e 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1072,7 +1072,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
        if (tdev && !netif_is_l3_master(tdev)) {
                int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
-               dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
                dev->mtu = tdev->mtu - t_hlen;
                if (dev->mtu < IPV6_MIN_MTU)
                        dev->mtu = IPV6_MIN_MTU;
@@ -1372,7 +1371,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
        dev->destructor         = ipip6_dev_free;
 
        dev->type               = ARPHRD_SIT;
-       dev->hard_header_len    = LL_MAX_HEADER + t_hlen;
        dev->mtu                = ETH_DATA_LEN - t_hlen;
        dev->flags              = IFF_NOARP;
        netif_keep_dst(dev);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 4834015b27f4..0e368c1dd78b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -143,7 +143,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
        __u32 cookie = ntohl(th->ack_seq) - 1;
        struct sock *ret = sk;
        struct request_sock *req;
-       int mss;
+       int full_space, mss;
        struct dst_entry *dst;
        __u8 rcv_wscale;
 
@@ -236,7 +236,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
        }
 
        req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
-       tcp_select_initial_window(tcp_full_space(sk), req->mss,
+       /* limit the window selection if the user enforces a smaller rx buffer */
+       full_space = tcp_full_space(sk);
+       if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+           (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
+               req->rsk_window_clamp = full_space;
+
+       tcp_select_initial_window(full_space, req->mss,
                                  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
                                  ireq->wscale_ok, &rcv_wscale,
                                  dst_metric(dst, RTAX_INITRWND));
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 02e10deef5b4..ba81655a584b 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1542,7 +1542,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
                break;
        }
 
-       if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
+       if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
+           sk->sk_state == IUCV_CONNECTED) {
                if (iucv->transport == AF_IUCV_TRANS_IUCV) {
                        txmsg.class = 0;
                        txmsg.tag = 0;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 6216279efc46..eebbddccb47b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1847,19 +1847,24 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
 
 /* device xmit handlers */
 
+enum ieee80211_encrypt {
+       ENCRYPT_NO,
+       ENCRYPT_MGMT,
+       ENCRYPT_DATA,
+};
+
 static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
                                struct sk_buff *skb,
-                               int head_need, bool may_encrypt)
+                               int head_need,
+                               enum ieee80211_encrypt encrypt)
 {
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_hdr *hdr;
        bool enc_tailroom;
        int tail_need = 0;
 
-       hdr = (struct ieee80211_hdr *) skb->data;
-       enc_tailroom = may_encrypt &&
-                      (sdata->crypto_tx_tailroom_needed_cnt ||
-                       ieee80211_is_mgmt(hdr->frame_control));
+       enc_tailroom = encrypt == ENCRYPT_MGMT ||
+                      (encrypt == ENCRYPT_DATA &&
+                       sdata->crypto_tx_tailroom_needed_cnt);
 
        if (enc_tailroom) {
                tail_need = IEEE80211_ENCRYPT_TAILROOM;
@@ -1892,21 +1897,27 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        int headroom;
-       bool may_encrypt;
+       enum ieee80211_encrypt encrypt;
 
-       may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
+       if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)
+               encrypt = ENCRYPT_NO;
+       else if (ieee80211_is_mgmt(hdr->frame_control))
+               encrypt = ENCRYPT_MGMT;
+       else
+               encrypt = ENCRYPT_DATA;
 
        headroom = local->tx_headroom;
-       if (may_encrypt)
+       if (encrypt != ENCRYPT_NO)
                headroom += sdata->encrypt_headroom;
        headroom -= skb_headroom(skb);
        headroom = max_t(int, 0, headroom);
 
-       if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
+       if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) {
                ieee80211_free_txskb(&local->hw, skb);
                return;
        }
 
+       /* reload after potential resize */
        hdr = (struct ieee80211_hdr *) skb->data;
        info->control.vif = &sdata->vif;
 
@@ -2688,7 +2699,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
                head_need += sdata->encrypt_headroom;
                head_need += local->tx_headroom;
                head_need = max_t(int, 0, head_need);
-               if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
+               if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) {
                        ieee80211_free_txskb(&local->hw, skb);
                        skb = NULL;
                        return ERR_PTR(-ENOMEM);
@@ -3313,7 +3324,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
        if (unlikely(ieee80211_skb_resize(sdata, skb,
                                          max_t(int, extra_head + hw_headroom -
                                                     skb_headroom(skb), 0),
-                                         false))) {
+                                         ENCRYPT_NO))) {
                kfree_skb(skb);
                return true;
        }
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index a649763b854d..04da31c52d09 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2759,7 +2759,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
                power_rule = &reg_rule->power_rule;
 
                if (reg_rule->flags & NL80211_RRF_AUTO_BW)
-                       snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO",
+                       snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO",
                                 freq_range->max_bandwidth_khz,
                                 reg_get_max_bandwidth(rd, reg_rule));
                else
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index a8ca79810dcc..3bec5c59169b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -823,7 +823,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
        sock->state = SS_CONNECTED;
        rc = 0;
 out_put_neigh:
-       if (rc) {
+       if (rc && x25->neighbour) {
                read_lock_bh(&x25_list_lock);
                x25_neigh_put(x25->neighbour);
                x25->neighbour = NULL;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 0eb85765d35a..4d19f2ff6e05 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1591,6 +1591,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
        int err = -ENOENT;
        __be32 minspi = htonl(low);
        __be32 maxspi = htonl(high);
+       __be32 newspi = 0;
        u32 mark = x->mark.v & x->mark.m;
 
        spin_lock_bh(&x->lock);
@@ -1609,21 +1610,22 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
                        xfrm_state_put(x0);
                        goto unlock;
                }
-               x->id.spi = minspi;
+               newspi = minspi;
        } else {
                u32 spi = 0;
                for (h = 0; h < high-low+1; h++) {
                        spi = low + prandom_u32()%(high-low+1);
                        x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
                        if (x0 == NULL) {
-                               x->id.spi = htonl(spi);
+                               newspi = htonl(spi);
                                break;
                        }
                        xfrm_state_put(x0);
                }
        }
-       if (x->id.spi) {
+       if (newspi) {
                spin_lock_bh(&net->xfrm.xfrm_state_lock);
+               x->id.spi = newspi;
                h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
                hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
                spin_unlock_bh(&net->xfrm.xfrm_state_lock);
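
The structural change above is choose-then-publish: a candidate SPI is
picked into the local newspi first, and x->id.spi is only written under
xfrm_state_lock, together with the byspi hash insertion, so other CPUs can
never observe a half-published SPI. Reduced to the idiom (lookup_in_use()
and hash_bucket() are hypothetical):

        /* Sketch of the choose-then-publish idiom (names hypothetical). */
        u32 newspi = 0;
        unsigned int i;

        /* Phase 1: probe candidates without publishing anything. */
        for (i = 0; i < range; i++) {
                u32 candidate = low + prandom_u32() % range;

                if (!lookup_in_use(candidate)) {        /* hypothetical */
                        newspi = candidate;
                        break;
                }
        }

        /* Phase 2: publish the SPI and the hash entry together, under the lock. */
        if (newspi) {
                spin_lock_bh(&state_lock);
                x->id.spi = htonl(newspi);
                hlist_add_head_rcu(&x->byspi, hash_bucket(newspi));
                spin_unlock_bh(&state_lock);
        }
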
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index 261469188566..49d42971d90d 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -155,6 +155,8 @@ struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_ext_bus *ebus,
                return NULL;
        if (ebus->idx != bus_idx)
                return NULL;
+       if (addr < 0 || addr > 31)
+               return NULL;
 
        list_for_each_entry(hlink, &ebus->hlink_list, list) {
                for (i = 0; i < HDA_MAX_CODECS; i++) {
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 7e0573e55a35..89808ab008ad 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -482,6 +482,7 @@ static void perf_event__mmap2_swap(union perf_event *event,
        event->mmap2.maj   = bswap_32(event->mmap2.maj);
        event->mmap2.min   = bswap_32(event->mmap2.min);
        event->mmap2.ino   = bswap_64(event->mmap2.ino);
+       event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
 
        if (sample_id_all) {
                void *data = &event->mmap2.filename;