diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index acdfb5d2bcaa..e2142fe40cda 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -422,6 +422,7 @@ tcp_min_rtt_wlen - INTEGER
        minimum RTT when it is moved to a longer path (e.g., due to traffic
        engineering). A longer window makes the filter more resistant to RTT
        inflations such as transient congestion. The unit is seconds.
+       Possible values: 0 - 86400 (1 day)
        Default: 300
 
 tcp_moderate_rcvbuf - BOOLEAN
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 187ce4f599a2..e4dfaf0d6e87 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
 increase the success rate of future high-order allocations such as SLUB
 allocations, THP and hugetlbfs pages.
 
-To make it sensible with respect to the watermark_scale_factor parameter,
-the unit is in fractions of 10,000. The default value of 15,000 means
-that up to 150% of the high watermark will be reclaimed in the event of
-a pageblock being mixed due to fragmentation. The level of reclaim is
-determined by the number of fragmentation events that occurred in the
-recent past. If this value is smaller than a pageblock then a pageblocks
-worth of pages will be reclaimed (e.g.  2MB on 64-bit x86). A boost factor
-of 0 will disable the feature.
+To make it sensible with respect to the watermark_scale_factor
+parameter, the unit is in fractions of 10,000. The default value of
+15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
+watermark will be reclaimed in the event of a pageblock being mixed due
+to fragmentation. The level of reclaim is determined by the number of
+fragmentation events that occurred in the recent past. If this value is
+smaller than a pageblock then a pageblocks worth of pages will be reclaimed
+(e.g.  2MB on 64-bit x86). A boost factor of 0 will disable the feature.
 
 =============================================================
 
diff --git a/Makefile b/Makefile
index b282c4143b21..c3daaefa979c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 0
-SUBLEVEL = 10
+SUBLEVEL = 11
 EXTRAVERSION =
 NAME = Shy Crocodile
 
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6c7ccb428c07..7135820f76d4 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1438,7 +1438,21 @@ ENTRY(efi_stub_entry)
 
                @ Preserve return value of efi_entry() in r4
                mov     r4, r0
-               bl      cache_clean_flush
+
+               @ our cache maintenance code relies on CP15 barrier instructions
+               @ but since we arrived here with the MMU and caches configured
+               @ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
+               @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
+               @ the enable path will be executed on v7+ only.
+               mrc     p15, 0, r1, c1, c0, 0   @ read SCTLR
+               tst     r1, #(1 << 5)           @ CP15BEN bit set?
+               bne     0f
+               orr     r1, r1, #(1 << 5)       @ CP15 barrier instructions
+               mcr     p15, 0, r1, c1, c0, 0   @ write SCTLR
+ ARM(          .inst   0xf57ff06f              @ v7+ isb       )
+ THUMB(                isb                                             )
+
+0:             bl      cache_clean_flush
                bl      cache_off
 
                @ Set parameters for booting zImage according to boot protocol
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 7205a9085b4d..c9411774555d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -406,7 +406,7 @@ void __init arm64_memblock_init(void)
                 * Otherwise, this is a no-op
                 */
                u64 base = phys_initrd_start & PAGE_MASK;
-               u64 size = PAGE_ALIGN(phys_initrd_size);
+               u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
 
                /*
                 * We can only add back the initrd memory if we don't end up
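
The hunk above fixes a rounding bug: aligning the size alone under-counts
whenever the initrd does not start on a page boundary, leaving the tail of
its last page uncovered. A minimal sketch in userspace C (page size and
addresses are illustrative, not taken from any real system):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        uint64_t start = 0x1800, len = 0x1000;         /* spans two 4 KiB pages */
        uint64_t base = start & PAGE_MASK;
        uint64_t old = PAGE_ALIGN(len);                /* 0x1000: one page, too small */
        uint64_t new = PAGE_ALIGN(start + len) - base; /* 0x2000: covers the region */

        printf("old=%#llx new=%#llx\n",
               (unsigned long long)old, (unsigned long long)new);
        return 0;
    }
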
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f158c5894a9a..feb2653490df 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -125,7 +125,7 @@ trace_a_syscall:
        subu    t1, v0,  __NR_O32_Linux
        move    a1, v0
        bnez    t1, 1f /* __NR_syscall at offset 0 */
-       lw      a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+       ld      a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
        .set    pop
 
 1:     jal     syscall_trace_enter
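
The lw-to-ld change matters because the o32 trace path saves the full
64-bit registers; a 32-bit load fetches only part of the saved value and
sign-extends it. A userspace C illustration of the width mismatch (not
MIPS assembly; the value is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t saved = 0x00000001ffffffffULL; /* full 64-bit saved register */
        int32_t low = (int32_t)saved;           /* what a 32-bit load sees */

        printf("64-bit load: %#llx\n", (unsigned long long)saved);
        printf("32-bit load, sign-extended: %#llx\n",
               (unsigned long long)(int64_t)low); /* 0xffffffffffffffff */
        return 0;
    }
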
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index cfdd08897a06..e2b0c5f15c7b 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -260,6 +260,7 @@ CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_HUGETLBFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS=y
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 1e0bc5955a40..afd516b572f8 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
         * can be used, r7 contains NSEC_PER_SEC.
         */
 
-       lwz     r5,WTOM_CLOCK_SEC(r9)
+       lwz     r5,(WTOM_CLOCK_SEC+LOPART)(r9)
        lwz     r6,WTOM_CLOCK_NSEC(r9)
 
        /* We now have our offset in r5,r6. We create a fake dependency
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 8c7464c3f27f..2782188a5ba1 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -318,7 +318,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 
 config PPC_RADIX_MMU
        bool "Radix MMU Support"
-       depends on PPC_BOOK3S_64
+       depends on PPC_BOOK3S_64 && HUGETLB_PAGE
        select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
        default y
        help
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 9c5a67d1b9c1..c0c7291d4ccf 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -217,6 +217,15 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 # Avoid indirect branches in kernel to deal with Spectre
 ifdef CONFIG_RETPOLINE
   KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
+  # Additionally, avoid generating expensive indirect jumps which
+  # are subject to retpolines for small number of switch cases.
+  # clang turns off jump table generation by default when under
+  # retpoline builds, however, gcc does not for x86. This has
+  # only been fixed starting from gcc stable version 8.4.0 and
+  # onwards, but not for older ones. See gcc bug #86952.
+  ifndef CONFIG_CC_IS_CLANG
+    KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
+  endif
 endif
 
 archscripts: scripts_basic
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index d2e780705c5a..56194c571299 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -76,15 +76,15 @@
  *                            Scope: Package (physical package)
  *     MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
  *                            perf code: 0x04
- *                            Available model: HSW ULT,CNL
+ *                            Available model: HSW ULT,KBL,CNL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
  *                            perf code: 0x05
- *                            Available model: HSW ULT,CNL
+ *                            Available model: HSW ULT,KBL,CNL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *                            perf code: 0x06
- *                            Available model: HSW ULT,GLM,CNL
+ *                            Available model: HSW ULT,KBL,GLM,CNL
  *                            Scope: Package (physical package)
  *
  */
@@ -572,8 +572,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
 
-       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
-       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  hswult_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
 
        X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
 
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index e5ed28629271..72510c470001 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2804,7 +2804,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
        bfq_remove_request(q, rq);
 }
 
-static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
        /*
         * If this bfqq is shared between multiple processes, check
@@ -2837,9 +2837,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
        /*
         * All in-service entities must have been properly deactivated
         * or requeued before executing the next function, which
-        * resets all in-service entites as no more in service.
+        * resets all in-service entities as no more in service. This
+        * may cause bfqq to be freed. If this happens, the next
+        * function returns true.
         */
-       __bfq_bfqd_reset_in_service(bfqd);
+       return __bfq_bfqd_reset_in_service(bfqd);
 }
 
 /**
@@ -3244,7 +3246,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
        bool slow;
        unsigned long delta = 0;
        struct bfq_entity *entity = &bfqq->entity;
-       int ref;
 
        /*
         * Check whether the process is slow (see bfq_bfqq_is_slow).
@@ -3313,10 +3314,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
         * reason.
         */
        __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
-       ref = bfqq->ref;
-       __bfq_bfqq_expire(bfqd, bfqq);
-
-       if (ref == 1) /* bfqq is gone, no more actions on it */
+       if (__bfq_bfqq_expire(bfqd, bfqq))
+               /* bfqq is gone, no more actions on it */
                return;
 
        bfqq->injected_service = 0;
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 746bd570b85a..ca98c98a8179 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -993,7 +993,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity,
                             bool ins_into_idle_tree);
 bool next_queue_may_preempt(struct bfq_data *bfqd);
 struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                         bool ins_into_idle_tree, bool expiration);
 void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 4aab1a8191f0..8077bf71d2ac 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1599,7 +1599,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
        return bfqq;
 }
 
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+/* returns true if the in-service queue gets freed */
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
 {
        struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
        struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
@@ -1623,8 +1624,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
         * service tree either, then release the service reference to
         * the queue it represents (taken with bfq_get_entity).
         */
-       if (!in_serv_entity->on_st)
+       if (!in_serv_entity->on_st) {
+               /*
+                * If no process is referencing in_serv_bfqq any
+                * longer, then the service reference may be the only
+                * reference to the queue. If this is the case, then
+                * bfqq gets freed here.
+                */
+               int ref = in_serv_bfqq->ref;
                bfq_put_queue(in_serv_bfqq);
+               if (ref == 1)
+                       return true;
+       }
+
+       return false;
 }
 
 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
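
The three BFQ hunks above close a use-after-free: the old code sampled
bfqq->ref before expiry and guessed afterwards whether the queue had been
freed. The fix detects the free at the point where it can actually happen
(the final bfq_put_queue() in __bfq_bfqd_reset_in_service()) and
propagates "the object is gone" up through return values. The pattern, as
a self-contained C sketch (illustrative names, not BFQ's API):

    #include <stdbool.h>
    #include <stdlib.h>

    struct obj { int ref; };

    static void put_obj(struct obj *o)
    {
        if (--o->ref == 0)
            free(o);
    }

    /* Returns true if this call freed the object; the caller must not
     * touch it afterwards. The count is sampled while still valid. */
    static bool put_obj_report(struct obj *o)
    {
        int ref = o->ref;

        put_obj(o);
        return ref == 1;
    }
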
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 0430ccd08728..08a0e458bc3e 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -212,8 +212,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
 {
        struct skcipher_request *req = areq->data;
 
-       if (!err)
+       if (!err) {
+               struct rctx *rctx = skcipher_request_ctx(req);
+
+               rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
                err = xor_tweak_post(req);
+       }
 
        skcipher_request_complete(req, err);
 }
diff --git a/crypto/xts.c b/crypto/xts.c
index 847f54f76789..2f948328cabb 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -137,8 +137,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
 {
        struct skcipher_request *req = areq->data;
 
-       if (!err)
+       if (!err) {
+               struct rctx *rctx = skcipher_request_ctx(req);
+
+               rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
                err = xor_tweak_post(req);
+       }
 
        skcipher_request_complete(req, err);
 }
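
Both the LRW and XTS hunks clear CRYPTO_TFM_REQ_MAY_SLEEP in the
asynchronous completion callback: crypt_done() can run in softirq
context, so the follow-up tweak processing must not be allowed to sleep.
The shape of that fix, generically (a sketch with made-up names, not the
crypto API):

    #define REQ_MAY_SLEEP 0x1u

    struct req { unsigned int flags; };

    static void do_post_processing(struct req *r) { (void)r; /* ... */ }

    static void completion_cb(struct req *r, int err)
    {
        if (!err) {
            r->flags &= ~REQ_MAY_SLEEP; /* atomic context from here on */
            do_post_processing(r);
        }
    }
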
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 022cd80e80cc..a6e556bf62df 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -959,14 +959,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+       mm = alloc->vma_vm_mm;
+       if (!mmget_not_zero(mm))
+               goto err_mmget;
+       if (!down_write_trylock(&mm->mmap_sem))
+               goto err_down_write_mmap_sem_failed;
        vma = binder_alloc_get_vma(alloc);
-       if (vma) {
-               if (!mmget_not_zero(alloc->vma_vm_mm))
-                       goto err_mmget;
-               mm = alloc->vma_vm_mm;
-               if (!down_write_trylock(&mm->mmap_sem))
-                       goto err_down_write_mmap_sem_failed;
-       }
 
        list_lru_isolate(lru, item);
        spin_unlock(lock);
@@ -979,10 +978,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
                               PAGE_SIZE);
 
                trace_binder_unmap_user_end(alloc, index);
-
-               up_write(&mm->mmap_sem);
-               mmput(mm);
        }
+       up_write(&mm->mmap_sem);
+       mmput(mm);
 
        trace_binder_unmap_kernel_start(alloc, index);
 
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9a8d83bc1e75..fc7aefd42ae0 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1111,8 +1111,9 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
                        err = __blkdev_reread_part(bdev);
                else
                        err = blkdev_reread_part(bdev);
-               pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
-                       __func__, lo_number, err);
+               if (err)
+                       pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
+                               __func__, lo_number, err);
                /* Device is gone, no point in returning error */
                err = 0;
        }
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 684854d3b0ad..7e57f8f012c3 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -774,18 +774,18 @@ struct zram_work {
        struct zram *zram;
        unsigned long entry;
        struct bio *bio;
+       struct bio_vec bvec;
 };
 
 #if PAGE_SIZE != 4096
 static void zram_sync_read(struct work_struct *work)
 {
-       struct bio_vec bvec;
        struct zram_work *zw = container_of(work, struct zram_work, work);
        struct zram *zram = zw->zram;
        unsigned long entry = zw->entry;
        struct bio *bio = zw->bio;
 
-       read_from_bdev_async(zram, &bvec, entry, bio);
+       read_from_bdev_async(zram, &zw->bvec, entry, bio);
 }
 
 /*
@@ -798,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
 {
        struct zram_work work;
 
+       work.bvec = *bvec;
        work.zram = zram;
        work.entry = entry;
        work.bio = bio;
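
The zram fix is a lifetime bug: the bio_vec consumed by the deferred
worker used to be a local on the worker's own stack, so it never saw the
caller's descriptor. The rule the fix applies is that data consumed by
deferred work must travel inside the work item, sketched in
self-contained C (names are illustrative):

    #include <stdio.h>

    struct payload { long a, b; };

    struct work_item {
        struct payload data; /* owned copy, still valid when the handler runs */
    };

    static void handler(struct work_item *w)
    {
        printf("%ld %ld\n", w->data.a, w->data.b);
    }

    static void submit(struct work_item *w, const struct payload *p)
    {
        w->data = *p; /* copy by value before "queueing" */
        handler(w);   /* stands in for deferred execution */
    }

    int main(void)
    {
        struct work_item w;
        struct payload p = { 1, 2 };

        submit(&w, &p);
        return 0;
    }
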
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 131f3974740d..814853842e29 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -253,7 +253,7 @@ static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
 #else
-       mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
+       mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
 #endif
 
        /* setup the length */
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2b4f25698169..e2a5398f89b5 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1282,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
        enum dma_status status;
        unsigned int residue = 0;
        unsigned int dptr = 0;
+       unsigned int chcrb;
+       unsigned int tcrb;
+       unsigned int i;
 
        if (!desc)
                return 0;
@@ -1329,6 +1332,24 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
                return 0;
        }
 
+       /*
+        * We need to read two registers.
+        * Make sure the control register does not skip to next chunk
+        * while reading the counter.
+        * Trying it 3 times should be enough: Initial read, retry, retry
+        * for the paranoid.
+        */
+       for (i = 0; i < 3; i++) {
+               chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+                                           RCAR_DMACHCRB_DPTR_MASK;
+               tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
+               /* Still the same? */
+               if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+                             RCAR_DMACHCRB_DPTR_MASK))
+                       break;
+       }
+       WARN_ONCE(i >= 3, "residue might be not continuous!");
+
        /*
         * In descriptor mode the descriptor running pointer is not maintained
         * by the interrupt handler, find the running descriptor from the
@@ -1336,8 +1357,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
         * mode just use the running descriptor pointer.
         */
        if (desc->hwdescs.use) {
-               dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
-                       RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+               dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
                if (dptr == 0)
                        dptr = desc->nchunks;
                dptr--;
@@ -1355,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
        }
 
        /* Add the residue for the current chunk. */
-       residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
+       residue += tcrb << desc->xfer_shift;
 
        return residue;
 }
@@ -1368,6 +1388,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
        enum dma_status status;
        unsigned long flags;
        unsigned int residue;
+       bool cyclic;
 
        status = dma_cookie_status(chan, cookie, txstate);
        if (status == DMA_COMPLETE || !txstate)
@@ -1375,10 +1396,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
 
        spin_lock_irqsave(&rchan->lock, flags);
        residue = rcar_dmac_chan_get_residue(rchan, cookie);
+       cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
        spin_unlock_irqrestore(&rchan->lock, flags);
 
        /* if there's no residue, the cookie is complete */
-       if (!residue)
+       if (!residue && !cyclic)
                return DMA_COMPLETE;
 
        dma_set_residue(txstate, residue);
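
The residue fix reads RCAR_DMACHCRB (the chunk pointer) and RCAR_DMATCRB
(the byte counter) as a consistent pair: if the pointer advances while
the counter is being read, the counter may already belong to the next
chunk. The retry pattern in isolation (C sketch; read_reg() stands in
for what is a volatile MMIO read in the driver):

    #include <stdint.h>

    static uint32_t regs[2];
    static uint32_t read_reg(int r) { return regs[r]; }

    /* Initial read plus two retries, mirroring the driver's loop. */
    static void read_pair(uint32_t *ptr, uint32_t *cnt)
    {
        int i;

        for (i = 0; i < 3; i++) {
            *ptr = read_reg(0);
            *cnt = read_reg(1);
            if (*ptr == read_reg(0)) /* unchanged: cnt matches ptr */
                break;
        }
    }
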
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index e41223c05f6e..6cf2e2ce4093 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_EDGE_BOTH:
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 4ee16b264dbe..7f365ac0b549 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                                    bool *enabled, int width, int height)
 {
        struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
+       unsigned long conn_configured, conn_seq, mask;
        unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
-       unsigned long conn_configured, conn_seq;
        int i, j;
        bool *save_enabled;
        bool fallback = true, ret = true;
@@ -355,9 +355,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                drm_modeset_backoff(&ctx);
 
        memcpy(save_enabled, enabled, count);
-       conn_seq = GENMASK(count - 1, 0);
+       mask = GENMASK(count - 1, 0);
        conn_configured = 0;
 retry:
+       conn_seq = conn_configured;
        for (i = 0; i < count; i++) {
                struct drm_fb_helper_connector *fb_conn;
                struct drm_connector *connector;
@@ -370,8 +371,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                if (conn_configured & BIT(i))
                        continue;
 
-               /* First pass, only consider tiled connectors */
-               if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
+               if (conn_seq == 0 && !connector->has_tile)
                        continue;
 
                if (connector->status == connector_status_connected)
@@ -475,10 +475,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                conn_configured |= BIT(i);
        }
 
-       if (conn_configured != conn_seq) { /* repeat until no more are found */
-               conn_seq = conn_configured;
+       if ((conn_configured & mask) != mask && conn_configured != conn_seq)
                goto retry;
-       }
 
        /*
         * If the BIOS didn't enable everything it could, fall back to have the
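
The i915 change repairs the retry loop's bookkeeping: conn_seq now
snapshots the previous pass (instead of holding a constant), so the
first pass is detected with conn_seq == 0 and the loop stops once every
connector is configured or a pass makes no progress. The control flow,
reduced to a C sketch (try_configure() is a stand-in for the real work):

    #include <stdbool.h>

    static bool try_configure(int i) { (void)i; return true; }

    static void configure_all(int count)
    {
        unsigned long configured = 0, prev, mask = (1UL << count) - 1;
        int i;

    retry:
        prev = configured;
        for (i = 0; i < count; i++) {
            if (configured & (1UL << i))
                continue;
            if (try_configure(i))
                configured |= 1UL << i;
        }
        if ((configured & mask) != mask && configured != prev)
            goto retry; /* more to do, and the last pass helped */
    }
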
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0ec08394e17a..996cadd83f24 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
  * ttm_global_mutex - protecting the global BO state
  */
 DEFINE_MUTEX(ttm_global_mutex);
-struct ttm_bo_global ttm_bo_glob = {
-       .use_count = 0
-};
+unsigned ttm_bo_glob_use_count;
+struct ttm_bo_global ttm_bo_glob;
 
 static struct attribute ttm_bo_count = {
        .name = "bo_count",
@@ -1535,12 +1534,13 @@ static void ttm_bo_global_release(void)
        struct ttm_bo_global *glob = &ttm_bo_glob;
 
        mutex_lock(&ttm_global_mutex);
-       if (--glob->use_count > 0)
+       if (--ttm_bo_glob_use_count > 0)
                goto out;
 
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
        ttm_mem_global_release(&ttm_mem_glob);
+       memset(glob, 0, sizeof(*glob));
 out:
        mutex_unlock(&ttm_global_mutex);
 }
@@ -1552,7 +1552,7 @@ static int ttm_bo_global_init(void)
        unsigned i;
 
        mutex_lock(&ttm_global_mutex);
-       if (++glob->use_count > 1)
+       if (++ttm_bo_glob_use_count > 1)
                goto out;
 
        ret = ttm_mem_global_init(&ttm_mem_glob);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index f1567c353b54..9a0909decb36 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -461,8 +461,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 
 void ttm_mem_global_release(struct ttm_mem_global *glob)
 {
-       unsigned int i;
        struct ttm_mem_zone *zone;
+       unsigned int i;
 
        /* let the page allocator first stop the shrink work. */
        ttm_page_alloc_fini();
@@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
                zone = glob->zones[i];
                kobject_del(&zone->kobj);
                kobject_put(&zone->kobj);
-                       }
+       }
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
+       memset(glob, 0, sizeof(*glob));
 }
 
 static void ttm_check_swapping(struct ttm_mem_global *glob)
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 3ce136ba8791..2ae4ece0dcea 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -999,7 +999,7 @@ static void
 vc4_crtc_reset(struct drm_crtc *crtc)
 {
        if (crtc->state)
-               __drm_atomic_helper_crtc_destroy_state(crtc->state);
+               vc4_crtc_destroy_state(crtc, crtc->state);
 
        crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
        if (crtc->state)
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index cc287cf6eb29..edc52d75e6bd 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -616,7 +616,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
        othdev->output.port = -1;
        othdev->output.active = false;
        gth->output[port].output = NULL;
-       for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
+       for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
                if (gth->master[master] == port)
                        gth->master[master] = -1;
        spin_unlock(&gth->gth_lock);
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index ea0bc6885517..32cc8fe7902f 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -160,6 +160,7 @@ struct ib_uverbs_file {
 
        struct mutex umap_lock;
        struct list_head umaps;
+       struct page *disassociate_page;
 
        struct idr              idr;
        /* spinlock protects write access to idr */
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e2a4570a47e8..27ca4022ca70 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -208,6 +208,9 @@ void ib_uverbs_release_file(struct kref *ref)
                kref_put(&file->async_file->ref,
                         ib_uverbs_release_async_event_file);
        put_device(&file->device->dev);
+
+       if (file->disassociate_page)
+               __free_pages(file->disassociate_page, 0);
        kfree(file);
 }
 
@@ -876,9 +879,50 @@ static void rdma_umap_close(struct vm_area_struct *vma)
        kfree(priv);
 }
 
+/*
+ * Once the zap_vma_ptes has been called touches to the VMA will come here and
+ * we return a dummy writable zero page for all the pfns.
+ */
+static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
+{
+       struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
+       struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
+       vm_fault_t ret = 0;
+
+       if (!priv)
+               return VM_FAULT_SIGBUS;
+
+       /* Read only pages can just use the system zero page. */
+       if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
+               vmf->page = ZERO_PAGE(vmf->address);
+               get_page(vmf->page);
+               return 0;
+       }
+
+       mutex_lock(&ufile->umap_lock);
+       if (!ufile->disassociate_page)
+               ufile->disassociate_page =
+                       alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
+
+       if (ufile->disassociate_page) {
+               /*
+                * This VMA is forced to always be shared so this doesn't have
+                * to worry about COW.
+                */
+               vmf->page = ufile->disassociate_page;
+               get_page(vmf->page);
+       } else {
+               ret = VM_FAULT_SIGBUS;
+       }
+       mutex_unlock(&ufile->umap_lock);
+
+       return ret;
+}
+
 static const struct vm_operations_struct rdma_umap_ops = {
        .open = rdma_umap_open,
        .close = rdma_umap_close,
+       .fault = rdma_umap_fault,
 };
 
 static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
@@ -888,6 +932,9 @@ static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
        struct ib_uverbs_file *ufile = ucontext->ufile;
        struct rdma_umap_priv *priv;
 
+       if (!(vma->vm_flags & VM_SHARED))
+               return ERR_PTR(-EINVAL);
+
        if (vma->vm_end - vma->vm_start != size)
                return ERR_PTR(-EINVAL);
 
@@ -991,7 +1038,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
                 * at a time to get the lock ordering right. Typically there
                 * will only be one mm, so no big deal.
                 */
-               down_write(&mm->mmap_sem);
+               down_read(&mm->mmap_sem);
                if (!mmget_still_valid(mm))
                        goto skip_mm;
                mutex_lock(&ufile->umap_lock);
@@ -1005,11 +1052,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 
                        zap_vma_ptes(vma, vma->vm_start,
                                     vma->vm_end - vma->vm_start);
-                       vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
                }
                mutex_unlock(&ufile->umap_lock);
        skip_mm:
-               up_write(&mm->mmap_sem);
+               up_read(&mm->mmap_sem);
                mmput(mm);
        }
 }
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 94fe253d4956..497181f5ba09 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1982,6 +1982,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
 
        if (vma->vm_flags & VM_WRITE)
                return -EPERM;
+       vma->vm_flags &= ~VM_MAYWRITE;
 
        if (!dev->mdev->clock_info_page)
                return -EOPNOTSUPP;
@@ -2147,19 +2148,18 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
 
                if (vma->vm_flags & VM_WRITE)
                        return -EPERM;
+               vma->vm_flags &= ~VM_MAYWRITE;
 
                /* Don't expose to user-space information it shouldn't have */
                if (PAGE_SIZE > 4096)
                        return -EOPNOTSUPP;
 
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                pfn = (dev->mdev->iseg_base +
                       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
                        PAGE_SHIFT;
-               if (io_remap_pfn_range(vma, vma->vm_start, pfn,
-                                      PAGE_SIZE, vma->vm_page_prot))
-                       return -EAGAIN;
-               break;
+               return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
+                                        PAGE_SIZE,
+                                        pgprot_noncached(vma->vm_page_prot));
        case MLX5_IB_MMAP_CLOCK_INFO:
                return mlx5_ib_mmap_clock_info_page(dev, vma, context);
 
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 49c9541050d4..5819c9d6ffdc 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -611,11 +611,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
        if (unlikely(mapped_segs == mr->mr.max_segs))
                return -ENOMEM;
 
-       if (mr->mr.length == 0) {
-               mr->mr.user_base = addr;
-               mr->mr.iova = addr;
-       }
-
        m = mapped_segs / RVT_SEGSZ;
        n = mapped_segs % RVT_SEGSZ;
        mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@@ -633,17 +628,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
  * @sg_nents: number of entries in sg
  * @sg_offset: offset in bytes into sg
  *
+ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ *
  * Return: number of sg elements mapped to the memory region
  */
 int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                  int sg_nents, unsigned int *sg_offset)
 {
        struct rvt_mr *mr = to_imr(ibmr);
+       int ret;
 
        mr->mr.length = 0;
        mr->mr.page_shift = PAGE_SHIFT;
-       return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
-                             rvt_set_page);
+       ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+       mr->mr.user_base = ibmr->iova;
+       mr->mr.iova = ibmr->iova;
+       mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+       mr->mr.length = (size_t)ibmr->length;
+       return ret;
 }
 
 /**
@@ -674,6 +676,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
        ibmr->rkey = key;
        mr->mr.lkey = key;
        mr->mr.access_flags = access;
+       mr->mr.iova = ibmr->iova;
        atomic_set(&mr->mr.lkey_invalid, 0);
 
        return 0;
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index df64d6aed4f7..93901ebd122a 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
        }
 
        rc = f11_write_control_regs(fn, &f11->sens_query,
-                          &f11->dev_controls, fn->fd.query_base_addr);
+                          &f11->dev_controls, fn->fd.control_base_addr);
        if (rc)
                dev_warn(&fn->dev, "Failed to write control registers\n");
 
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 6fd15a734324..58f02c85f2fe 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
        /* create driver workqueue */
        fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
                                          fm10k_driver_name);
+       if (!fm10k_workqueue)
+               return -ENOMEM;
 
        fm10k_dbg_init();
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 03b2a9f9c589..cad34d6f5f45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -33,6 +33,26 @@
 #include <linux/bpf_trace.h>
 #include "en/xdp.h"
 
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
+{
+       int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+
+       /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
+        * The condition checked in mlx5e_rx_is_linear_skb is:
+        *   SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE         (1)
+        *   (Note that hw_mtu == sw_mtu + hard_mtu.)
+        * What is returned from this function is:
+        *   max_mtu = PAGE_SIZE - S - hr - hard_mtu                         (2)
+        * After assigning sw_mtu := max_mtu, the left side of (1) turns to
+        * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
+        * because both PAGE_SIZE and S are already aligned. Any number greater
+        * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
+        * so max_mtu is the maximum MTU allowed.
+        */
+
+       return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
+}
+
 static inline bool
 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
                    struct xdp_buff *xdp)
@@ -304,9 +324,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
                                        mlx5e_xdpi_fifo_pop(xdpi_fifo);
 
                                if (is_redirect) {
-                                       xdp_return_frame(xdpi.xdpf);
                                        dma_unmap_single(sq->pdev, 
xdpi.dma_addr,
                                                         xdpi.xdpf->len, 
DMA_TO_DEVICE);
+                                       xdp_return_frame(xdpi.xdpf);
                                } else {
                                        /* Recycle RX page */
                                        mlx5e_page_release(rq, &xdpi.di, true);
@@ -345,9 +365,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
                                mlx5e_xdpi_fifo_pop(xdpi_fifo);
 
                        if (is_redirect) {
-                               xdp_return_frame(xdpi.xdpf);
                                dma_unmap_single(sq->pdev, xdpi.dma_addr,
                                                 xdpi.xdpf->len, DMA_TO_DEVICE);
+                               xdp_return_frame(xdpi.xdpf);
                        } else {
                                /* Recycle RX page */
                                mlx5e_page_release(rq, &xdpi.di, false);
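
Plugging assumed x86-64 numbers into formula (2) from the comment in
mlx5e_xdp_max_mtu() makes the bound concrete (S, the headroom, and the
hard MTU below are illustrative assumptions, not authoritative values):

    #include <stdio.h>

    int main(void)
    {
        int page_size = 4096;
        int S = 320;       /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), assumed */
        int hr = 0 + 256;  /* NET_IP_ALIGN + XDP_PACKET_HEADROOM, assumed */
        int hard_mtu = 14 + 4 + 4; /* Ethernet + VLAN + FCS, assumed */

        /* Formula (2): max_mtu = PAGE_SIZE - S - hr - hard_mtu */
        printf("max_mtu = %d\n", page_size - S - hr - hard_mtu); /* 3498 */
        return 0;
    }

The other two hunks in this file reorder the DMA unmap before
xdp_return_frame() so the frame is never touched after its memory has
been handed back.
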
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index ee27a7c8cd87..553956cadc8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -34,13 +34,12 @@
 
 #include "en.h"
 
-#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
-                                MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
 #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
 #define MLX5E_XDP_TX_EMPTY_DS_COUNT \
        (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
 #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
 
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params);
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
                      void *va, u16 *rx_headroom, u32 *len);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3b9e5f0d0212..253496c4a3db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1470,7 +1470,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
                break;
        case MLX5_MODULE_ID_SFP:
                modinfo->type       = ETH_MODULE_SFF_8472;
-               modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+               modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
                break;
        default:
                netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0cb19e4dd439..2d269acdbc8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3816,7 +3816,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
        if (params->xdp_prog &&
            !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
                netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
-                          new_mtu, MLX5E_XDP_MAX_MTU);
+                          new_mtu, mlx5e_xdp_max_mtu(params));
                err = -EINVAL;
                goto out;
        }
@@ -4280,7 +4280,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
 
        if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
                netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
-                           new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
+                           new_channels.params.sw_mtu,
+                           mlx5e_xdp_max_mtu(&new_channels.params));
                return -EINVAL;
        }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 2b82f35f4c35..efce1fa37f6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -404,10 +404,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
                size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
 
        i2c_addr = MLX5_I2C_ADDR_LOW;
-       if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
-               i2c_addr = MLX5_I2C_ADDR_HIGH;
-               offset -= MLX5_EEPROM_PAGE_LENGTH;
-       }
 
        MLX5_SET(mcia_reg, in, l, 0);
        MLX5_SET(mcia_reg, in, module, module_num);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index ffee38e36ce8..8648ca171254 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
 
 #define MLXSW_PCI_SW_RESET                     0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT             BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       13000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       20000
 #define MLXSW_PCI_SW_RESET_WAIT_MSECS          100
 #define MLXSW_PCI_FW_READY                     0xA1844
 #define MLXSW_PCI_FW_READY_MASK                        0xFFFF
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index cbdee5164be7..ce49504e1f9c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2667,11 +2667,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
        if (err)
                return err;
 
+       mlxsw_sp_port->link.autoneg = autoneg;
+
        if (!netif_running(dev))
                return 0;
 
-       mlxsw_sp_port->link.autoneg = autoneg;
-
        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
 
@@ -2961,7 +2961,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HIERARCY_TC,
                                            i + 8, i,
-                                           false, 0);
+                                           true, 100);
                if (err)
                        return err;
        }
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index a18149720aa2..cba5881b2746 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -673,7 +673,8 @@ static void netsec_process_tx(struct netsec_priv *priv)
 }
 
 static void *netsec_alloc_rx_data(struct netsec_priv *priv,
-                                 dma_addr_t *dma_handle, u16 *desc_len)
+                                 dma_addr_t *dma_handle, u16 *desc_len,
+                                 bool napi)
 {
        size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        size_t payload_len = NETSEC_RX_BUF_SZ;
@@ -682,7 +683,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
 
        total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
 
-       buf = napi_alloc_frag(total_len);
+       buf = napi ? napi_alloc_frag(total_len) : netdev_alloc_frag(total_len);
        if (!buf)
                return NULL;
 
@@ -765,7 +766,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
                /* allocate a fresh buffer and map it to the hardware.
                 * This will eventually replace the old buffer in the hardware
                 */
-               buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+               buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len,
+                                               true);
                if (unlikely(!buf_addr))
                        break;
 
@@ -1069,7 +1071,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
                void *buf;
                u16 len;
 
-               buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+               buf = netsec_alloc_rx_data(priv, &dma_handle, &len,
+                                          false);
                if (!buf) {
                        netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
                        goto err_out;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 019ab99e65bb..1d8d6f2ddfd6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2590,8 +2590,6 @@ static int stmmac_open(struct net_device *dev)
        u32 chan;
        int ret;
 
-       stmmac_check_ether_addr(priv);
-
        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
            priv->hw->pcs != STMMAC_PCS_TBI &&
            priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -4265,6 +4263,8 @@ int stmmac_dvr_probe(struct device *device,
        if (ret)
                goto error_hw_init;
 
+       stmmac_check_ether_addr(priv);
+
        /* Configure real RX and TX queues */
        netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
        netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index d819e8eaba12..cc1e887e47b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
                },
                .driver_data = (void *)&galileo_stmmac_dmi_data,
        },
+       /*
+        * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
+        * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
+        * has only one pci network device while other asset tags are
+        * for IOT2040 which has two.
+        */
        {
                .matches = {
                        DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
@@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
        {
                .matches = {
                        DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
-                       DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
-                                       "6ES7647-0AA00-1YA2"),
                },
                .driver_data = (void *)&iot2040_stmmac_dmi_data,
        },
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index f4e93f5fc204..ea90db3c7705 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -153,7 +153,7 @@ slhc_init(int rslots, int tslots)
 void
 slhc_free(struct slcompress *comp)
 {
-       if ( comp == NULLSLCOMPR )
+       if ( IS_ERR_OR_NULL(comp) )
                return;
 
        if ( comp->tstate != NULLSLSTATE )
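
slhc_init() can hand back an ERR_PTR-encoded errno, so slhc_free() must
now tolerate both NULL and error pointers. The convention, rendered in
userspace C (the kernel's IS_ERR_OR_NULL works the same way):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095 /* the top addresses encode negative errnos */

    static bool is_err_or_null(const void *p)
    {
        return !p || (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static void obj_free(void *obj)
    {
        if (is_err_or_null(obj)) /* a failed constructor never allocated */
            return;
        free(obj);
    }
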
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 1283632091d5..7dcda9364009 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1157,6 +1157,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                return -EINVAL;
        }
 
+       if (netdev_has_upper_dev(dev, port_dev)) {
+               NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
+               netdev_err(dev, "Device %s is already an upper device of the team interface\n",
+                          portname);
+               return -EBUSY;
+       }
+
        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6359053bd0c7..862fd2b92d12 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2642,7 +2642,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        enum nl80211_band band;
        const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
        struct net *net;
-       int idx;
+       int idx, i;
        int n_limits = 0;
 
        if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2766,12 +2766,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                goto failed_hw;
        }
 
+       data->if_combination.max_interfaces = 0;
+       for (i = 0; i < n_limits; i++)
+               data->if_combination.max_interfaces +=
+                       data->if_limits[i].max;
+
        data->if_combination.n_limits = n_limits;
-       data->if_combination.max_interfaces = 2048;
        data->if_combination.limits = data->if_limits;
 
-       hw->wiphy->iface_combinations = &data->if_combination;
-       hw->wiphy->n_iface_combinations = 1;
+       /*
+        * If we actually were asked to support combinations,
+        * advertise them - if there's only a single thing like
+        * only IBSS then don't advertise it as combinations.
+        */
+       if (data->if_combination.max_interfaces > 1) {
+               hw->wiphy->iface_combinations = &data->if_combination;
+               hw->wiphy->n_iface_combinations = 1;
+       }
 
        if (param->ciphers) {
                memcpy(data->ciphers, param->ciphers,
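
The hwsim change advertises an interface-combination maximum derived
from the per-type limits instead of a fixed 2048. In isolation (C
sketch, illustrative types):

    struct iface_limit { int max; };

    static int total_interfaces(const struct iface_limit *limits, int n)
    {
        int i, total = 0;

        for (i = 0; i < n; i++)
            total += limits[i].max; /* sum of per-type maxima */
        return total;
    }
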
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 53564386ed57..8987cec9549d 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1896,14 +1896,11 @@ int usb_runtime_idle(struct device *dev)
        return -EBUSY;
 }
 
-int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
 {
        struct usb_hcd *hcd = bus_to_hcd(udev->bus);
        int ret = -EPERM;
 
-       if (enable && !udev->usb2_hw_lpm_allowed)
-               return 0;
-
        if (hcd->driver->set_usb2_hw_lpm) {
                ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
                if (!ret)
@@ -1913,6 +1910,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
        return ret;
 }
 
+int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+{
+       if (!udev->usb2_hw_lpm_capable ||
+           !udev->usb2_hw_lpm_allowed ||
+           udev->usb2_hw_lpm_enabled)
+               return 0;
+
+       return usb_set_usb2_hardware_lpm(udev, 1);
+}
+
+int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
+{
+       if (!udev->usb2_hw_lpm_enabled)
+               return 0;
+
+       return usb_set_usb2_hardware_lpm(udev, 0);
+}
+
 #endif /* CONFIG_PM */
 
 struct bus_type usb_bus_type = {
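
The USB refactor replaces raw usb_set_usb2_hardware_lpm(udev, 0/1) calls
with enable/disable wrappers that own the capability and state checks,
so no call site can get them wrong. The shape of the refactor (C sketch,
not the USB core's types):

    #include <stdbool.h>

    struct dev_state { bool capable, allowed, enabled; };

    static int set_feature(struct dev_state *d, bool on) /* stand-in low-level op */
    {
        d->enabled = on;
        return 0;
    }

    static int feature_enable(struct dev_state *d)
    {
        if (!d->capable || !d->allowed || d->enabled)
            return 0; /* nothing to do, not an error */
        return set_feature(d, true);
    }

    static int feature_disable(struct dev_state *d)
    {
        if (!d->enabled)
            return 0;
        return set_feature(d, false);
    }
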
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1d1e61e980f3..55c87be5764c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3220,8 +3220,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
        }
 
        /* disable USB2 hardware LPM */
-       if (udev->usb2_hw_lpm_enabled == 1)
-               usb_set_usb2_hardware_lpm(udev, 0);
+       usb_disable_usb2_hardware_lpm(udev);
 
        if (usb_disable_ltm(udev)) {
                dev_err(&udev->dev, "Failed to disable LTM before suspend\n");
@@ -3259,8 +3258,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
                usb_enable_ltm(udev);
  err_ltm:
                /* Try to enable USB2 hardware LPM again */
-               if (udev->usb2_hw_lpm_capable == 1)
-                       usb_set_usb2_hardware_lpm(udev, 1);
+               usb_enable_usb2_hardware_lpm(udev);
 
                if (udev->do_remote_wakeup)
                        (void) usb_disable_remote_wakeup(udev);
@@ -3543,8 +3541,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
                hub_port_logical_disconnect(hub, port1);
        } else  {
                /* Try to enable USB2 hardware LPM */
-               if (udev->usb2_hw_lpm_capable == 1)
-                       usb_set_usb2_hardware_lpm(udev, 1);
+               usb_enable_usb2_hardware_lpm(udev);
 
                /* Try to enable USB3 LTM */
                usb_enable_ltm(udev);
@@ -4435,7 +4432,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
        if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
                        connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
                udev->usb2_hw_lpm_allowed = 1;
-               usb_set_usb2_hardware_lpm(udev, 1);
+               usb_enable_usb2_hardware_lpm(udev);
        }
 }
 
@@ -5649,8 +5646,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
        /* Disable USB2 hardware LPM.
         * It will be re-enabled by the enumeration process.
         */
-       if (udev->usb2_hw_lpm_enabled == 1)
-               usb_set_usb2_hardware_lpm(udev, 0);
+       usb_disable_usb2_hardware_lpm(udev);
 
        /* Disable LPM while we reset the device and reinstall the alt settings.
         * Device-initiated LPM, and system exit latency settings are cleared
@@ -5753,7 +5749,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
 
 done:
        /* Now that the alt settings are re-installed, enable LTM and LPM. */
-       usb_set_usb2_hardware_lpm(udev, 1);
+       usb_enable_usb2_hardware_lpm(udev);
        usb_unlocked_enable_lpm(udev);
        usb_enable_ltm(udev);
        usb_release_bos_descriptor(udev);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index bfa5eda0cc26..4f33eb632a88 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1243,8 +1243,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
                        dev->actconfig->interface[i] = NULL;
                }
 
-               if (dev->usb2_hw_lpm_enabled == 1)
-                       usb_set_usb2_hardware_lpm(dev, 0);
+               usb_disable_usb2_hardware_lpm(dev);
                usb_unlocked_disable_lpm(dev);
                usb_disable_ltm(dev);
 
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index ea18284dfa9a..7e88fdfe3cf5 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -528,7 +528,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
 
        if (!ret) {
                udev->usb2_hw_lpm_allowed = value;
-               ret = usb_set_usb2_hardware_lpm(udev, value);
+               if (value)
+                       ret = usb_enable_usb2_hardware_lpm(udev);
+               else
+                       ret = usb_disable_usb2_hardware_lpm(udev);
        }
 
        usb_unlock_device(udev);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 546a2219454b..d95a5358f73d 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -92,7 +92,8 @@ extern int usb_remote_wakeup(struct usb_device *dev);
 extern int usb_runtime_suspend(struct device *dev);
 extern int usb_runtime_resume(struct device *dev);
 extern int usb_runtime_idle(struct device *dev);
-extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
+extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
+extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
 
 #else
 
@@ -112,7 +113,12 @@ static inline int usb_autoresume_device(struct usb_device *udev)
        return 0;
 }
 
-static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+{
+       return 0;
+}
+
+static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
 {
        return 0;
 }
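
The hunks above convert every call site from the flag-style usb_set_usb2_hardware_lpm(udev, 0/1) to the split helpers. The helper bodies live elsewhere in drivers/usb/core and are not part of this excerpt; what follows is a hedged sketch of the intent, using only the fields visible in the call sites above (usb2_hw_lpm_capable, usb2_hw_lpm_allowed, usb2_hw_lpm_enabled):

    /* Sketch only - not the literal patched code.  The point of the
     * split is that the capability/policy checks the callers used to
     * open-code move inside the helpers, so callers may invoke them
     * unconditionally.
     */
    int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
    {
            if (!udev->usb2_hw_lpm_capable || !udev->usb2_hw_lpm_allowed)
                    return 0;
            return usb_set_usb2_hardware_lpm(udev, 1);
    }

    int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
    {
            if (!udev->usb2_hw_lpm_enabled)
                    return 0;
            return usb_set_usb2_hardware_lpm(udev, 0);
    }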
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 73652e21efec..d0f731c9920a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
 MODULE_PARM_DESC(disable_hugepages,
                 "Disable VFIO IOMMU support for IOMMU hugepages.");
 
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+                "Maximum number of user DMA mappings per container (65535).");
+
 struct vfio_iommu {
        struct list_head        domain_list;
        struct vfio_domain      *external_domain; /* domain for external user */
        struct mutex            lock;
        struct rb_root          dma_list;
        struct blocking_notifier_head notifier;
+       unsigned int            dma_avail;
        bool                    v2;
        bool                    nesting;
 };
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
        vfio_unlink_dma(iommu, dma);
        put_task_struct(dma->task);
        kfree(dma);
+       iommu->dma_avail++;
 }
 
 static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                goto out_unlock;
        }
 
+       if (!iommu->dma_avail) {
+               ret = -ENOSPC;
+               goto out_unlock;
+       }
+
        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma) {
                ret = -ENOMEM;
                goto out_unlock;
        }
 
+       iommu->dma_avail--;
        dma->iova = iova;
        dma->vaddr = vaddr;
        dma->prot = prot;
@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 
        INIT_LIST_HEAD(&iommu->domain_list);
        iommu->dma_list = RB_ROOT;
+       iommu->dma_avail = dma_entry_limit;
        mutex_init(&iommu->lock);
        BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
 
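
dma_entry_limit caps the number of outstanding user DMA mappings per container at 65535 by default, and the 0644 mode makes it adjustable at runtime through /sys/module/vfio_iommu_type1/parameters/dma_entry_limit. A hedged userspace sketch of the new failure mode follows; VFIO_IOMMU_MAP_DMA and struct vfio_iommu_type1_dma_map are standard uapi, while map_one_region() and container_fd are illustrative names:

    #include <errno.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* container_fd: an already-configured VFIO container.  With this
     * patch, exceeding dma_entry_limit concurrent mappings makes the
     * ioctl fail with ENOSPC instead of letting the rb-tree grow
     * without bound.
     */
    static int map_one_region(int container_fd, void *buf,
                              uint64_t iova, uint64_t size)
    {
            struct vfio_iommu_type1_dma_map map = {
                    .argsz = sizeof(map),
                    .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
                    .vaddr = (uintptr_t)buf,
                    .iova  = iova,
                    .size  = size,
            };

            if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map) < 0)
                    return -errno;  /* -ENOSPC once the limit is hit */
            return 0;
    }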
diff --git a/fs/aio.c b/fs/aio.c
index 3d9669d011b9..efa13410e04e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,7 +181,7 @@ struct poll_iocb {
        struct file             *file;
        struct wait_queue_head  *head;
        __poll_t                events;
-       bool                    woken;
+       bool                    done;
        bool                    cancelled;
        struct wait_queue_entry wait;
        struct work_struct      work;
@@ -204,8 +204,7 @@ struct aio_kiocb {
        struct kioctx           *ki_ctx;
        kiocb_cancel_fn         *ki_cancel;
 
-       struct iocb __user      *ki_user_iocb;  /* user's aiocb */
-       __u64                   ki_user_data;   /* user's data for completion */
+       struct io_event         ki_res;
 
        struct list_head        ki_list;        /* the aio core uses this
                                                 * for cancellation */
@@ -1022,6 +1021,9 @@ static bool get_reqs_available(struct kioctx *ctx)
 /* aio_get_req
  *     Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2 - one for the async op completion,
+ * one for the synchronous code that does this.
  */
 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
@@ -1034,7 +1036,7 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
        percpu_ref_get(&ctx->reqs);
        req->ki_ctx = ctx;
        INIT_LIST_HEAD(&req->ki_list);
-       refcount_set(&req->ki_refcnt, 0);
+       refcount_set(&req->ki_refcnt, 2);
        req->ki_eventfd = NULL;
        return req;
 }
@@ -1067,30 +1069,18 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
        return ret;
 }
 
-static inline void iocb_put(struct aio_kiocb *iocb)
+static inline void iocb_destroy(struct aio_kiocb *iocb)
 {
-       if (refcount_read(&iocb->ki_refcnt) == 0 ||
-           refcount_dec_and_test(&iocb->ki_refcnt)) {
-               if (iocb->ki_filp)
-                       fput(iocb->ki_filp);
-               percpu_ref_put(&iocb->ki_ctx->reqs);
-               kmem_cache_free(kiocb_cachep, iocb);
-       }
-}
-
-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
-                          long res, long res2)
-{
-       ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-       ev->data = iocb->ki_user_data;
-       ev->res = res;
-       ev->res2 = res2;
+       if (iocb->ki_filp)
+               fput(iocb->ki_filp);
+       percpu_ref_put(&iocb->ki_ctx->reqs);
+       kmem_cache_free(kiocb_cachep, iocb);
 }
 
 /* aio_complete
  *     Called when the io request on the given iocb is complete.
  */
-static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
+static void aio_complete(struct aio_kiocb *iocb)
 {
        struct kioctx   *ctx = iocb->ki_ctx;
        struct aio_ring *ring;
@@ -1114,14 +1104,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
        ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
        event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-       aio_fill_event(event, iocb, res, res2);
+       *event = iocb->ki_res;
 
        kunmap_atomic(ev_page);
        flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
-       pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-                ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-                res, res2);
+       pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+                (void __user *)(unsigned long)iocb->ki_res.obj,
+                iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
        /* after flagging the request as done, we
         * must never even look at it again
@@ -1163,7 +1153,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 
        if (waitqueue_active(&ctx->wait))
                wake_up(&ctx->wait);
-       iocb_put(iocb);
+}
+
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+       if (refcount_dec_and_test(&iocb->ki_refcnt)) {
+               aio_complete(iocb);
+               iocb_destroy(iocb);
+       }
 }
 
 /* aio_read_events_ring
@@ -1437,7 +1434,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
                file_end_write(kiocb->ki_filp);
        }
 
-       aio_complete(iocb, res, res2);
+       iocb->ki_res.res = res;
+       iocb->ki_res.res2 = res2;
+       iocb_put(iocb);
 }
 
 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
@@ -1585,11 +1584,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
 
 static void aio_fsync_work(struct work_struct *work)
 {
-       struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
-       int ret;
+       struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
 
-       ret = vfs_fsync(req->file, req->datasync);
-       aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
+       iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+       iocb_put(iocb);
 }
 
 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
@@ -1608,11 +1606,6 @@ static int aio_fsync(struct fsync_iocb *req, const 
struct iocb *iocb,
        return 0;
 }
 
-static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-       aio_complete(iocb, mangle_poll(mask), 0);
-}
-
 static void aio_poll_complete_work(struct work_struct *work)
 {
        struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,9 +1631,11 @@ static void aio_poll_complete_work(struct work_struct *work)
                return;
        }
        list_del_init(&iocb->ki_list);
+       iocb->ki_res.res = mangle_poll(mask);
+       req->done = true;
        spin_unlock_irq(&ctx->ctx_lock);
 
-       aio_poll_complete(iocb, mask);
+       iocb_put(iocb);
 }
 
 /* assumes we are called with irqs disabled */
@@ -1668,31 +1663,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        __poll_t mask = key_to_poll(key);
        unsigned long flags;
 
-       req->woken = true;
-
        /* for instances that support it check for an event match first: */
-       if (mask) {
-               if (!(mask & req->events))
-                       return 0;
+       if (mask && !(mask & req->events))
+               return 0;
+
+       list_del_init(&req->wait.entry);
 
+       if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
                /*
                 * Try to complete the iocb inline if we can. Use
                 * irqsave/irqrestore because not all filesystems (e.g. fuse)
                 * call this function with IRQs disabled and because IRQs
                 * have to be disabled before ctx_lock is obtained.
                 */
-               if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
-                       list_del(&iocb->ki_list);
-                       spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
-
-                       list_del_init(&req->wait.entry);
-                       aio_poll_complete(iocb, mask);
-                       return 1;
-               }
+               list_del(&iocb->ki_list);
+               iocb->ki_res.res = mangle_poll(mask);
+               req->done = true;
+               spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+               iocb_put(iocb);
+       } else {
+               schedule_work(&req->work);
        }
-
-       list_del_init(&req->wait.entry);
-       schedule_work(&req->work);
        return 1;
 }
 
@@ -1724,6 +1715,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        struct kioctx *ctx = aiocb->ki_ctx;
        struct poll_iocb *req = &aiocb->poll;
        struct aio_poll_table apt;
+       bool cancel = false;
        __poll_t mask;
 
        /* reject any unknown events outside the normal event mask. */
@@ -1737,7 +1729,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
 
        req->head = NULL;
-       req->woken = false;
+       req->done = false;
        req->cancelled = false;
 
        apt.pt._qproc = aio_poll_queue_proc;
@@ -1749,41 +1741,34 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        INIT_LIST_HEAD(&req->wait.entry);
        init_waitqueue_func_entry(&req->wait, aio_poll_wake);
 
-       /* one for removal from waitqueue, one for this function */
-       refcount_set(&aiocb->ki_refcnt, 2);
-
        mask = vfs_poll(req->file, &apt.pt) & req->events;
-       if (unlikely(!req->head)) {
-               /* we did not manage to set up a waitqueue, done */
-               goto out;
-       }
-
        spin_lock_irq(&ctx->ctx_lock);
-       spin_lock(&req->head->lock);
-       if (req->woken) {
-               /* wake_up context handles the rest */
-               mask = 0;
+       if (likely(req->head)) {
+               spin_lock(&req->head->lock);
+               if (unlikely(list_empty(&req->wait.entry))) {
+                       if (apt.error)
+                               cancel = true;
+                       apt.error = 0;
+                       mask = 0;
+               }
+               if (mask || apt.error) {
+                       list_del_init(&req->wait.entry);
+               } else if (cancel) {
+                       WRITE_ONCE(req->cancelled, true);
+               } else if (!req->done) { /* actually waiting for an event */
+                       list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+                       aiocb->ki_cancel = aio_poll_cancel;
+               }
+               spin_unlock(&req->head->lock);
+       }
+       if (mask) { /* no async, we'd stolen it */
+               aiocb->ki_res.res = mangle_poll(mask);
                apt.error = 0;
-       } else if (mask || apt.error) {
-               /* if we get an error or a mask we are done */
-               WARN_ON_ONCE(list_empty(&req->wait.entry));
-               list_del_init(&req->wait.entry);
-       } else {
-               /* actually waiting for an event */
-               list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-               aiocb->ki_cancel = aio_poll_cancel;
        }
-       spin_unlock(&req->head->lock);
        spin_unlock_irq(&ctx->ctx_lock);
-
-out:
-       if (unlikely(apt.error))
-               return apt.error;
-
        if (mask)
-               aio_poll_complete(aiocb, mask);
-       iocb_put(aiocb);
-       return 0;
+               iocb_put(aiocb);
+       return apt.error;
 }
 
 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
@@ -1842,8 +1827,10 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
                goto out_put_req;
        }
 
-       req->ki_user_iocb = user_iocb;
-       req->ki_user_data = iocb->aio_data;
+       req->ki_res.obj = (u64)(unsigned long)user_iocb;
+       req->ki_res.data = iocb->aio_data;
+       req->ki_res.res = 0;
+       req->ki_res.res2 = 0;
 
        switch (iocb->aio_lio_opcode) {
        case IOCB_CMD_PREAD:
@@ -1873,18 +1860,21 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
                break;
        }
 
+       /* Done with the synchronous reference */
+       iocb_put(req);
+
        /*
         * If ret is 0, we'd either done aio_complete() ourselves or have
         * arranged for that to be done asynchronously.  Anything non-zero
         * means that we need to destroy req ourselves.
         */
-       if (ret)
-               goto out_put_req;
-       return 0;
+       if (!ret)
+               return 0;
+
 out_put_req:
        if (req->ki_eventfd)
                eventfd_ctx_put(req->ki_eventfd);
-       iocb_put(req);
+       iocb_destroy(req);
 out_put_reqs_available:
        put_reqs_available(ctx, 1);
        return ret;
@@ -1997,24 +1987,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
 }
 #endif
 
-/* lookup_kiocb
- *     Finds a given iocb for cancellation.
- */
-static struct aio_kiocb *
-lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
-{
-       struct aio_kiocb *kiocb;
-
-       assert_spin_locked(&ctx->ctx_lock);
-
-       /* TODO: use a hash or array, this sucks. */
-       list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-               if (kiocb->ki_user_iocb == iocb)
-                       return kiocb;
-       }
-       return NULL;
-}
-
 /* sys_io_cancel:
  *     Attempts to cancel an iocb previously passed to io_submit.  If
  *     the operation is successfully cancelled, the resulting event is
@@ -2032,6 +2004,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
        struct aio_kiocb *kiocb;
        int ret = -EINVAL;
        u32 key;
+       u64 obj = (u64)(unsigned long)iocb;
 
        if (unlikely(get_user(key, &iocb->aio_key)))
                return -EFAULT;
@@ -2043,10 +2016,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
                return -EINVAL;
 
        spin_lock_irq(&ctx->ctx_lock);
-       kiocb = lookup_kiocb(ctx, iocb);
-       if (kiocb) {
-               ret = kiocb->ki_cancel(&kiocb->rw);
-               list_del_init(&kiocb->ki_list);
+       /* TODO: use a hash or array, this sucks. */
+       list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
+               if (kiocb->ki_res.obj == obj) {
+                       ret = kiocb->ki_cancel(&kiocb->rw);
+                       list_del_init(&kiocb->ki_list);
+                       break;
+               }
        }
        spin_unlock_irq(&ctx->ctx_lock);
 
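
Taken together, the fs/aio.c hunks replace "complete with results, then conditionally put" with a strict two-reference lifetime. A schematic of the intended flow, using only names from the hunks above (a sketch, not additional patched code):

    /*
     * req = aio_get_req(ctx);          ki_refcnt == 2
     *
     * submit side:     fills ki_res.obj / ki_res.data, starts the op,
     *                  then iocb_put(req) ("done with the synchronous
     *                  reference")
     * completion side: fills ki_res.res / ki_res.res2,
     *                  then iocb_put(req)
     *
     * Whichever iocb_put() drops the last reference runs
     * aio_complete() followed by iocb_destroy(), so the io_event is
     * published only after ki_res is fully written, and io_cancel()
     * can match requests by ki_res.obj instead of ki_user_iocb.
     */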
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 82928cea0209..7f3f64ba464f 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1470,6 +1470,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 {
        struct ceph_inode_info *dci = ceph_inode(dir);
+       unsigned hash;
 
        switch (dci->i_dir_layout.dl_dir_hash) {
        case 0: /* for backward compat */
@@ -1477,8 +1478,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
                return dn->d_name.hash;
 
        default:
-               return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+               spin_lock(&dn->d_lock);
+               hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
                                     dn->d_name.name, dn->d_name.len);
+               spin_unlock(&dn->d_lock);
+               return hash;
        }
 }
 
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 163fc74bf221..5cec784e30f6 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1286,6 +1286,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                        list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
                        ci->i_prealloc_cap_flush = NULL;
                }
+
+               if (drop &&
+                  ci->i_wrbuffer_ref_head == 0 &&
+                  ci->i_wr_ref == 0 &&
+                  ci->i_dirty_caps == 0 &&
+                  ci->i_flushing_caps == 0) {
+                      ceph_put_snap_context(ci->i_head_snapc);
+                      ci->i_head_snapc = NULL;
+               }
        }
        spin_unlock(&ci->i_ceph_lock);
        while (!list_empty(&to_remove)) {
@@ -1958,10 +1967,39 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
        return path;
 }
 
+/* Duplicate the dentry->d_name.name safely */
+static int clone_dentry_name(struct dentry *dentry, const char **ppath,
+                            int *ppathlen)
+{
+       u32 len;
+       char *name;
+
+retry:
+       len = READ_ONCE(dentry->d_name.len);
+       name = kmalloc(len + 1, GFP_NOFS);
+       if (!name)
+               return -ENOMEM;
+
+       spin_lock(&dentry->d_lock);
+       if (dentry->d_name.len != len) {
+               spin_unlock(&dentry->d_lock);
+               kfree(name);
+               goto retry;
+       }
+       memcpy(name, dentry->d_name.name, len);
+       spin_unlock(&dentry->d_lock);
+
+       name[len] = '\0';
+       *ppath = name;
+       *ppathlen = len;
+       return 0;
+}
+
 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
                             const char **ppath, int *ppathlen, u64 *pino,
-                            int *pfreepath)
+                            bool *pfreepath, bool parent_locked)
 {
+       int ret;
        char *path;
 
        rcu_read_lock();
@@ -1970,8 +2008,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
        if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
                *pino = ceph_ino(dir);
                rcu_read_unlock();
-               *ppath = dentry->d_name.name;
-               *ppathlen = dentry->d_name.len;
+               if (parent_locked) {
+                       *ppath = dentry->d_name.name;
+                       *ppathlen = dentry->d_name.len;
+               } else {
+                       ret = clone_dentry_name(dentry, ppath, ppathlen);
+                       if (ret)
+                               return ret;
+                       *pfreepath = true;
+               }
                return 0;
        }
        rcu_read_unlock();
@@ -1979,13 +2024,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
        if (IS_ERR(path))
                return PTR_ERR(path);
        *ppath = path;
-       *pfreepath = 1;
+       *pfreepath = true;
        return 0;
 }
 
 static int build_inode_path(struct inode *inode,
                            const char **ppath, int *ppathlen, u64 *pino,
-                           int *pfreepath)
+                           bool *pfreepath)
 {
        struct dentry *dentry;
        char *path;
@@ -2001,7 +2046,7 @@ static int build_inode_path(struct inode *inode,
        if (IS_ERR(path))
                return PTR_ERR(path);
        *ppath = path;
-       *pfreepath = 1;
+       *pfreepath = true;
        return 0;
 }
 
@@ -2012,7 +2057,7 @@ static int build_inode_path(struct inode *inode,
 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
                                  struct inode *rdiri, const char *rpath,
                                  u64 rino, const char **ppath, int *pathlen,
-                                 u64 *ino, int *freepath)
+                                 u64 *ino, bool *freepath, bool parent_locked)
 {
        int r = 0;
 
@@ -2022,7 +2067,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
                     ceph_snap(rinode));
        } else if (rdentry) {
                r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
-                                       freepath);
+                                       freepath, parent_locked);
                dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
                     *ppath);
        } else if (rpath || rino) {
@@ -2048,7 +2093,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
        const char *path2 = NULL;
        u64 ino1 = 0, ino2 = 0;
        int pathlen1 = 0, pathlen2 = 0;
-       int freepath1 = 0, freepath2 = 0;
+       bool freepath1 = false, freepath2 = false;
        int len;
        u16 releases;
        void *p, *end;
@@ -2056,16 +2101,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
 
        ret = set_request_path_attr(req->r_inode, req->r_dentry,
                              req->r_parent, req->r_path1, req->r_ino1.ino,
-                             &path1, &pathlen1, &ino1, &freepath1);
+                             &path1, &pathlen1, &ino1, &freepath1,
+                             test_bit(CEPH_MDS_R_PARENT_LOCKED,
+                                       &req->r_req_flags));
        if (ret < 0) {
                msg = ERR_PTR(ret);
                goto out;
        }
 
+       /* If r_old_dentry is set, then assume that its parent is locked */
        ret = set_request_path_attr(NULL, req->r_old_dentry,
                              req->r_old_dentry_dir,
                              req->r_path2, req->r_ino2.ino,
-                             &path2, &pathlen2, &ino2, &freepath2);
+                             &path2, &pathlen2, &ino2, &freepath2, true);
        if (ret < 0) {
                msg = ERR_PTR(ret);
                goto out_free1;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index f74193da0e09..1f46b02f7314 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -568,7 +568,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
        old_snapc = NULL;
 
 update_snapc:
-       if (ci->i_head_snapc) {
+       if (ci->i_wrbuffer_ref_head == 0 &&
+           ci->i_wr_ref == 0 &&
+           ci->i_dirty_caps == 0 &&
+           ci->i_flushing_caps == 0) {
+               ci->i_head_snapc = NULL;
+       } else {
                ci->i_head_snapc = ceph_get_snap_context(new_snapc);
                dout(" new snapc is %p\n", new_snapc);
        }
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 7c05353b766c..7c3f9d00586e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2796,7 +2796,6 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
        struct cifs_tcon *tcon;
        struct cifs_sb_info *cifs_sb;
        struct dentry *dentry = ctx->cfile->dentry;
-       unsigned int i;
        int rc;
 
        tcon = tlink_tcon(ctx->cfile->tlink);
@@ -2860,10 +2859,6 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
                kref_put(&wdata->refcount, cifs_uncached_writedata_release);
        }
 
-       if (!ctx->direct_io)
-               for (i = 0; i < ctx->npages; i++)
-                       put_page(ctx->bv[i].bv_page);
-
        cifs_stats_bytes_written(tcon, ctx->total_len);
        set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
 
@@ -3472,7 +3467,6 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx)
        struct iov_iter *to = &ctx->iter;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
-       unsigned int i;
        int rc;
 
        tcon = tlink_tcon(ctx->cfile->tlink);
@@ -3556,15 +3550,8 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx)
                kref_put(&rdata->refcount, cifs_uncached_readdata_release);
        }
 
-       if (!ctx->direct_io) {
-               for (i = 0; i < ctx->npages; i++) {
-                       if (ctx->should_dirty)
-                               set_page_dirty(ctx->bv[i].bv_page);
-                       put_page(ctx->bv[i].bv_page);
-               }
-
+       if (!ctx->direct_io)
                ctx->total_len = ctx->len - iov_iter_count(to);
-       }
 
        cifs_stats_bytes_read(tcon, ctx->total_len);
 
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 53fdb5df0d2e..538fd7d807e4 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1735,6 +1735,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
        if (rc == 0 || rc != -EBUSY)
                goto do_rename_exit;
 
+       /* Don't fall back to using SMB on SMB 2+ mount */
+       if (server->vals->protocol_id != 0)
+               goto do_rename_exit;
+
        /* open-file renames don't work across directories */
        if (to_dentry->d_parent != from_dentry->d_parent)
                goto do_rename_exit;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 1e1626a2cfc3..0dc6f08020ac 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -789,6 +789,11 @@ cifs_aio_ctx_alloc(void)
 {
        struct cifs_aio_ctx *ctx;
 
+       /*
+        * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
+        * to false so that we know when we have to unreference pages within
+        * cifs_aio_ctx_release()
+        */
        ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;
@@ -807,7 +812,23 @@ cifs_aio_ctx_release(struct kref *refcount)
                                        struct cifs_aio_ctx, refcount);
 
        cifsFileInfo_put(ctx->cfile);
-       kvfree(ctx->bv);
+
+       /*
+        * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
+        * which means that iov_iter_get_pages() was a success and thus that
+        * we have taken references on the pages.
+        */
+       if (ctx->bv) {
+               unsigned i;
+
+               for (i = 0; i < ctx->npages; i++) {
+                       if (ctx->should_dirty)
+                               set_page_dirty(ctx->bv[i].bv_page);
+                       put_page(ctx->bv[i].bv_page);
+               }
+               kvfree(ctx->bv);
+       }
+
        kfree(ctx);
 }
 
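
The net effect of the cifs hunks above is a single ownership rule for the pinned pages; summarized as a sketch (a reading of the hunks, not patched code):

    /*
     * cifs_aio_ctx_alloc()   - kzalloc, so ctx->bv starts NULL and
     *                          ctx->direct_io starts false
     * setup_aio_ctx_iter()   - on success, ctx->bv holds the page refs
     * cifs_aio_ctx_release() - the one place that dirties (when
     *                          ctx->should_dirty) and puts the pages,
     *                          then kvfree(ctx->bv)
     *
     * The per-request loops removed from the collect_uncached_*()
     * paths mean error paths that never reach those functions no
     * longer leak the references.
     */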
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 938e75cc3b66..85a3c051e622 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -3402,6 +3402,7 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
                                            rc);
                }
                free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+               cifs_small_buf_release(req);
                return rc == -ENODATA ? 0 : rc;
        } else
                trace_smb3_read_done(xid, req->PersistentFileId,
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 86ed9c686249..dc82e7757f67 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -829,6 +829,7 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
                bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
                if (IS_ERR(bh)) {
                        ret = PTR_ERR(bh);
+                       bh = NULL;
                        goto out;
                }
 
@@ -2903,6 +2904,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
                        if (error == -EIO)
                                EXT4_ERROR_INODE(inode, "block %llu read error",
                                                 EXT4_I(inode)->i_file_acl);
+                       bh = NULL;
                        goto cleanup;
                }
                error = ext4_xattr_check_block(inode, bh);
@@ -3059,6 +3061,7 @@ ext4_xattr_block_cache_find(struct inode *inode,
                if (IS_ERR(bh)) {
                        if (PTR_ERR(bh) == -ENOMEM)
                                return NULL;
+                       bh = NULL;
                        EXT4_ERROR_INODE(inode, "block %lu read error",
                                         (unsigned long)ce->e_value);
                } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 0570391eaa16..15c025c1a305 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2041,7 +2041,8 @@ static int nfs23_validate_mount_data(void *options,
                memcpy(sap, &data->addr, sizeof(data->addr));
                args->nfs_server.addrlen = sizeof(data->addr);
                args->nfs_server.port = ntohs(data->addr.sin_port);
-               if (!nfs_verify_server_address(sap))
+               if (sap->sa_family != AF_INET ||
+                   !nfs_verify_server_address(sap))
                        goto out_no_address;
 
                if (!(data->flags & NFS_MOUNT_TCP))
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index c74e4538d0eb..258f741d6a21 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1023,8 +1023,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
        cb->cb_seq_status = 1;
        cb->cb_status = 0;
        if (minorversion) {
-               if (!nfsd41_cb_get_slot(clp, task))
+               if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
                        return;
+               cb->cb_holds_slot = true;
        }
        rpc_call_start(task);
 }
@@ -1051,6 +1052,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
                return true;
        }
 
+       if (!cb->cb_holds_slot)
+               goto need_restart;
+
        switch (cb->cb_seq_status) {
        case 0:
                /*
@@ -1089,6 +1093,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
                        cb->cb_seq_status);
        }
 
+       cb->cb_holds_slot = false;
        clear_bit(0, &clp->cl_cb_slot_busy);
        rpc_wake_up_next(&clp->cl_cb_waitq);
        dprintk("%s: freed slot, new seqid=%d\n", __func__,
@@ -1296,6 +1301,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
        cb->cb_seq_status = 1;
        cb->cb_status = 0;
        cb->cb_need_restart = false;
+       cb->cb_holds_slot = false;
 }
 
 void nfsd4_run_cb(struct nfsd4_callback *cb)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 6a45fb00c5fc..f056b1d3fecd 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -265,6 +265,7 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
 static void
 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
 {
+       locks_delete_block(&nbl->nbl_lock);
        locks_release_private(&nbl->nbl_lock);
        kfree(nbl);
 }
@@ -293,11 +294,18 @@ remove_blocked_locks(struct nfs4_lockowner *lo)
                nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
                                        nbl_lru);
                list_del_init(&nbl->nbl_lru);
-               locks_delete_block(&nbl->nbl_lock);
                free_blocked_lock(nbl);
        }
 }
 
+static void
+nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
+{
+       struct nfsd4_blocked_lock       *nbl = container_of(cb,
+                                               struct nfsd4_blocked_lock, nbl_cb);
+       locks_delete_block(&nbl->nbl_lock);
+}
+
 static int
 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
 {
@@ -325,6 +333,7 @@ nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
 }
 
 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
+       .prepare        = nfsd4_cb_notify_lock_prepare,
        .done           = nfsd4_cb_notify_lock_done,
        .release        = nfsd4_cb_notify_lock_release,
 };
@@ -4863,7 +4872,6 @@ nfs4_laundromat(struct nfsd_net *nn)
                nbl = list_first_entry(&reaplist,
                                        struct nfsd4_blocked_lock, nbl_lru);
                list_del_init(&nbl->nbl_lru);
-               locks_delete_block(&nbl->nbl_lock);
                free_blocked_lock(nbl);
        }
 out:
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 396c76755b03..9d6cb246c6c5 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -70,6 +70,7 @@ struct nfsd4_callback {
        int cb_seq_status;
        int cb_status;
        bool cb_need_restart;
+       bool cb_holds_slot;
 };
 
 struct nfsd4_callback_ops {
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index d65390727541..7325baa8f9d4 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1626,9 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
        if (--header->nreg)
                return;
 
-       if (parent)
+       if (parent) {
                put_links(header);
-       start_unregistering(header);
+               start_unregistering(header);
+       }
+
        if (!--header->count)
                kfree_rcu(header, rcu);
 
diff --git a/fs/splice.c b/fs/splice.c
index 90c29675d573..7da7d5437472 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -333,8 +333,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = {
        .get = generic_pipe_buf_get,
 };
 
-static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
-                                   struct pipe_buffer *buf)
+int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
+                            struct pipe_buffer *buf)
 {
        return 1;
 }
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 1021106438b2..c80e5833b1d6 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -411,7 +411,6 @@ extern struct ttm_bo_global {
        /**
         * Protected by ttm_global_mutex.
         */
-       unsigned int use_count;
        struct list_head device_list;
 
        /**
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 2c0af7b00715..c94ab8b53a23 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -447,6 +447,18 @@ static inline void eth_addr_dec(u8 *addr)
        u64_to_ether_addr(u, addr);
 }
 
+/**
+ * eth_addr_inc() - Increment the given MAC address.
+ * @addr: Pointer to a six-byte array containing Ethernet address to increment.
+ */
+static inline void eth_addr_inc(u8 *addr)
+{
+       u64 u = ether_addr_to_u64(addr);
+
+       u++;
+       u64_to_ether_addr(u, addr);
+}
+
 /**
  * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
  * @dev: Pointer to a device structure
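
eth_addr_inc() mirrors eth_addr_dec() just above it: because the address round-trips through a u64, a carry propagates across all six bytes rather than only wrapping the last one. A minimal usage sketch (base_mac is an illustrative source buffer; the validity check matters because a carry into the first octet can yield a multicast or all-zero address, exactly what the ncsi-rsp.c hunk below guards against):

    u8 mac[ETH_ALEN];

    memcpy(mac, base_mac, ETH_ALEN);   /* base_mac: illustrative */
    eth_addr_inc(mac);                 /* e.g. ..:00:ff -> ..:01:00 */
    if (!is_valid_ether_addr(mac))
            return -EINVAL;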
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 3ecd7ea212ae..66ee63cd5968 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -181,6 +181,7 @@ void free_pipe_info(struct pipe_inode_info *);
 void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
 void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
 
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 0612439909dc..9e0b9ecb43db 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -382,6 +382,7 @@ void nft_unregister_set(struct nft_set_type *type);
  *     @dtype: data type (verdict or numeric type defined by userspace)
  *     @objtype: object type (see NFT_OBJECT_* definitions)
  *     @size: maximum set size
+ *     @use: number of rule references to this set
  *     @nelems: number of elements
  *     @ndeact: number of deactivated elements queued for removal
  *     @timeout: default timeout value in jiffies
@@ -407,6 +408,7 @@ struct nft_set {
        u32                             dtype;
        u32                             objtype;
        u32                             size;
+       u32                             use;
        atomic_t                        nelems;
        u32                             ndeact;
        u64                             timeout;
@@ -467,6 +469,10 @@ struct nft_set_binding {
        u32                             flags;
 };
 
+enum nft_trans_phase;
+void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+                             struct nft_set_binding *binding,
+                             enum nft_trans_phase phase);
 int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
                       struct nft_set_binding *binding);
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
diff --git a/include/net/netrom.h b/include/net/netrom.h
index 5a0714ff500f..80f15b1c1a48 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
 int nr_t1timer_running(struct sock *);
 
 /* sysctl_net_netrom.c */
-void nr_register_sysctl(void);
+int nr_register_sysctl(void);
 void nr_unregister_sysctl(void);
 
 #endif
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fb8b7b5d745d..451b1f9e80a6 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -252,7 +252,6 @@ static void task_non_contending(struct task_struct *p)
        if (dl_entity_is_special(dl_se))
                return;
 
-       WARN_ON(hrtimer_active(&dl_se->inactive_timer));
        WARN_ON(dl_se->dl_non_contending);
 
        zerolag_time = dl_se->deadline -
@@ -269,7 +268,7 @@ static void task_non_contending(struct task_struct *p)
         * If the "0-lag time" already passed, decrease the active
         * utilization now, instead of starting a timer
         */
-       if (zerolag_time < 0) {
+       if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
                if (dl_task(p))
                        sub_running_bw(dl_se, dl_rq);
                if (!dl_task(p) || p->state == TASK_DEAD) {
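
For the deadline.c hunk: the removed WARN_ON() covered a case that can legitimately occur, so the new condition treats an already-armed inactive_timer like an expired zero-lag time. Schematically (a reading of the hunk, not extra code):

    /*
     * zerolag_time < 0                    0-lag point already passed
     * hrtimer_active(&inactive_timer)     a previous non-contending
     *                                     phase armed the timer and it
     *                                     has not gone off yet
     *
     * In either case, do not (re)arm the timer; release the running
     * bandwidth immediately instead.
     */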
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index eeb605656d59..be55a64748ba 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1994,6 +1994,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
        if (p->last_task_numa_placement) {
                delta = runtime - p->last_sum_exec_runtime;
                *period = now - p->last_task_numa_placement;
+
+               /* Avoid time going backwards, prevent potential divide error: */
+               if (unlikely((s64)*period < 0))
+                       *period = 0;
        } else {
                delta = p->se.avg.load_sum;
                *period = LOAD_AVG_MAX;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b49affb4666b..4463ae28bf1a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -776,7 +776,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 
        preempt_disable_notrace();
        time = rb_time_stamp(buffer);
-       preempt_enable_no_resched_notrace();
+       preempt_enable_notrace();
 
        return time;
 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 89158aa93fa6..d07fc2836786 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -496,8 +496,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
         * not modified.
         */
        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
-       if (!pid_list)
+       if (!pid_list) {
+               trace_parser_put(&parser);
                return -ENOMEM;
+       }
 
        pid_list->pid_max = READ_ONCE(pid_max);
 
@@ -507,6 +509,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
 
        pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
        if (!pid_list->pids) {
+               trace_parser_put(&parser);
                kfree(pid_list);
                return -ENOMEM;
        }
@@ -6820,19 +6823,23 @@ struct buffer_ref {
        struct ring_buffer      *buffer;
        void                    *page;
        int                     cpu;
-       int                     ref;
+       refcount_t              refcount;
 };
 
+static void buffer_ref_release(struct buffer_ref *ref)
+{
+       if (!refcount_dec_and_test(&ref->refcount))
+               return;
+       ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
+       kfree(ref);
+}
+
 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
 {
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
-       if (--ref->ref)
-               return;
-
-       ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
-       kfree(ref);
+       buffer_ref_release(ref);
        buf->private = 0;
 }
 
@@ -6841,7 +6848,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 {
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
-       ref->ref++;
+       refcount_inc(&ref->refcount);
 }
 
 /* Pipe buffer operations for a buffer. */
@@ -6849,7 +6856,7 @@ static const struct pipe_buf_operations buffer_pipe_buf_ops = {
        .can_merge              = 0,
        .confirm                = generic_pipe_buf_confirm,
        .release                = buffer_pipe_buf_release,
-       .steal                  = generic_pipe_buf_steal,
+       .steal                  = generic_pipe_buf_nosteal,
        .get                    = buffer_pipe_buf_get,
 };
 
@@ -6862,11 +6869,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
        struct buffer_ref *ref =
                (struct buffer_ref *)spd->partial[i].private;
 
-       if (--ref->ref)
-               return;
-
-       ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
-       kfree(ref);
+       buffer_ref_release(ref);
        spd->partial[i].private = 0;
 }
 
@@ -6921,7 +6924,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                        break;
                }
 
-               ref->ref = 1;
+               refcount_set(&ref->refcount, 1);
                ref->buffer = iter->trace_buffer->buffer;
                ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
                if (IS_ERR(ref->page)) {
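
The buffer_ref conversion above is the standard int-to-refcount_t hardening pattern; a generic sketch of what it buys (generic names, not part of the patch):

    struct obj {
            refcount_t refs;                /* was: int refs */
    };

    static void obj_get(struct obj *o)
    {
            refcount_inc(&o->refs);         /* saturates + WARNs on overflow */
    }

    static void obj_put(struct obj *o)
    {
            if (refcount_dec_and_test(&o->refs))    /* WARNs on underflow */
                    kfree(o);
    }

Switching .steal to generic_pipe_buf_nosteal complements this: a spliced page backed by a buffer_ref can no longer be stolen out from under an outstanding reference.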
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index fc5d23d752a5..e94d2b6bee7f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2931,6 +2931,9 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
        if (WARN_ON(!wq_online))
                return false;
 
+       if (WARN_ON(!work->func))
+               return false;
+
        if (!from_cancel) {
                lock_map_acquire(&work->lockdep_map);
                lock_map_release(&work->lockdep_map);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d4df5b24d75e..350d5328014f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1952,6 +1952,7 @@ config TEST_KMOD
        depends on m
        depends on BLOCK && (64BIT || LBDAF)      # for XFS, BTRFS
        depends on NETDEVICES && NET_CORE && INET # for TUN
+       depends on BLOCK
        select TEST_LKM
        select XFS_FS
        select TUN
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 20dd3283bb1b..318ef6ccdb3b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -266,7 +266,20 @@ compound_page_dtor * const compound_page_dtors[] = {
 
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
+#ifdef CONFIG_DISCONTIGMEM
+/*
+ * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
+ * are not on separate NUMA nodes. Functionally this works but with
+ * watermark_boost_factor, it can reclaim prematurely as the ranges can be
+ * quite small. By default, do not boost watermarks on discontigmem as in
+ * many cases very high-order allocations like THP are likely to be
+ * unsupported and the premature reclaim offsets the advantage of long-term
+ * fragmentation avoidance.
+ */
+int watermark_boost_factor __read_mostly;
+#else
 int watermark_boost_factor __read_mostly = 15000;
+#endif
 int watermark_scale_factor = 10;
 
 static unsigned long nr_kernel_pages __initdata;
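
Schematically, the default selected by this hunk (values straight from the #ifdef):

    /*
     * CONFIG_DISCONTIGMEM=y  ->  watermark_boost_factor == 0      (boost off)
     * otherwise              ->  watermark_boost_factor == 15000  (default)
     */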
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index f77888ec93f1..0bb4d712b80c 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2032,7 +2032,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
                if (match_kern)
                        match_kern->match_size = ret;
 
-               if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+               /* rule should have no remaining data after target */
+               if (type == EBT_COMPAT_TARGET && size_left)
                        return -EINVAL;
 
                match32 = (struct compat_ebt_entry_mwt *) buf;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 25d9bef27d03..3c89ca325947 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1183,25 +1183,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
        return dst;
 }
 
-static void ipv4_link_failure(struct sk_buff *skb)
+static void ipv4_send_dest_unreach(struct sk_buff *skb)
 {
        struct ip_options opt;
-       struct rtable *rt;
        int res;
 
        /* Recompile ip options since IPCB may not be valid anymore.
+        * Also check we have a reasonable ipv4 header.
         */
-       memset(&opt, 0, sizeof(opt));
-       opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+       if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
+           ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
+               return;
 
-       rcu_read_lock();
-       res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
-       rcu_read_unlock();
+       memset(&opt, 0, sizeof(opt));
+       if (ip_hdr(skb)->ihl > 5) {
+               if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
+                       return;
+               opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
 
-       if (res)
-               return;
+               rcu_read_lock();
+               res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+               rcu_read_unlock();
 
+               if (res)
+                       return;
+       }
        __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
+}
+
+static void ipv4_link_failure(struct sk_buff *skb)
+{
+       struct rtable *rt;
+
+       ipv4_send_dest_unreach(skb);
 
        rt = skb_rtable(skb);
        if (rt)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ba0fc4b18465..eeb4041fa5f9 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -49,6 +49,7 @@ static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 static int comp_sack_nr_max = 255;
 static u32 u32_max_div_HZ = UINT_MAX / HZ;
+static int one_day_secs = 24 * 3600;
 
 /* obsolete */
 static int sysctl_tcp_low_latency __read_mostly;
@@ -1151,7 +1152,9 @@ static struct ctl_table ipv4_net_table[] = {
                .data           = &init_net.ipv4.sysctl_tcp_min_rtt_wlen,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one_day_secs
        },
        {
                .procname       = "tcp_autocorking",
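
With proc_dointvec_minmax bounded by zero and one_day_secs, out-of-range writes are rejected rather than stored. A hedged userspace illustration (the path follows from the table entry; 86401 is one second past one_day_secs):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/ipv4/tcp_min_rtt_wlen", "w");

            if (!f)
                    return 1;
            fprintf(f, "86401\n");          /* out of range */
            if (fclose(f) != 0)             /* write rejected, EINVAL */
                    perror("tcp_min_rtt_wlen");
            return 0;
    }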
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index dc07fcc7938e..802db01e3075 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 
 #include <net/ncsi.h>
@@ -667,7 +668,10 @@ static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
        ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN);
        /* Increase mac address by 1 for BMC's address */
-       saddr.sa_data[ETH_ALEN - 1]++;
+       eth_addr_inc((u8 *)saddr.sa_data);
+       if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
+               return -ENXIO;
+
        ret = ops->ndo_set_mac_address(ndev, &saddr);
        if (ret < 0)
                netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index acb124ce92ec..e2aac80f9b7b 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3624,6 +3624,9 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 
 static void nft_set_destroy(struct nft_set *set)
 {
+       if (WARN_ON(set->use > 0))
+               return;
+
        set->ops->destroy(set);
        module_put(to_set_type(set->ops)->owner);
        kfree(set->name);
@@ -3664,7 +3667,7 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
                NL_SET_BAD_ATTR(extack, attr);
                return PTR_ERR(set);
        }
-       if (!list_empty(&set->bindings) ||
+       if (set->use ||
            (nlh->nlmsg_flags & NLM_F_NONREC && atomic_read(&set->nelems) > 0)) {
                NL_SET_BAD_ATTR(extack, attr);
                return -EBUSY;
@@ -3694,6 +3697,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
        struct nft_set_binding *i;
        struct nft_set_iter iter;
 
+       if (set->use == UINT_MAX)
+               return -EOVERFLOW;
+
        if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
                return -EBUSY;
 
@@ -3721,6 +3727,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
        binding->chain = ctx->chain;
        list_add_tail_rcu(&binding->list, &set->bindings);
        nft_set_trans_bind(ctx, set);
+       set->use++;
 
        return 0;
 }
@@ -3740,6 +3747,25 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
 }
 EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
 
+void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+                             struct nft_set_binding *binding,
+                             enum nft_trans_phase phase)
+{
+       switch (phase) {
+       case NFT_TRANS_PREPARE:
+               set->use--;
+               return;
+       case NFT_TRANS_ABORT:
+       case NFT_TRANS_RELEASE:
+               set->use--;
+               /* fall through */
+       default:
+               nf_tables_unbind_set(ctx, set, binding,
+                                    phase == NFT_TRANS_COMMIT);
+       }
+}
+EXPORT_SYMBOL_GPL(nf_tables_deactivate_set);
+
 void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
 {
        if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
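
nf_tables_deactivate_set() gives set-referencing expressions one helper that keeps set->use consistent across the transaction phases, which makes the dynset/lookup/objref_map diffs below mechanical. Schematically (expr_priv stands in for each expression's private struct):

    static void expr_deactivate(const struct nft_ctx *ctx,
                                const struct nft_expr *expr,
                                enum nft_trans_phase phase)
    {
            struct expr_priv *priv = nft_expr_priv(expr);

            /* drops set->use on PREPARE/ABORT/RELEASE, unbinding as needed */
            nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
    }

    static void expr_activate(const struct nft_ctx *ctx,
                              const struct nft_expr *expr)
    {
            struct expr_priv *priv = nft_expr_priv(expr);

            priv->set->use++;       /* re-take the count when restored */
    }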
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index f1172f99752b..eb7f9a5f2aeb 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -241,11 +241,15 @@ static void nft_dynset_deactivate(const struct nft_ctx *ctx,
 {
        struct nft_dynset *priv = nft_expr_priv(expr);
 
-       if (phase == NFT_TRANS_PREPARE)
-               return;
+       nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_dynset_activate(const struct nft_ctx *ctx,
+                               const struct nft_expr *expr)
+{
+       struct nft_dynset *priv = nft_expr_priv(expr);
 
-       nf_tables_unbind_set(ctx, priv->set, &priv->binding,
-                            phase == NFT_TRANS_COMMIT);
+       priv->set->use++;
 }
 
 static void nft_dynset_destroy(const struct nft_ctx *ctx,
@@ -293,6 +297,7 @@ static const struct nft_expr_ops nft_dynset_ops = {
        .eval           = nft_dynset_eval,
        .init           = nft_dynset_init,
        .destroy        = nft_dynset_destroy,
+       .activate       = nft_dynset_activate,
        .deactivate     = nft_dynset_deactivate,
        .dump           = nft_dynset_dump,
 };
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 14496da5141d..161c3451a747 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -127,11 +127,15 @@ static void nft_lookup_deactivate(const struct nft_ctx *ctx,
 {
        struct nft_lookup *priv = nft_expr_priv(expr);
 
-       if (phase == NFT_TRANS_PREPARE)
-               return;
+       nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_lookup_activate(const struct nft_ctx *ctx,
+                               const struct nft_expr *expr)
+{
+       struct nft_lookup *priv = nft_expr_priv(expr);
 
-       nf_tables_unbind_set(ctx, priv->set, &priv->binding,
-                            phase == NFT_TRANS_COMMIT);
+       priv->set->use++;
 }
 
 static void nft_lookup_destroy(const struct nft_ctx *ctx,
@@ -222,6 +226,7 @@ static const struct nft_expr_ops nft_lookup_ops = {
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
        .eval           = nft_lookup_eval,
        .init           = nft_lookup_init,
+       .activate       = nft_lookup_activate,
        .deactivate     = nft_lookup_deactivate,
        .destroy        = nft_lookup_destroy,
        .dump           = nft_lookup_dump,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index ae178e914486..bf92a40dd1b2 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -64,21 +64,34 @@ static int nft_objref_dump(struct sk_buff *skb, const struct nft_expr *expr)
        return -1;
 }
 
-static void nft_objref_destroy(const struct nft_ctx *ctx,
-                              const struct nft_expr *expr)
+static void nft_objref_deactivate(const struct nft_ctx *ctx,
+                                 const struct nft_expr *expr,
+                                 enum nft_trans_phase phase)
 {
        struct nft_object *obj = nft_objref_priv(expr);
 
+       if (phase == NFT_TRANS_COMMIT)
+               return;
+
        obj->use--;
 }
 
+static void nft_objref_activate(const struct nft_ctx *ctx,
+                               const struct nft_expr *expr)
+{
+       struct nft_object *obj = nft_objref_priv(expr);
+
+       obj->use++;
+}
+
 static struct nft_expr_type nft_objref_type;
 static const struct nft_expr_ops nft_objref_ops = {
        .type           = &nft_objref_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_object *)),
        .eval           = nft_objref_eval,
        .init           = nft_objref_init,
-       .destroy        = nft_objref_destroy,
+       .activate       = nft_objref_activate,
+       .deactivate     = nft_objref_deactivate,
        .dump           = nft_objref_dump,
 };
 
@@ -161,11 +174,15 @@ static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
 {
        struct nft_objref_map *priv = nft_expr_priv(expr);
 
-       if (phase == NFT_TRANS_PREPARE)
-               return;
+       nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_objref_map_activate(const struct nft_ctx *ctx,
+                                   const struct nft_expr *expr)
+{
+       struct nft_objref_map *priv = nft_expr_priv(expr);
 
-       nf_tables_unbind_set(ctx, priv->set, &priv->binding,
-                            phase == NFT_TRANS_COMMIT);
+       priv->set->use++;
 }
 
 static void nft_objref_map_destroy(const struct nft_ctx *ctx,
@@ -182,6 +199,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
        .eval           = nft_objref_map_eval,
        .init           = nft_objref_map_init,
+       .activate       = nft_objref_map_activate,
        .deactivate     = nft_objref_map_deactivate,
        .destroy        = nft_objref_map_destroy,
        .dump           = nft_objref_map_dump,
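
The nft_lookup and nft_objref changes above follow one transaction-aware pattern: deactivation of a set binding is delegated to nf_tables_deactivate_set(), which knows which transaction phase it is running in, and a new ->activate callback re-takes the use count when an aborted transaction restores the rule. Below is a minimal userspace model of that use-counter discipline; the names (binding_activate, PHASE_*) are illustrative stand-ins, not the kernel API.

#include <assert.h>

/* Illustrative transaction phases, modeled on nft_trans_phase. */
enum phase { PHASE_PREPARE, PHASE_ABORT, PHASE_COMMIT };

struct set { int use; };        /* count of rules bound to the set */

/* Deactivation: on commit the count was already dropped, so only a
 * tentative or aborting teardown releases the reference. */
static void binding_deactivate(struct set *s, enum phase p)
{
        if (p == PHASE_COMMIT)
                return;
        s->use--;
}

/* Activation: an aborted transaction restores the rule, so the
 * reference it held must be re-taken. */
static void binding_activate(struct set *s)
{
        s->use++;
}

int main(void)
{
        struct set s = { .use = 1 };            /* one rule bound */

        binding_deactivate(&s, PHASE_PREPARE);  /* removal queued */
        binding_activate(&s);                   /* transaction aborted */
        assert(s.use == 1);                     /* count restored */
        return 0;
}
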
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 1d3144d19903..71ffd1a6dc7c 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
        int i;
        int rc = proto_register(&nr_proto, 0);
 
-       if (rc != 0)
-               goto out;
+       if (rc)
+               return rc;
 
        if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to 
large\n");
-               return -1;
+               pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
+                      __func__);
+               rc = -EINVAL;
+               goto unregister_proto;
        }
 
        dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
-       if (dev_nr == NULL) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate 
device array\n");
-               return -1;
+       if (!dev_nr) {
+               pr_err("NET/ROM: %s - unable to allocate device array\n",
+                      __func__);
+               rc = -ENOMEM;
+               goto unregister_proto;
        }
 
        for (i = 0; i < nr_ndevs; i++) {
@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
                sprintf(name, "nr%d", i);
                dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
                if (!dev) {
-                       printk(KERN_ERR "NET/ROM: nr_proto_init - unable to 
allocate device structure\n");
+                       rc = -ENOMEM;
                        goto fail;
                }
 
                dev->base_addr = i;
-               if (register_netdev(dev)) {
-                       printk(KERN_ERR "NET/ROM: nr_proto_init - unable to 
register network device\n");
+               rc = register_netdev(dev);
+               if (rc) {
                        free_netdev(dev);
                        goto fail;
                }
@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
                dev_nr[i] = dev;
        }
 
-       if (sock_register(&nr_family_ops)) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register 
socket family\n");
+       rc = sock_register(&nr_family_ops);
+       if (rc)
                goto fail;
-       }
 
-       register_netdevice_notifier(&nr_dev_notifier);
+       rc = register_netdevice_notifier(&nr_dev_notifier);
+       if (rc)
+               goto out_sock;
 
        ax25_register_pid(&nr_pid);
        ax25_linkfail_register(&nr_linkfail_notifier);
 
 #ifdef CONFIG_SYSCTL
-       nr_register_sysctl();
+       rc = nr_register_sysctl();
+       if (rc)
+               goto out_sysctl;
 #endif
 
        nr_loopback_init();
 
-       proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
-       proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
-       proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
-out:
-       return rc;
+       rc = -ENOMEM;
+       if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
+               goto proc_remove1;
+       if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
+                            &nr_neigh_seqops))
+               goto proc_remove2;
+       if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
+                            &nr_node_seqops))
+               goto proc_remove3;
+
+       return 0;
+
+proc_remove3:
+       remove_proc_entry("nr_neigh", init_net.proc_net);
+proc_remove2:
+       remove_proc_entry("nr", init_net.proc_net);
+proc_remove1:
+
+       nr_loopback_clear();
+       nr_rt_free();
+
+#ifdef CONFIG_SYSCTL
+       nr_unregister_sysctl();
+out_sysctl:
+#endif
+       ax25_linkfail_release(&nr_linkfail_notifier);
+       ax25_protocol_release(AX25_P_NETROM);
+       unregister_netdevice_notifier(&nr_dev_notifier);
+out_sock:
+       sock_unregister(PF_NETROM);
 fail:
        while (--i >= 0) {
                unregister_netdev(dev_nr[i]);
                free_netdev(dev_nr[i]);
        }
        kfree(dev_nr);
+unregister_proto:
        proto_unregister(&nr_proto);
-       rc = -1;
-       goto out;
+       return rc;
 }
 
 module_init(nr_proto_init);
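
The nr_proto_init() rewrite above replaces printk() plus "return -1" with real errno values and a reverse-order unwind chain, so every step that succeeded before a failure is undone (nr_register_sysctl() now returns -ENOMEM on failure so it can participate). A minimal compilable sketch of the same goto-unwind shape, with hypothetical step/undo pairs standing in for proto_register(), sock_register() and friends:

#include <errno.h>
#include <stdio.h>

/* Hypothetical init/cleanup pairs; only the control flow matters. */
static int step_a(void) { return 0; }
static void undo_a(void) { }
static int step_b(void) { return 0; }
static void undo_b(void) { }
static int step_c(void) { return -ENOMEM; }     /* pretend this fails */

/* Each failure jumps to the label that unwinds everything already
 * done, in reverse (LIFO) order, and the errno is propagated. */
static int module_init_example(void)
{
        int rc;

        rc = step_a();
        if (rc)
                return rc;
        rc = step_b();
        if (rc)
                goto undo_a;
        rc = step_c();
        if (rc)
                goto undo_b;
        return 0;

undo_b:
        undo_b();
undo_a:
        undo_a();
        return rc;
}

int main(void)
{
        printf("init: %d\n", module_init_example());    /* prints -12 */
        return 0;
}
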
diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c
index 215ad22a9647..93d13f019981 100644
--- a/net/netrom/nr_loopback.c
+++ b/net/netrom/nr_loopback.c
@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
        }
 }
 
-void __exit nr_loopback_clear(void)
+void nr_loopback_clear(void)
 {
        del_timer_sync(&loopback_timer);
        skb_queue_purge(&loopback_queue);
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 6485f593e2f0..b76aa668a94b 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
 /*
  *     Free all memory associated with the nodes and routes lists.
  */
-void __exit nr_rt_free(void)
+void nr_rt_free(void)
 {
        struct nr_neigh *s = NULL;
        struct nr_node  *t = NULL;
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
index ba1c368b3f18..771011b84270 100644
--- a/net/netrom/sysctl_net_netrom.c
+++ b/net/netrom/sysctl_net_netrom.c
@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
        { }
 };
 
-void __init nr_register_sysctl(void)
+int __init nr_register_sysctl(void)
 {
        nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
+       if (!nr_table_header)
+               return -ENOMEM;
+       return 0;
 }
 
 void nr_unregister_sysctl(void)
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 65387e1e6964..cd7e01ea8144 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -506,6 +506,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
        struct rds_sock *rs = rds_sk_to_rs(sk);
        int ret = 0;
 
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
+
        lock_sock(sk);
 
        switch (uaddr->sa_family) {
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 17c9d9f0c848..0f4398e7f2a7 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        /* We allow an RDS socket to be bound to either IPv4 or IPv6
         * address.
         */
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
        if (uaddr->sa_family == AF_INET) {
                struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
 
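Both RDS hunks add the same guard: validate the caller-supplied addr_len before reading uaddr->sa_family, since userspace controls the length. offsetofend() is the kernel helper equal to offsetof(member) + sizeof(member); the userspace sketch below re-derives it locally to show the check in isolation.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/socket.h>

/* Userspace stand-in for the kernel's offsetofend() helper. */
#define offsetofend(type, member) \
        (offsetof(type, member) + sizeof(((type *)0)->member))

/* Refuse to read sa_family from a buffer too short to contain it. */
static int check_addr(const struct sockaddr *uaddr, int addr_len)
{
        if (addr_len < (int)offsetofend(struct sockaddr, sa_family))
                return -EINVAL;
        return uaddr->sa_family;        /* safe to dereference now */
}

int main(void)
{
        struct sockaddr sa = { .sa_family = AF_INET };

        printf("%d\n", check_addr(&sa, 0));                  /* -EINVAL */
        printf("%d\n", check_addr(&sa, sizeof(sa)));         /* AF_INET */
        return 0;
}
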
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
index e0f70c4051b6..01e764f8f224 100644
--- a/net/rds/ib_fmr.c
+++ b/net/rds/ib_fmr.c
@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
        else
                pool = rds_ibdev->mr_1m_pool;
 
+       if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+               queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+
+       /* Switch pools if one of the pool is reaching upper limit */
+       if (atomic_read(&pool->dirty_count) >=  pool->max_items * 9 / 10) {
+               if (pool->pool_type == RDS_IB_MR_8K_POOL)
+                       pool = rds_ibdev->mr_1m_pool;
+               else
+                       pool = rds_ibdev->mr_8k_pool;
+       }
+
        ibmr = rds_ib_try_reuse_ibmr(pool);
        if (ibmr)
                return ibmr;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 63c8d107adcf..d664e9ade74d 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -454,9 +454,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
        struct rds_ib_mr *ibmr = NULL;
        int iter = 0;
 
-       if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
-               queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
-
        while (1) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr)
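
The two RDS/IB hunks move the dirty-count flush trigger from the reuse path into rds_ib_alloc_fmr() and add a fallback: when the preferred pool (8K or 1M) is at least 90% dirty, allocation switches to the other pool. A tiny self-contained model of that heuristic, with the integer arithmetic matching the patch:

#include <assert.h>

struct pool { long dirty_count; long max_items; };

/* Fall back to the other pool once the preferred one is >= 90% dirty,
 * mirroring the max_items * 9 / 10 comparison in the patch. */
static struct pool *pick_pool(struct pool *preferred, struct pool *other)
{
        if (preferred->dirty_count >= preferred->max_items * 9 / 10)
                return other;
        return preferred;
}

int main(void)
{
        struct pool p8k = { .dirty_count = 95, .max_items = 100 };
        struct pool p1m = { .dirty_count = 10, .max_items = 100 };

        assert(pick_pool(&p8k, &p1m) == &p1m);  /* 8K pool saturated */
        assert(pick_pool(&p1m, &p8k) == &p1m);  /* 1M pool still fine */
        return 0;
}
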
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 7af4f99c4a93..094a6621f8e8 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 
 static struct sk_buff_head loopback_queue;
+#define ROSE_LOOPBACK_LIMIT 1000
 static struct timer_list loopback_timer;
 
 static void rose_set_loopback_timer(void);
@@ -35,29 +36,27 @@ static int rose_loopback_running(void)
 
 int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
 {
-       struct sk_buff *skbn;
+       struct sk_buff *skbn = NULL;
 
-       skbn = skb_clone(skb, GFP_ATOMIC);
+       if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
+               skbn = skb_clone(skb, GFP_ATOMIC);
 
-       kfree_skb(skb);
-
-       if (skbn != NULL) {
+       if (skbn) {
+               consume_skb(skb);
                skb_queue_tail(&loopback_queue, skbn);
 
                if (!rose_loopback_running())
                        rose_set_loopback_timer();
+       } else {
+               kfree_skb(skb);
        }
 
        return 1;
 }
 
-
 static void rose_set_loopback_timer(void)
 {
-       del_timer(&loopback_timer);
-
-       loopback_timer.expires  = jiffies + 10;
-       add_timer(&loopback_timer);
+       mod_timer(&loopback_timer, jiffies + 10);
 }
 
 static void rose_loopback_timer(struct timer_list *unused)
@@ -68,8 +67,12 @@ static void rose_loopback_timer(struct timer_list *unused)
        struct sock *sk;
        unsigned short frametype;
        unsigned int lci_i, lci_o;
+       int count;
 
-       while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
+       for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
+               skb = skb_dequeue(&loopback_queue);
+               if (!skb)
+                       return;
                if (skb->len < ROSE_MIN_LEN) {
                        kfree_skb(skb);
                        continue;
@@ -106,6 +109,8 @@ static void rose_loopback_timer(struct timer_list *unused)
                        kfree_skb(skb);
                }
        }
+       if (!skb_queue_empty(&loopback_queue))
+               mod_timer(&loopback_timer, jiffies + 1);
 }
 
 void __exit rose_loopback_clear(void)
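
The ROSE loopback fix bounds the backlog at ROSE_LOOPBACK_LIMIT on the producer side (and uses consume_skb() on the successful path, keeping drop accounting honest), then processes at most that many frames per timer tick, re-arming the timer if work remains. A flood can therefore neither grow the queue without bound nor monopolize the CPU in one timer run. A self-contained model of the budgeting, with the queue reduced to a counter:

#include <stdbool.h>
#include <stdio.h>

#define LIMIT 1000

static int queued;              /* stand-in for skb_queue_len() */

/* Producer side: queue only while below the cap, drop otherwise. */
static bool try_enqueue(void)
{
        if (queued >= LIMIT)
                return false;   /* caller frees the frame */
        queued++;
        return true;
}

/* Consumer side: handle at most LIMIT frames per tick, then re-arm
 * the timer if anything is left rather than looping until empty. */
static void timer_tick(void)
{
        for (int budget = 0; budget < LIMIT && queued > 0; budget++)
                queued--;       /* "deliver" one frame */
        if (queued > 0)
                printf("re-arm timer, %d frames left\n", queued);
}

int main(void)
{
        for (int i = 0; i < 2500; i++)
                try_enqueue();  /* only the first 1000 make it in */
        timer_tick();
        return 0;
}
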
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9128aa0e40aa..b4ffb81223ad 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1155,19 +1155,19 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
  * handle data received on the local endpoint
  * - may be called in interrupt context
  *
- * The socket is locked by the caller and this prevents the socket from being
- * shut down and the local endpoint from going away, thus sk_user_data will not
- * be cleared until this function returns.
+ * [!] Note that as this is called from the encap_rcv hook, the socket is not
+ * held locked by the caller and nothing prevents sk_user_data on the UDP from
+ * being cleared in the middle of processing this function.
  *
  * Called with the RCU read lock held from the IP layer via UDP.
  */
 int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 {
+       struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
        struct rxrpc_connection *conn;
        struct rxrpc_channel *chan;
        struct rxrpc_call *call = NULL;
        struct rxrpc_skb_priv *sp;
-       struct rxrpc_local *local = udp_sk->sk_user_data;
        struct rxrpc_peer *peer = NULL;
        struct rxrpc_sock *rx = NULL;
        unsigned int channel;
@@ -1175,6 +1175,10 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 
        _enter("%p", udp_sk);
 
+       if (unlikely(!local)) {
+               kfree_skb(skb);
+               return 0;
+       }
        if (skb->tstamp == 0)
                skb->tstamp = ktime_get_real();
 
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 0906e51d3cfb..10317dbdab5f 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -304,7 +304,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
        ret = -ENOMEM;
 sock_error:
        mutex_unlock(&rxnet->local_mutex);
-       kfree(local);
+       if (local)
+               call_rcu(&local->rcu, rxrpc_local_rcu);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
 
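The two rxrpc changes are two halves of one RCU lifetime rule: the encap_rcv hook reads sk_user_data through rcu_dereference_sk_user_data() and tolerates NULL (the socket is not locked there, as the updated comment explains), while the failure path in rxrpc_lookup_local() frees the partially set up local through call_rcu() rather than kfree(), since a concurrent reader inside an RCU section may still hold the pointer. A kernel-style sketch of the rule; struct obj, slot, and the functions are illustrative, not the rxrpc types:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
        struct rcu_head rcu;
        int value;
};

static struct obj __rcu *slot;

/* Reader: runs under rcu_read_lock() and must tolerate the pointer
 * having been cleared concurrently. */
static int reader(void)
{
        struct obj *o = rcu_dereference(slot);

        if (unlikely(!o))
                return 0;       /* publisher went away; drop the work */
        return o->value;
}

/* Teardown: never kfree() an object a reader may still hold; defer
 * the free past the grace period instead. */
static void obj_free_rcu(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct obj, rcu));
}

static void unpublish(struct obj *o)
{
        RCU_INIT_POINTER(slot, NULL);
        call_rcu(&o->rcu, obj_free_rcu);
}
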
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 12bb23b8e0c5..261131dfa1f1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
        h->last_refresh = now;
 }
 
+static inline int cache_is_valid(struct cache_head *h);
 static void cache_fresh_locked(struct cache_head *head, time_t expiry,
                                struct cache_detail *detail);
 static void cache_fresh_unlocked(struct cache_head *head,
@@ -105,6 +106,8 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
                        if (cache_is_expired(detail, tmp)) {
                                hlist_del_init_rcu(&tmp->cache_list);
                                detail->entries --;
+                               if (cache_is_valid(tmp) == -EAGAIN)
+                                       set_bit(CACHE_NEGATIVE, &tmp->flags);
                                cache_fresh_locked(tmp, 0, detail);
                                freeme = tmp;
                                break;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 4ad3586da8f0..340a6e7c43a7 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
        if (msg->rep_type)
                tipc_tlv_init(msg->rep, msg->rep_type);
 
-       if (cmd->header)
-               (*cmd->header)(msg);
+       if (cmd->header) {
+               err = (*cmd->header)(msg);
+               if (err) {
+                       kfree_skb(msg->rep);
+                       msg->rep = NULL;
+                       return err;
+               }
+       }
 
        arg = nlmsg_new(0, GFP_KERNEL);
        if (!arg) {
@@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
        if (!bearer)
                return -EMSGSIZE;
 
-       len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+       len = TLV_GET_DATA_LEN(msg->req);
+       len -= offsetof(struct tipc_bearer_config, name);
+       if (len <= 0)
+               return -EINVAL;
+
+       len = min_t(int, len, TIPC_MAX_BEARER_NAME);
        if (!string_is_valid(b->name, len))
                return -EINVAL;
 
@@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
 
        lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
-       len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+       len = TLV_GET_DATA_LEN(msg->req);
+       len -= offsetof(struct tipc_link_config, name);
+       if (len <= 0)
+               return -EINVAL;
+
+       len = min_t(int, len, TIPC_MAX_LINK_NAME);
        if (!string_is_valid(lc->name, len))
                return -EINVAL;
 
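Both TIPC hunks fix the same bounds bug: the old code clamped the raw TLV data length to the name size without accounting for the fixed fields that sit in front of the name, so string_is_valid() could read past the received payload when the TLV was shorter than the header. The fix subtracts the name's offset first and rejects non-positive remainders. A userspace sketch of the corrected check; struct config is only shaped like struct tipc_bearer_config, not the real layout:

#include <errno.h>
#include <stddef.h>
#include <string.h>

/* Illustrative TLV payload: fixed header fields before a name. */
struct config {
        unsigned int priority;
        unsigned int disc_domain;
        char name[32];
};

/* Only the bytes after the header belong to the name; validate
 * against that count, never the raw TLV data length. */
static int name_len_ok(const struct config *c, int data_len)
{
        int len = data_len - (int)offsetof(struct config, name);

        if (len <= 0)
                return -EINVAL;                 /* header truncated */
        if (len > (int)sizeof(c->name))
                len = sizeof(c->name);
        /* string_is_valid() analogue: a NUL must fall inside len. */
        return memchr(c->name, '\0', len) ? 0 : -EINVAL;
}

int main(void)
{
        struct config c = { .name = "eth:data0" };

        /* A TLV shorter than the header is rejected, not treated as
         * a (negative) name length. */
        return name_len_ok(&c, sizeof(unsigned int)) == -EINVAL ? 0 : 1;
}
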
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 4b5ff3d44912..5f1d937c4be9 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -884,7 +884,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
        goto release_netdev;
 
 free_sw_resources:
+       up_read(&device_offload_lock);
        tls_sw_free_resources_rx(sk);
+       down_read(&device_offload_lock);
 release_ctx:
        ctx->priv_ctx_rx = NULL;
 release_netdev:
@@ -919,8 +921,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
        }
 out:
        up_read(&device_offload_lock);
-       kfree(tls_ctx->rx.rec_seq);
-       kfree(tls_ctx->rx.iv);
        tls_sw_release_resources_rx(sk);
 }
 
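The free_sw_resources error path above called tls_sw_free_resources_rx() while still holding device_offload_lock for read; the fix drops and re-takes the lock around the call so the callee can never wait on a lock the caller already holds. A minimal pthread model of the same shape (the function names are placeholders):

#include <pthread.h>

static pthread_rwlock_t offload_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Placeholder for tls_sw_free_resources_rx(): assume it may be
 * serialized against writers of offload_lock. */
static void free_sw_resources(void) { }

static void error_path(void)
{
        /* Drop the read lock around the call, as the fix does. */
        pthread_rwlock_unlock(&offload_lock);
        free_sw_resources();
        pthread_rwlock_rdlock(&offload_lock);
}

int main(void)
{
        pthread_rwlock_rdlock(&offload_lock);
        error_path();
        pthread_rwlock_unlock(&offload_lock);
        return 0;
}
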
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 450a6dbc5a88..ef8934fd8698 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -193,6 +193,9 @@ static void update_chksum(struct sk_buff *skb, int headln)
 
 static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
 {
+       struct sock *sk = skb->sk;
+       int delta;
+
        skb_copy_header(nskb, skb);
 
        skb_put(nskb, skb->len);
@@ -200,11 +203,15 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
        update_chksum(nskb, headln);
 
        nskb->destructor = skb->destructor;
-       nskb->sk = skb->sk;
+       nskb->sk = sk;
        skb->destructor = NULL;
        skb->sk = NULL;
-       refcount_add(nskb->truesize - skb->truesize,
-                    &nskb->sk->sk_wmem_alloc);
+
+       delta = nskb->truesize - skb->truesize;
+       if (likely(delta < 0))
+               WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
+       else if (delta)
+               refcount_add(delta, &sk->sk_wmem_alloc);
 }
 
 /* This function may be called after the user socket is already
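
complete_skb() previously passed nskb->truesize - skb->truesize straight to refcount_add(); when the clone is smaller, that delta is negative and underflows the unsigned refcount. The fix splits the sign cases and warns if the subtraction would drop sk_wmem_alloc to zero (the socket must still hold other references). A userspace model of the split with C11 atomics; the numbers are made up:

#include <assert.h>
#include <stdatomic.h>

/* Stand-in for sk->sk_wmem_alloc: an unsigned-style counter that a
 * signed delta must never be added to directly. */
static atomic_int wmem = 1000;

static void account_delta(int delta)
{
        if (delta < 0) {
                /* refcount_sub_and_test() analogue: reaching zero
                 * here would be a bug, hence the assert. */
                int old = atomic_fetch_sub(&wmem, -delta);
                assert(old + delta > 0);
        } else if (delta) {
                atomic_fetch_add(&wmem, delta);
        }
}

int main(void)
{
        account_delta(-200);    /* clone smaller: give memory back */
        account_delta(50);      /* clone larger: charge the socket */
        assert(atomic_load(&wmem) == 850);
        return 0;
}
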
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 96dbac91ac6e..ce5dd79365a7 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -304,11 +304,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 #endif
        }
 
-       if (ctx->rx_conf == TLS_SW) {
-               kfree(ctx->rx.rec_seq);
-               kfree(ctx->rx.iv);
+       if (ctx->rx_conf == TLS_SW)
                tls_sw_free_resources_rx(sk);
-       }
 
 #ifdef CONFIG_TLS_DEVICE
        if (ctx->rx_conf == TLS_HW)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index d2d4f7c0d4be..839a0a0b5dfa 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1830,6 +1830,9 @@ void tls_sw_release_resources_rx(struct sock *sk)
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
 
+       kfree(tls_ctx->rx.rec_seq);
+       kfree(tls_ctx->rx.iv);
+
        if (ctx->aead_recv) {
                kfree_skb(ctx->recv_pkt);
                ctx->recv_pkt = NULL;
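
The three TLS hunks (tls_device.c, tls_main.c, tls_sw.c) move the kfree() of rx.rec_seq and rx.iv out of both callers and into tls_sw_release_resources_rx(), the one teardown routine the two paths already share, eliminating a double free. The general single-owner shape, as a tiny sketch:

#include <stdlib.h>

struct rx_ctx { char *iv; char *rec_seq; };

/* Single point of release: every teardown path funnels through here,
 * and the NULL stores make a second call harmless. */
static void release_rx(struct rx_ctx *c)
{
        free(c->iv);
        c->iv = NULL;
        free(c->rec_seq);
        c->rec_seq = NULL;
}

int main(void)
{
        struct rx_ctx c = { malloc(16), malloc(16) };

        release_rx(&c);
        release_rx(&c);         /* idempotent: free(NULL) is a no-op */
        return 0;
}
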
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f061167062bc..a9f69c3a3e0b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5490,7 +5490,7 @@ static void alc_headset_btn_callback(struct hda_codec *codec,
        jack->jack->button_state = report;
 }
 
-static void alc295_fixup_chromebook(struct hda_codec *codec,
+static void alc_fixup_headset_jack(struct hda_codec *codec,
                                    const struct hda_fixup *fix, int action)
 {
 
@@ -5500,16 +5500,6 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
                                                    alc_headset_btn_callback);
                snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
                                      SND_JACK_HEADSET, alc_headset_btn_keymap);
-               switch (codec->core.vendor_id) {
-               case 0x10ec0295:
-                       alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
-                       alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
-                       break;
-               case 0x10ec0236:
-                       alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
-                       alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
-                       break;
-               }
                break;
        case HDA_FIXUP_ACT_INIT:
                switch (codec->core.vendor_id) {
@@ -5530,6 +5520,25 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
        }
 }
 
+static void alc295_fixup_chromebook(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       switch (action) {
+       case HDA_FIXUP_ACT_INIT:
+               switch (codec->core.vendor_id) {
+               case 0x10ec0295:
+                       alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
+                       alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
+                       break;
+               case 0x10ec0236:
+                       alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
+                       alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
+                       break;
+               }
+               break;
+       }
+}
+
 static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
                                  const struct hda_fixup *fix, int action)
 {
@@ -5684,6 +5693,7 @@ enum {
        ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
        ALC255_FIXUP_ACER_HEADSET_MIC,
        ALC295_FIXUP_CHROME_BOOK,
+       ALC225_FIXUP_HEADSET_JACK,
        ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
        ALC225_FIXUP_WYSE_AUTO_MUTE,
        ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
@@ -6645,6 +6655,12 @@ static const struct hda_fixup alc269_fixups[] = {
        [ALC295_FIXUP_CHROME_BOOK] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc295_fixup_chromebook,
+               .chained = true,
+               .chain_id = ALC225_FIXUP_HEADSET_JACK
+       },
+       [ALC225_FIXUP_HEADSET_JACK] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_headset_jack,
        },
        [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
@@ -7143,7 +7159,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
        {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
        {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
-       {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
+       {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-headset-jack"},
+       {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"},
        {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
        {}
 };
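
The Realtek change splits the Chromebook fixup in two: the headset-jack plumbing becomes its own entry (ALC225_FIXUP_HEADSET_JACK, exposed as the "alc-headset-jack" model string), and ALC295_FIXUP_CHROME_BOOK keeps only the coefficient resets while chaining to it via .chained/.chain_id. A minimal model of how such chained table entries are applied; the table layout is illustrative, not the real struct hda_fixup:

#include <stdbool.h>
#include <stdio.h>

/* Applying one fixup follows its chain_id until an unchained entry. */
struct fixup {
        const char *name;
        bool chained;
        int chain_id;
};

enum { FIX_HEADSET_JACK, FIX_CHROME_BOOK };

static const struct fixup fixups[] = {
        [FIX_HEADSET_JACK] = { "headset-jack", false, 0 },
        [FIX_CHROME_BOOK]  = { "chrome-book", true, FIX_HEADSET_JACK },
};

static void apply(int id)
{
        for (;;) {
                const struct fixup *f = &fixups[id];

                printf("applying %s\n", f->name);
                if (!f->chained)
                        break;
                id = f->chain_id;
        }
}

int main(void)
{
        apply(FIX_CHROME_BOOK);         /* applies both entries */
        apply(FIX_HEADSET_JACK);        /* jack handling alone */
        return 0;
}
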
