diff --git a/Makefile b/Makefile
index ad0c045d36cd..a872ece51ee5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 38
+SUBLEVEL = 39
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index d2315ffd8f12..f13ae153fb24 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE      4096
 
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  */
-
-#define ELF_ET_DYN_BASE        (TASK_SIZE / 3 * 2)
+/* This is the base location for PIE (ET_DYN with INTERP) loads. */
+#define ELF_ET_DYN_BASE                0x400000UL
 
 /* When the program starts, a1 contains a pointer to a function to be 
    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index e9bd58793464..49a5d8ccae27 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -75,14 +75,10 @@
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <GIC_PPI 13
-                       (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
-                            <GIC_PPI 14
-                       (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
-                            <GIC_PPI 11
-                       (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
-                            <GIC_PPI 10
-                       (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        soc {
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index a55384f4a5d7..afa23b057def 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -113,12 +113,11 @@
 #define ELF_EXEC_PAGESIZE      PAGE_SIZE
 
 /*
- * This is the location that an ET_DYN program is loaded if exec'ed.  Typical
- * use of this is to invoke "./ld.so someprog" to test out a new version of
- * the loader.  We need to make sure that it is out of the way of the program
- * that it will "exec", and that there is sufficient room for the brk.
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
  */
-#define ELF_ET_DYN_BASE        (2 * TASK_SIZE_64 / 3)
+#define ELF_ET_DYN_BASE                0x100000000UL
 
 #ifndef __ASSEMBLY__
 
@@ -169,7 +168,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 
 #ifdef CONFIG_COMPAT
 
-#define COMPAT_ELF_ET_DYN_BASE         (2 * TASK_SIZE_32 / 3)
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE         0x000400000UL
 
 /* AArch32 registers. */
 #define COMPAT_ELF_NGREG               18
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 16e024602737..cb7697dec294 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -20,6 +20,8 @@
 ** flush/purge and allocate "regular" cacheable pages for everything.
 */
 
+#define DMA_ERROR_CODE (~(dma_addr_t)0)
+
 #ifdef CONFIG_PA11
 extern struct dma_map_ops pcxl_dma_ops;
 extern struct dma_map_ops pcx_dma_ops;
@@ -54,12 +56,13 @@ parisc_walk_tree(struct device *dev)
                        break;
                }
        }
-       BUG_ON(!dev->platform_data);
        return dev->platform_data;
 }
-               
-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
-       
+
+#define GET_IOC(dev) ({                                        \
+       void *__pdata = parisc_walk_tree(dev);          \
+       __pdata ? HBA_DATA(__pdata)->iommu : NULL;      \
+})
 
 #ifdef CONFIG_IOMMU_CCIO
 struct parisc_device;
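
Note on the GET_IOC() rework above: it relies on a GCC statement
expression so the macro can both test and yield a value, turning the
old BUG_ON() crash into a NULL return that each caller handles. A
minimal userspace sketch of the same pattern (struct hba and GET_IOMMU
are illustrative names, not the kernel's):

    #include <stdio.h>
    #include <stddef.h>

    struct hba { void *iommu; };

    /* Yields the iommu pointer, or NULL when no hba data is attached. */
    #define GET_IOMMU(hba_ptr) ({                       \
            struct hba *__pdata = (hba_ptr);            \
            __pdata ? __pdata->iommu : NULL;            \
    })

    int main(void)
    {
            struct hba h = { .iommu = &h };

            printf("attached: %p\n", GET_IOMMU(&h));   /* valid pointer */
            printf("missing:  %p\n", GET_IOMMU(NULL)); /* NULL, no crash */
            return 0;
    }
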
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 59be25764433..a81226257878 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context)
        mtctl(__space_to_prot(context), 8);
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+               struct mm_struct *next, struct task_struct *tsk)
 {
-
        if (prev != next) {
                mtctl(__pa(next->pgd), 25);
                load_context(next->context);
        }
 }
 
+static inline void switch_mm(struct mm_struct *prev,
+               struct mm_struct *next, struct task_struct *tsk)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       switch_mm_irqs_off(prev, next, tsk);
+       local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
 #define deactivate_mm(tsk,mm)  do { } while (0)
 
 static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
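
The "#define switch_mm_irqs_off switch_mm_irqs_off" line above is the
usual idiom for advertising an arch-provided variant to generic code,
which otherwise falls back to plain switch_mm(). The generic side
(include/linux/mmu_context.h) looks roughly like this:

    /* Architectures that care about IRQ state in switch_mm can
     * override this; everyone else gets switch_mm() semantics. */
    #ifndef switch_mm_irqs_off
    # define switch_mm_irqs_off switch_mm
    #endif
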
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 3cfef1de8061..8ec2ff8fae0d 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -361,7 +361,7 @@
        ENTRY_SAME(ni_syscall)  /* 263: reserved for vserver */
        ENTRY_SAME(add_key)
        ENTRY_SAME(request_key)         /* 265 */
-       ENTRY_SAME(keyctl)
+       ENTRY_COMP(keyctl)
        ENTRY_SAME(ioprio_set)
        ENTRY_SAME(ioprio_get)
        ENTRY_SAME(inotify_init)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 040c48fc5391..b6f3b5e98810 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -366,7 +366,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
                case 15:        /* Data TLB miss fault/Data page fault */
                        /* send SIGSEGV when outside of vma */
                        if (!vma ||
-                           address < vma->vm_start || address > vma->vm_end) {
+                           address < vma->vm_start || address >= vma->vm_end) {
                                si.si_signo = SIGSEGV;
                                si.si_code = SEGV_MAPERR;
                                break;
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index ee46ffef608e..743ad7a400d6 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -23,12 +23,13 @@
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE      PAGE_SIZE
 
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  */
-
-#define ELF_ET_DYN_BASE        0x20000000
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE                (is_32bit_task() ? 0x000400000UL : \
+                                                  0x100000000UL)
 
 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
 
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1736c7d3c94c..8d665f1b29f8 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -158,14 +158,13 @@ extern unsigned int vdso_enabled;
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE      4096
 
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. 64-bit
-   tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_compat_task() ? \
-                               (STACK_TOP / 3 * 2) : \
-                               (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE                (is_compat_task() ? 0x000400000UL : \
+                                                   0x100000000UL)
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index fc61739150e7..f960a043cdeb 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
 
 static bool avx2_usable(void)
 {
-       if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+       if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
                && boot_cpu_has(X86_FEATURE_BMI1)
                && boot_cpu_has(X86_FEATURE_BMI2))
                return true;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 94aad6364b47..c152db2ab687 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -245,12 +245,13 @@ extern int force_personality32;
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE      4096
 
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  */
-
-#define ELF_ET_DYN_BASE                (TASK_SIZE / 3 * 2)
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE                (mmap_is_ia32() ? 0x000400000UL : \
+                                                 0x100000000UL)
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
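
With the ELF_ET_DYN_BASE changes above, a PIE executable's own text
should now map at or above the fixed base (0x400000 for 32-bit tasks,
4GB for 64-bit) plus randomization, instead of at a TASK_SIZE-derived
address. A quick userspace probe (assumes Linux with /proc mounted;
build with -pie -fPIE):

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/self/maps", "r");

            /* The first line is the lowest mapping; for a PIE binary
             * that is its own text segment, i.e. the chosen base. */
            if (f && fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            if (f)
                    fclose(f);
            return 0;
    }
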
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 78f3760ca1f2..b601ddac5719 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -405,6 +405,8 @@
 #define MSR_IA32_TSC_ADJUST             0x0000003b
 #define MSR_IA32_BNDCFGS               0x00000d90
 
+#define MSR_IA32_BNDCFGS_RSVD          0x00000ffc
+
 #define MSR_IA32_XSS                   0x00000da0
 
 #define FEATURE_CONTROL_LOCKED                         (1<<0)
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 35058c2c0eea..9368fecca3ee 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -144,6 +144,14 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
        return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
 
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 7, 0);
+       return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
+
 static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 04e6bbbd8736..3dc6d8017ce9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2987,7 +2987,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
                break;
        case MSR_IA32_BNDCFGS:
-               if (!kvm_mpx_supported())
+               if (!kvm_mpx_supported() ||
+                   (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
                        return 1;
                msr_info->data = vmcs_read64(GUEST_BNDCFGS);
                break;
@@ -3069,7 +3070,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vmcs_writel(GUEST_SYSENTER_ESP, data);
                break;
        case MSR_IA32_BNDCFGS:
-               if (!kvm_mpx_supported())
+               if (!kvm_mpx_supported() ||
+                   (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+                       return 1;
+               if (is_noncanonical_address(data & PAGE_MASK) ||
+                   (data & MSR_IA32_BNDCFGS_RSVD))
                        return 1;
                vmcs_write64(GUEST_BNDCFGS, data);
                break;
@@ -6474,7 +6479,6 @@ static __init int hardware_setup(void)
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-       vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
 
        memcpy(vmx_msr_bitmap_legacy_x2apic,
                        vmx_msr_bitmap_legacy, PAGE_SIZE);
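
The new vmx_set_msr() check above combines a reserved-bits test
(MSR_IA32_BNDCFGS_RSVD covers bits 2-11) with a canonicality test on
the bound-directory base. A standalone model of that validation (the
48-bit canonical rule here assumes 4-level paging):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BNDCFGS_RSVD 0x00000ffcULL        /* bits 2..11 reserved */
    #define BASE_MASK    (~0xfffULL)          /* PAGE_MASK equivalent */

    /* Canonical iff bits 63..47 replicate bit 47. */
    static bool is_canonical_48(uint64_t addr)
    {
            return (uint64_t)((int64_t)(addr << 16) >> 16) == addr;
    }

    static bool bndcfgs_valid(uint64_t data)
    {
            return !(data & BNDCFGS_RSVD) &&
                   is_canonical_48(data & BASE_MASK);
    }

    int main(void)
    {
            printf("%d\n", bndcfgs_valid(0x1001ULL));             /* 1 */
            printf("%d\n", bndcfgs_valid(0x0ffcULL));             /* 0: reserved bits */
            printf("%d\n", bndcfgs_valid(0xf000000000000000ULL)); /* 0: non-canonical */
            return 0;
    }
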
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a7b46798c81d..39efa7e6c0c0 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -268,6 +268,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
                        value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
                else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
                        value = PM_QOS_LATENCY_ANY;
+               else
+                       return -EINVAL;
        }
        ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
        return ret < 0 ? ret : n;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 62e4de2aa8d1..f98121f11f7c 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -60,6 +60,8 @@ static LIST_HEAD(wakeup_sources);
 
 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
 
+DEFINE_STATIC_SRCU(wakeup_srcu);
+
 static struct wakeup_source deleted_ws = {
        .name = "deleted",
        .lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
@@ -198,7 +200,7 @@ void wakeup_source_remove(struct wakeup_source *ws)
        spin_lock_irqsave(&events_lock, flags);
        list_del_rcu(&ws->entry);
        spin_unlock_irqrestore(&events_lock, flags);
-       synchronize_rcu();
+       synchronize_srcu(&wakeup_srcu);
 }
 EXPORT_SYMBOL_GPL(wakeup_source_remove);
 
@@ -332,12 +334,12 @@ void device_wakeup_detach_irq(struct device *dev)
 void device_wakeup_arm_wake_irqs(void)
 {
        struct wakeup_source *ws;
+       int srcuidx;
 
-       rcu_read_lock();
+       srcuidx = srcu_read_lock(&wakeup_srcu);
        list_for_each_entry_rcu(ws, &wakeup_sources, entry)
                dev_pm_arm_wake_irq(ws->wakeirq);
-
-       rcu_read_unlock();
+       srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
 
 /**
@@ -348,12 +350,12 @@ void device_wakeup_arm_wake_irqs(void)
 void device_wakeup_disarm_wake_irqs(void)
 {
        struct wakeup_source *ws;
+       int srcuidx;
 
-       rcu_read_lock();
+       srcuidx = srcu_read_lock(&wakeup_srcu);
        list_for_each_entry_rcu(ws, &wakeup_sources, entry)
                dev_pm_disarm_wake_irq(ws->wakeirq);
-
-       rcu_read_unlock();
+       srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
 
 /**
@@ -805,10 +807,10 @@ EXPORT_SYMBOL_GPL(pm_wakeup_event);
 void pm_print_active_wakeup_sources(void)
 {
        struct wakeup_source *ws;
-       int active = 0;
+       int srcuidx, active = 0;
        struct wakeup_source *last_activity_ws = NULL;
 
-       rcu_read_lock();
+       srcuidx = srcu_read_lock(&wakeup_srcu);
        list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
                if (ws->active) {
                        pr_info("active wakeup source: %s\n", ws->name);
@@ -824,7 +826,7 @@ void pm_print_active_wakeup_sources(void)
        if (!active && last_activity_ws)
                pr_info("last active wakeup source: %s\n",
                        last_activity_ws->name);
-       rcu_read_unlock();
+       srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
 EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
 
@@ -951,8 +953,9 @@ void pm_wakep_autosleep_enabled(bool set)
 {
        struct wakeup_source *ws;
        ktime_t now = ktime_get();
+       int srcuidx;
 
-       rcu_read_lock();
+       srcuidx = srcu_read_lock(&wakeup_srcu);
        list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
                spin_lock_irq(&ws->lock);
                if (ws->autosleep_enabled != set) {
@@ -966,7 +969,7 @@ void pm_wakep_autosleep_enabled(bool set)
                }
                spin_unlock_irq(&ws->lock);
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
 #endif /* CONFIG_PM_AUTOSLEEP */
 
@@ -1027,15 +1030,16 @@ static int print_wakeup_source_stats(struct seq_file *m,
 static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
 {
        struct wakeup_source *ws;
+       int srcuidx;
 
        seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
                "expire_count\tactive_since\ttotal_time\tmax_time\t"
                "last_change\tprevent_suspend_time\n");
 
-       rcu_read_lock();
+       srcuidx = srcu_read_lock(&wakeup_srcu);
        list_for_each_entry_rcu(ws, &wakeup_sources, entry)
                print_wakeup_source_stats(m, ws);
-       rcu_read_unlock();
+       srcu_read_unlock(&wakeup_srcu, srcuidx);
 
        print_wakeup_source_stats(m, &deleted_ws);
 
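
The wakeup.c conversion above swaps rcu_read_lock()/rcu_read_unlock()
for SRCU so that sleeping callees become legal inside the list walks.
The read-side shape is always the same: srcu_read_lock() returns an
index that must be handed back to the matching unlock. A minimal
sketch, kernel-module context assumed:

    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(example_srcu);

    static void walk_sleepable(void)
    {
            int idx;

            idx = srcu_read_lock(&example_srcu);
            /* ... traverse an RCU-protected list; unlike under
             * rcu_read_lock(), sleeping is permitted here ... */
            srcu_read_unlock(&example_srcu, idx);
    }
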
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 97e34799e077..6fcf25f795d4 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1000,7 +1000,9 @@ static int atmel_sha_finup(struct ahash_request *req)
        ctx->flags |= SHA_FLAGS_FINUP;
 
        err1 = atmel_sha_update(req);
-       if (err1 == -EINPROGRESS || err1 == -EBUSY)
+       if (err1 == -EINPROGRESS ||
+           (err1 == -EBUSY && (ahash_request_flags(req) &
+                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err1;
 
        /*
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 3bda6e5e2a45..0d743c634f25 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2014,10 +2014,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 {
        struct ablkcipher_request *req = context;
        struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
+#ifdef DEBUG
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
@@ -2037,6 +2037,14 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 #endif
 
        ablkcipher_unmap(jrdev, edesc, req);
+
+       /*
+        * The crypto API expects us to set the IV (req->info) to the last
+        * ciphertext block. This is used e.g. by the CTS mode.
+        */
+       scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+                                ivsize, 0);
+
        kfree(edesc);
 
        ablkcipher_request_complete(req, err);
@@ -2047,10 +2055,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 {
        struct ablkcipher_request *req = context;
        struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
+#ifdef DEBUG
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
@@ -2069,6 +2077,14 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 #endif
 
        ablkcipher_unmap(jrdev, edesc, req);
+
+       /*
+        * The crypto API expects us to set the IV (req->info) to the last
+        * ciphertext block.
+        */
+       scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+                                ivsize, 0);
+
        kfree(edesc);
 
        ablkcipher_request_complete(req, err);
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 2474f1494955..631337c2e4a7 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -491,7 +491,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
        ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
        if (!ret) {
                /* in progress */
-               wait_for_completion_interruptible(&result.completion);
+               wait_for_completion(&result.completion);
                ret = result.err;
 #ifdef DEBUG
                print_hex_dump(KERN_ERR,
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4ff9762..3ce1d5cdcbd2 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
        ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
        if (!ret) {
                /* in progress */
-               wait_for_completion_interruptible(&result.completion);
+               wait_for_completion(&result.completion);
                ret = result.err;
 #ifdef DEBUG
                print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 0418a2f41dc0..571de2f284cf 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -816,7 +816,7 @@ static void talitos_unregister_rng(struct device *dev)
  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
  */
 #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
-#define TALITOS_MAX_KEY_SIZE           96
+#define TALITOS_MAX_KEY_SIZE           (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
 #define TALITOS_MAX_IV_LENGTH          16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 
 struct talitos_ctx {
@@ -1495,6 +1495,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
 {
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
 
+       if (keylen > TALITOS_MAX_KEY_SIZE) {
+               crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+
        memcpy(&ctx->key, key, keylen);
        ctx->keylen = keylen;
 
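
The talitos setkey fix above is the classic fixed-buffer guard:
validate the caller-supplied length against the context buffer before
memcpy(). A userspace reduction of the bug and its fix (the 96-byte
size mirrors the driver's old limit and is illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_KEY_SIZE 96

    struct ctx { unsigned char key[MAX_KEY_SIZE]; unsigned int keylen; };

    static int setkey(struct ctx *c, const unsigned char *key,
                      unsigned int keylen)
    {
            if (keylen > sizeof(c->key))
                    return -EINVAL;      /* reject instead of overflowing */
            memcpy(c->key, key, keylen);
            c->keylen = keylen;
            return 0;
    }

    int main(void)
    {
            struct ctx c;
            unsigned char big[200] = { 0 };

            printf("%d\n", setkey(&c, big, 64));  /* 0 */
            printf("%d\n", setkey(&c, big, 200)); /* -EINVAL */
            return 0;
    }
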
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 19d642eae096..24d388d74011 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -646,6 +646,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
        int enabled;
        u64 val;
 
+       if (cpu >= nr_cpu_ids)
+               return -EINVAL;
+
        if (gic_irq_in_rdist(d))
                return -EINVAL;
 
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 380a64115a98..258bc8deae3b 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -230,7 +230,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
        /* Wait for 100ms as Octeon resets. */
        mdelay(100);
 
-       if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) {
+       if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
                dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
                        oct->octeon_id);
                return 1;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index e779af88621b..cda32d5b9977 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -48,7 +48,7 @@ int lio_cn6xxx_soft_reset(struct octeon_device *oct)
        /* Wait for 10ms as Octeon resets. */
        mdelay(100);
 
-       if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
+       if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
                dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
                return 1;
        }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ea582342dd8f..9d3722930c95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2671,8 +2671,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
                PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
        stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
        stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
-       stats->tx_carrier_errors =
-               PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
        stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
                           stats->rx_frame_errors;
        stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 2115c8aacc5b..8beecd615a21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -67,6 +67,7 @@ enum {
 
 enum {
        MLX5_DROP_NEW_HEALTH_WORK,
+       MLX5_DROP_NEW_RECOVERY_WORK,
 };
 
 static u8 get_nic_state(struct mlx5_core_dev *dev)
@@ -193,7 +194,7 @@ static void health_care(struct work_struct *work)
        mlx5_handle_bad_state(dev);
 
        spin_lock(&health->wq_lock);
-       if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+       if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
                schedule_delayed_work(&health->recover_work, recover_delay);
        else
                dev_err(&dev->pdev->dev,
@@ -328,6 +329,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
        init_timer(&health->timer);
        health->sick = 0;
        clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+       clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
        health->health = &dev->iseg->health;
        health->health_counter = &dev->iseg->health_counter;
 
@@ -350,11 +352,22 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
 
        spin_lock(&health->wq_lock);
        set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+       set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
        spin_unlock(&health->wq_lock);
        cancel_delayed_work_sync(&health->recover_work);
        cancel_work_sync(&health->work);
 }
 
+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
+{
+       struct mlx5_core_health *health = &dev->priv.health;
+
+       spin_lock(&health->wq_lock);
+       set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+       spin_unlock(&health->wq_lock);
+       cancel_delayed_work_sync(&dev->priv.health.recover_work);
+}
+
 void mlx5_health_cleanup(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_health *health = &dev->priv.health;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 5bea0bf2b484..b3309f2ed7dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1169,7 +1169,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        int err = 0;
 
        if (cleanup)
-               mlx5_drain_health_wq(dev);
+               mlx5_drain_health_recovery(dev);
 
        mutex_lock(&dev->intf_state_mutex);
        if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 4ca461322d60..b1af7cd190a0 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1505,8 +1505,8 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
                *index = entry->index;
                resolved = false;
        } else if (removing) {
-               ofdpa_neigh_del(trans, found);
                *index = found->index;
+               ofdpa_neigh_del(trans, found);
        } else if (updating) {
                ofdpa_neigh_update(found, trans, NULL, false);
                resolved = !is_zero_ether_addr(found->eth_dst);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 00279da6a1e8..c4ada7227f40 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4399,12 +4399,9 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
        struct efx_ef10_filter_table *table = efx->filter_state;
        struct net_device *net_dev = efx->net_dev;
        struct netdev_hw_addr *uc;
-       int addr_count;
        unsigned int i;
 
-       addr_count = netdev_uc_count(net_dev);
        table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
-       table->dev_uc_count = 1 + addr_count;
        ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
        i = 1;
        netdev_for_each_uc_addr(uc, net_dev) {
@@ -4415,6 +4412,8 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
                ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
                i++;
        }
+
+       table->dev_uc_count = i;
 }
 
 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
@@ -4422,11 +4421,10 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
        struct efx_ef10_filter_table *table = efx->filter_state;
        struct net_device *net_dev = efx->net_dev;
        struct netdev_hw_addr *mc;
-       unsigned int i, addr_count;
+       unsigned int i;
 
        table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
 
-       addr_count = netdev_mc_count(net_dev);
        i = 0;
        netdev_for_each_mc_addr(mc, net_dev) {
                if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 4865221aa9ac..b88f7d65953d 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -908,7 +908,7 @@ static void decode_txts(struct dp83640_private *dp83640,
        if (overflow) {
                pr_debug("tx timestamp queue overflow, count %d\n", overflow);
                while (skb) {
-                       skb_complete_tx_timestamp(skb, NULL);
+                       kfree_skb(skb);
                        skb = skb_dequeue(&dp83640->tx_queue);
                }
                return;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index fab56c9350cf..222918828655 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -622,6 +622,8 @@ static int ksz9031_read_status(struct phy_device *phydev)
        if ((regval & 0xFF) == 0xFF) {
                phy_init_hw(phydev);
                phydev->link = 0;
+               if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+                       phydev->drv->config_intr(phydev);
        }
 
        return 0;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 642df9391193..578bd5001d93 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -787,15 +787,10 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
 static void vrf_dev_uninit(struct net_device *dev)
 {
        struct net_vrf *vrf = netdev_priv(dev);
-       struct net_device *port_dev;
-       struct list_head *iter;
 
        vrf_rtable_release(dev, vrf);
        vrf_rt6_release(dev, vrf);
 
-       netdev_for_each_lower_dev(dev, port_dev, iter)
-               vrf_del_slave(dev, port_dev);
-
        free_percpu(dev->dstats);
        dev->dstats = NULL;
 }
@@ -1232,6 +1227,12 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
 
 static void vrf_dellink(struct net_device *dev, struct list_head *head)
 {
+       struct net_device *port_dev;
+       struct list_head *iter;
+
+       netdev_for_each_lower_dev(dev, port_dev, iter)
+               vrf_del_slave(dev, port_dev);
+
        unregister_netdevice_queue(dev, head);
 }
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 963e5339a4d7..983e941bdf29 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -227,15 +227,15 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
 
 static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
 {
-       struct vxlan_dev *vxlan;
+       struct vxlan_dev_node *node;
 
        /* For flow based devices, map all packets to VNI 0 */
        if (vs->flags & VXLAN_F_COLLECT_METADATA)
                vni = 0;
 
-       hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
-               if (vxlan->default_dst.remote_vni == vni)
-                       return vxlan;
+       hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
+               if (node->vxlan->default_dst.remote_vni == vni)
+                       return node->vxlan;
        }
 
        return NULL;
@@ -2309,17 +2309,22 @@ static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
        spin_lock(&vn->sock_lock);
-       hlist_del_init_rcu(&vxlan->hlist);
+       hlist_del_init_rcu(&vxlan->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+       hlist_del_init_rcu(&vxlan->hlist6.hlist);
+#endif
        spin_unlock(&vn->sock_lock);
 }
 
-static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
+static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
+                            struct vxlan_dev_node *node)
 {
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
        __be32 vni = vxlan->default_dst.remote_vni;
 
+       node->vxlan = vxlan;
        spin_lock(&vn->sock_lock);
-       hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+       hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
        spin_unlock(&vn->sock_lock);
 }
 
@@ -2778,6 +2783,7 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
 {
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
        struct vxlan_sock *vs = NULL;
+       struct vxlan_dev_node *node;
 
        if (!vxlan->cfg.no_share) {
                spin_lock(&vn->sock_lock);
@@ -2795,12 +2801,16 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
        if (IS_ERR(vs))
                return PTR_ERR(vs);
 #if IS_ENABLED(CONFIG_IPV6)
-       if (ipv6)
+       if (ipv6) {
                rcu_assign_pointer(vxlan->vn6_sock, vs);
-       else
+               node = &vxlan->hlist6;
+       } else
 #endif
+       {
                rcu_assign_pointer(vxlan->vn4_sock, vs);
-       vxlan_vs_add_dev(vs, vxlan);
+               node = &vxlan->hlist4;
+       }
+       vxlan_vs_add_dev(vs, vxlan, node);
        return 0;
 }
 
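
The vxlan change above works because a device that can appear in both
the IPv4 and IPv6 socket hash tables now embeds one list node per
table, instead of threading a single hlist_node through both (which
corrupted the chains when a device joined both tables). Reduced to its
shape (kernel types; my_dev stands in for struct vxlan_dev):

    #include <linux/list.h>

    struct my_dev;

    struct dev_node {                  /* one node per table membership */
            struct hlist_node hlist;
            struct my_dev *dev;        /* back-pointer used by lookups */
    };

    struct my_dev {
            struct dev_node hlist4;    /* linkage in the v4 socket table */
            struct dev_node hlist6;    /* separate linkage for v6 */
    };
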
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 72139b579b18..746f8c9a891d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -705,7 +705,7 @@ int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
 int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
                           struct sk_buff_head *pktq, uint totlen)
 {
-       struct sk_buff *glom_skb;
+       struct sk_buff *glom_skb = NULL;
        struct sk_buff *skb;
        u32 addr = sdiodev->sbwad;
        int err = 0;
@@ -726,10 +726,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
                        return -ENOMEM;
                err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
                                         glom_skb);
-               if (err) {
-                       brcmu_pkt_buf_free_skb(glom_skb);
+               if (err)
                        goto done;
-               }
 
                skb_queue_walk(pktq, skb) {
                        memcpy(skb->data, glom_skb->data, skb->len);
@@ -740,6 +738,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
                                            pktq);
 
 done:
+       brcmu_pkt_buf_free_skb(glom_skb);
        return err;
 }
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 0f5dde1f2248..1d4352e1ac81 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4928,6 +4928,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
                                        GFP_KERNEL);
        } else if (ieee80211_is_action(mgmt->frame_control)) {
+               if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
+                       brcmf_err("invalid action frame length\n");
+                       err = -EINVAL;
+                       goto exit;
+               }
                af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
                if (af_params == NULL) {
                        brcmf_err("unable to allocate frame\n");
@@ -6871,7 +6876,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
        wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info));
        if (!wiphy) {
                brcmf_err("Could not allocate wiphy device\n");
-               return NULL;
+               goto ops_out;
        }
        memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN);
        set_wiphy_dev(wiphy, busdev);
@@ -7005,6 +7010,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
        ifp->vif = NULL;
 wiphy_out:
        brcmf_free_wiphy(wiphy);
+ops_out:
        kfree(ops);
        return NULL;
 }
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 599cf5090030..cd442e46afb4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -281,6 +281,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 {
        RING_IDX req_prod = queue->rx.req_prod_pvt;
        int notify;
+       int err = 0;
 
        if (unlikely(!netif_carrier_ok(queue->info->netdev)))
                return;
@@ -295,8 +296,10 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
                struct xen_netif_rx_request *req;
 
                skb = xennet_alloc_one_rx_buffer(queue);
-               if (!skb)
+               if (!skb) {
+                       err = -ENOMEM;
                        break;
+               }
 
                id = xennet_rxidx(req_prod);
 
@@ -320,8 +323,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 
        queue->rx.req_prod_pvt = req_prod;
 
-       /* Not enough requests? Try again later. */
-       if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
+       /* Try again later if there are not enough requests or skb allocation
+        * failed.
+        * Enough requests is quantified as the sum of newly created slots and
+        * the unconsumed slots at the backend.
+        */
+       if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
+           unlikely(err)) {
                mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
                return;
        }
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 965911d9b36a..1b4d93e9157e 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -488,21 +488,24 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 
        rval = device_add(&nvmem->dev);
        if (rval)
-               goto out;
+               goto err_put_device;
 
        if (config->compat) {
                rval = nvmem_setup_compat(nvmem, config);
                if (rval)
-                       goto out;
+                       goto err_device_del;
        }
 
        if (config->cells)
                nvmem_add_cells(nvmem, config);
 
        return nvmem;
-out:
-       ida_simple_remove(&nvmem_ida, nvmem->id);
-       kfree(nvmem);
+
+err_device_del:
+       device_del(&nvmem->dev);
+err_put_device:
+       put_device(&nvmem->dev);
+
        return ERR_PTR(rval);
 }
 EXPORT_SYMBOL_GPL(nvmem_register);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 3ed6238f8f6e..c4953eca907d 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
 
        BUG_ON(!dev);
        ioc = GET_IOC(dev);
+       if (!ioc)
+               return DMA_ERROR_CODE;
 
        BUG_ON(size <= 0);
 
@@ -814,6 +816,10 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
        
        BUG_ON(!dev);
        ioc = GET_IOC(dev);
+       if (!ioc) {
+               WARN_ON(!ioc);
+               return;
+       }
 
        DBG_RUN("%s() iovp 0x%lx/%x\n",
                __func__, (long)iova, size);
@@ -918,6 +924,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
        
        BUG_ON(!dev);
        ioc = GET_IOC(dev);
+       if (!ioc)
+               return 0;
        
        DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 
@@ -990,6 +998,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
        BUG_ON(!dev);
        ioc = GET_IOC(dev);
+       if (!ioc) {
+               WARN_ON(!ioc);
+               return;
+       }
 
        DBG_RUN_SG("%s() START %d entries, %p,%x\n",
                __func__, nents, sg_virt(sglist), sglist->length);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 1133b5cc88ca..5c63b920b471 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -154,7 +154,10 @@ struct dino_device
 };
 
 /* Looks nice and keeps the compiler happy */
-#define DINO_DEV(d) ((struct dino_device *) d)
+#define DINO_DEV(d) ({                         \
+       void *__pdata = d;                      \
+       BUG_ON(!__pdata);                       \
+       (struct dino_device *)__pdata; })
 
 
 /*
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 2ec2aef4d211..bc286cbbbc9b 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -111,8 +111,10 @@ static u32 lba_t32;
 
 
 /* Looks nice and keeps the compiler happy */
-#define LBA_DEV(d) ((struct lba_device *) (d))
-
+#define LBA_DEV(d) ({                          \
+       void *__pdata = d;                      \
+       BUG_ON(!__pdata);                       \
+       (struct lba_device *)__pdata; })
 
 /*
 ** Only allow 8 subsidiary busses per LBA
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 151b86b6d2e2..56918d1c0ed3 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask)
                return 0;
 
        ioc = GET_IOC(dev);
+       if (!ioc)
+               return 0;
 
        /*
         * check if mask is >= than the current max IO Virt Address
@@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
        int pide;
 
        ioc = GET_IOC(dev);
+       if (!ioc)
+               return DMA_ERROR_CODE;
 
        /* save offset bits */
        offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@@ -813,6 +817,10 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
        DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
 
        ioc = GET_IOC(dev);
+       if (!ioc) {
+               WARN_ON(!ioc);
+               return;
+       }
        offset = iova & ~IOVP_MASK;
        iova ^= offset;        /* clear offset bits */
        size += offset;
@@ -952,6 +960,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
        DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 
        ioc = GET_IOC(dev);
+       if (!ioc)
+               return 0;
 
        /* Fast path single entry scatterlists. */
        if (nents == 1) {
@@ -1037,6 +1047,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
                __func__, nents, sg_virt(sglist), sglist->length);
 
        ioc = GET_IOC(dev);
+       if (!ioc) {
+               WARN_ON(!ioc);
+               return;
+       }
 
 #ifdef SBA_COLLECT_STATS
        ioc->usg_calls++;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8c3bf3d613c0..ce2c3c6349d4 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -2711,13 +2711,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
         * related to the kernel should not use this.
         */
                        data = vt_get_shift_state();
-                       ret = __put_user(data, p);
+                       ret = put_user(data, p);
                        break;
                case TIOCL_GETMOUSEREPORTING:
                        console_lock(); /* May be overkill */
                        data = mouse_reporting();
                        console_unlock();
-                       ret = __put_user(data, p);
+                       ret = put_user(data, p);
                        break;
                case TIOCL_SETVESABLANK:
                        console_lock();
@@ -2726,7 +2726,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
                        break;
                case TIOCL_GETKMSGREDIRECT:
                        data = vt_get_kmsg_redirect();
-                       ret = __put_user(data, p);
+                       ret = put_user(data, p);
                        break;
                case TIOCL_SETKMSGREDIRECT:
                        if (!capable(CAP_SYS_ADMIN)) {
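
Background for the vt.c change above: __put_user() omits the
access_ok() range check and is only safe on a pointer that was already
validated, while put_user() bundles the check. The tioclinux pointer
arrives straight from userspace, hence the switch. Schematically
(kernel context, simplified):

    /* put_user(x, p) pairs an access_ok() range check with
     * __put_user(x, p); on an unvalidated user pointer only the
     * combined form is safe. */
    static int copy_flag_to_user(int data, int __user *p)
    {
            return put_user(data, p);  /* 0 on success, -EFAULT on fault */
    }
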
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index cfd724f98332..1fdf4e5bf8c6 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -911,17 +911,60 @@ static int load_elf_binary(struct linux_binprm *bprm)
                elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
 
                vaddr = elf_ppnt->p_vaddr;
+               /*
+                * If we are loading ET_EXEC or we have already performed
+                * the ET_DYN load_addr calculations, proceed normally.
+                */
                if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
                        elf_flags |= MAP_FIXED;
                } else if (loc->elf_ex.e_type == ET_DYN) {
-                       /* Try and get dynamic programs out of the way of the
-                        * default mmap base, as well as whatever program they
-                        * might try to exec.  This is because the brk will
-                        * follow the loader, and is not movable.  */
-                       load_bias = ELF_ET_DYN_BASE - vaddr;
-                       if (current->flags & PF_RANDOMIZE)
-                               load_bias += arch_mmap_rnd();
-                       load_bias = ELF_PAGESTART(load_bias);
+                       /*
+                        * This logic is run once for the first LOAD Program
+                        * Header for ET_DYN binaries to calculate the
+                        * randomization (load_bias) for all the LOAD
+                        * Program Headers, and to calculate the entire
+                        * size of the ELF mapping (total_size). (Note that
+                        * load_addr_set is set to true later once the
+                        * initial mapping is performed.)
+                        *
+                        * There are effectively two types of ET_DYN
+                        * binaries: programs (i.e. PIE: ET_DYN with INTERP)
+                        * and loaders (ET_DYN without INTERP, since they
+                        * _are_ the ELF interpreter). The loaders must
+                        * be loaded away from programs since the program
+                        * may otherwise collide with the loader (especially
+                        * for ET_EXEC which does not have a randomized
+                        * position). For example to handle invocations of
+                        * "./ld.so someprog" to test out a new version of
+                        * the loader, the subsequent program that the
+                        * loader loads must avoid the loader itself, so
+                        * they cannot share the same load range. Sufficient
+                        * room for the brk must be allocated with the
+                        * loader as well, since brk must be available with
+                        * the loader.
+                        *
+                        * Therefore, programs are loaded offset from
+                        * ELF_ET_DYN_BASE and loaders are loaded into the
+                        * independently randomized mmap region (0 load_bias
+                        * without MAP_FIXED).
+                        */
+                       if (elf_interpreter) {
+                               load_bias = ELF_ET_DYN_BASE;
+                               if (current->flags & PF_RANDOMIZE)
+                                       load_bias += arch_mmap_rnd();
+                               elf_flags |= MAP_FIXED;
+                       } else
+                               load_bias = 0;
+
+                       /*
+                        * Since load_bias is used for all subsequent loading
+                        * calculations, we must lower it by the first vaddr
+                        * so that the remaining calculations based on the
+                        * ELF vaddrs will be correctly offset. The result
+                        * is then page aligned.
+                        */
+                       load_bias = ELF_PAGESTART(load_bias - vaddr);
+
                        total_size = total_mapping_size(elf_phdata,
                                                        loc->elf_ex.e_phnum);
                        if (!total_size) {
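
The effect of the binfmt_elf.c logic above can be modeled in a few
lines: PIE programs (ET_DYN with an interpreter) get ELF_ET_DYN_BASE
plus randomization as a fixed mapping, while bare loaders keep a zero
bias and let mmap pick the spot. A standalone sketch with simplified
stand-ins for the kernel constants:

    #include <stdbool.h>
    #include <stdio.h>

    #define ELF_ET_DYN_BASE  0x100000000UL      /* 4GB, as on 64-bit */
    #define ELF_PAGESTART(a) ((a) & ~0xfffUL)   /* simplified */

    static unsigned long pick_load_bias(bool has_interp,
                                        unsigned long first_vaddr,
                                        unsigned long rnd)
    {
            unsigned long bias = has_interp ? ELF_ET_DYN_BASE + rnd : 0;

            /* lower by the first vaddr so later p_vaddr offsets line up */
            return ELF_PAGESTART(bias - first_vaddr);
    }

    int main(void)
    {
            printf("PIE:    %#lx\n", pick_load_bias(true, 0, 0x1000));
            printf("loader: %#lx\n", pick_load_bias(false, 0, 0));
            return 0;
    }
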
diff --git a/fs/dcache.c b/fs/dcache.c
index 4485a48f4091..1dbc6b560fef 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1133,11 +1133,12 @@ void shrink_dcache_sb(struct super_block *sb)
                LIST_HEAD(dispose);
 
                freed = list_lru_walk(&sb->s_dentry_lru,
-                       dentry_lru_isolate_shrink, &dispose, UINT_MAX);
+                       dentry_lru_isolate_shrink, &dispose, 1024);
 
                this_cpu_sub(nr_dentry_unused, freed);
                shrink_dentry_list(&dispose);
-       } while (freed > 0);
+               cond_resched();
+       } while (list_lru_count(&sb->s_dentry_lru) > 0);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
 
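
A userspace analogue of the dcache change above: bound each pass (1024
entries in the patch), yield between passes, and loop on the remaining
count rather than on "freed > 0", so one huge LRU cannot hold the CPU
for the whole shrink:

    #include <sched.h>
    #include <stdio.h>

    #define BATCH 1024

    static long lru_count = 5000;      /* stand-in for the dentry LRU */

    static long walk_and_free(long max)
    {
            long n = lru_count < max ? lru_count : max;

            lru_count -= n;
            return n;                  /* "freed" this pass */
    }

    int main(void)
    {
            while (lru_count > 0) {
                    long freed = walk_and_free(BATCH);

                    printf("freed %ld, %ld left\n", freed, lru_count);
                    sched_yield();     /* cond_resched() stand-in */
            }
            return 0;
    }
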
diff --git a/fs/exec.c b/fs/exec.c
index 91441402d706..b8c43be24751 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -215,8 +215,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
        if (write) {
                unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
-               unsigned long ptr_size;
-               struct rlimit *rlim;
+               unsigned long ptr_size, limit;
 
                /*
                 * Since the stack will hold pointers to the strings, we
@@ -245,14 +244,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                        return page;
 
                /*
-                * Limit to 1/4-th the stack size for the argv+env strings.
+                * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
+                * (whichever is smaller) for the argv+env strings.
                 * This ensures that:
                 *  - the remaining binfmt code will not run out of stack space,
                 *  - the program will have a reasonable amount of stack left
                 *    to work from.
                 */
-               rlim = current->signal->rlim;
-               if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+               limit = _STK_LIM / 4 * 3;
+               limit = min(limit, rlimit(RLIMIT_STACK) / 4);
+               if (size > limit)
                        goto fail;
        }
 
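
Worked numbers for the new argv+env cap above: with the default
_STK_LIM of 8MB the cap is min(6MB, RLIMIT_STACK/4), so an unlimited
or enormous stack rlimit no longer lets the strings consume almost the
entire stack:

    #include <stdio.h>

    #define STK_LIM (8UL * 1024 * 1024)

    static unsigned long arg_limit(unsigned long stack_rlim)
    {
            unsigned long limit = STK_LIM / 4 * 3;   /* 6 MiB */
            unsigned long quarter = stack_rlim / 4;

            return quarter < limit ? quarter : limit;
    }

    int main(void)
    {
            printf("8MB rlimit:  %lu\n", arg_limit(8UL << 20));  /* 2 MiB */
            printf("huge rlimit: %lu\n", arg_limit(1UL << 40));  /* 6 MiB */
            return 0;
    }
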
diff --git a/fs/mount.h b/fs/mount.h
index d8295f273a2f..3603884a63dd 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -58,6 +58,7 @@ struct mount {
        struct mnt_namespace *mnt_ns;   /* containing namespace */
        struct mountpoint *mnt_mp;      /* where is it mounted */
        struct hlist_node mnt_mp_list;  /* list mounts with the same mountpoint */
+       struct list_head mnt_umounting; /* list entry for umount propagation */
 #ifdef CONFIG_FSNOTIFY
        struct hlist_head mnt_fsnotify_marks;
        __u32 mnt_fsnotify_mask;
diff --git a/fs/namespace.c b/fs/namespace.c
index 5e35057f07ac..d7360f9897b4 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -237,6 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name)
                INIT_LIST_HEAD(&mnt->mnt_slave_list);
                INIT_LIST_HEAD(&mnt->mnt_slave);
                INIT_HLIST_NODE(&mnt->mnt_mp_list);
+               INIT_LIST_HEAD(&mnt->mnt_umounting);
 #ifdef CONFIG_FSNOTIFY
                INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
 #endif
diff --git a/fs/pnode.c b/fs/pnode.c
index b394ca5307ec..d15c63e97ef1 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p)
        return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
 }
 
+static inline struct mount *last_slave(struct mount *p)
+{
+       return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
+}
+
 static inline struct mount *next_slave(struct mount *p)
 {
        return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
@@ -164,6 +169,19 @@ static struct mount *propagation_next(struct mount *m,
        }
 }
 
+static struct mount *skip_propagation_subtree(struct mount *m,
+                                               struct mount *origin)
+{
+       /*
+        * Advance m such that propagation_next will not return
+        * the slaves of m.
+        */
+       if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+               m = last_slave(m);
+
+       return m;
+}
+
 static struct mount *next_group(struct mount *m, struct mount *origin)
 {
        while (1) {
@@ -415,65 +433,104 @@ void propagate_mount_unlock(struct mount *mnt)
        }
 }
 
-/*
- * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
- */
-static void mark_umount_candidates(struct mount *mnt)
+static void umount_one(struct mount *mnt, struct list_head *to_umount)
 {
-       struct mount *parent = mnt->mnt_parent;
-       struct mount *m;
-
-       BUG_ON(parent == mnt);
-
-       for (m = propagation_next(parent, parent); m;
-                       m = propagation_next(m, parent)) {
-               struct mount *child = __lookup_mnt(&m->mnt,
-                                               mnt->mnt_mountpoint);
-               if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
-                       continue;
-               if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
-                       SET_MNT_MARK(child);
-               }
-       }
+       CLEAR_MNT_MARK(mnt);
+       mnt->mnt.mnt_flags |= MNT_UMOUNT;
+       list_del_init(&mnt->mnt_child);
+       list_del_init(&mnt->mnt_umounting);
+       list_move_tail(&mnt->mnt_list, to_umount);
 }
 
 /*
  * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
  * parent propagates to.
  */
-static void __propagate_umount(struct mount *mnt)
+static bool __propagate_umount(struct mount *mnt,
+                              struct list_head *to_umount,
+                              struct list_head *to_restore)
 {
-       struct mount *parent = mnt->mnt_parent;
-       struct mount *m;
+       bool progress = false;
+       struct mount *child;
 
-       BUG_ON(parent == mnt);
+       /*
+        * The state of the parent won't change if this mount is
+        * already unmounted or marked as without children.
+        */
+       if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
+               goto out;
 
-       for (m = propagation_next(parent, parent); m;
-                       m = propagation_next(m, parent)) {
-               struct mount *topper;
-               struct mount *child = __lookup_mnt(&m->mnt,
-                                               mnt->mnt_mountpoint);
-               /*
-                * umount the child only if the child has no children
-                * and the child is marked safe to unmount.
-                */
-               if (!child || !IS_MNT_MARKED(child))
+       /* Verify topper is the only grandchild that has not been
+        * speculatively unmounted.
+        */
+       list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+               if (child->mnt_mountpoint == mnt->mnt.mnt_root)
                        continue;
-               CLEAR_MNT_MARK(child);
+               if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
+                       continue;
+               /* Found a mounted child */
+               goto children;
+       }
 
-               /* If there is exactly one mount covering all of child
-                * replace child with that mount.
-                */
-               topper = find_topper(child);
-               if (topper)
-                       mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
-                                             topper);
+       /* Mark mounts that can be unmounted if not locked */
+       SET_MNT_MARK(mnt);
+       progress = true;
+
+       /* If a mount is without children and not locked, umount it. */
+       if (!IS_MNT_LOCKED(mnt)) {
+               umount_one(mnt, to_umount);
+       } else {
+children:
+               list_move_tail(&mnt->mnt_umounting, to_restore);
+       }
+out:
+       return progress;
+}
+
+static void umount_list(struct list_head *to_umount,
+                       struct list_head *to_restore)
+{
+       struct mount *mnt, *child, *tmp;
+       list_for_each_entry(mnt, to_umount, mnt_list) {
+               list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
+                       /* topper? */
+                       if (child->mnt_mountpoint == mnt->mnt.mnt_root)
+                               list_move_tail(&child->mnt_umounting, to_restore);
+                       else
+                               umount_one(child, to_umount);
+               }
+       }
+}
 
-               if (list_empty(&child->mnt_mounts)) {
-                       list_del_init(&child->mnt_child);
-                       child->mnt.mnt_flags |= MNT_UMOUNT;
-                       list_move_tail(&child->mnt_list, &mnt->mnt_list);
+static void restore_mounts(struct list_head *to_restore)
+{
+       /* Restore mounts to a clean working state */
+       while (!list_empty(to_restore)) {
+               struct mount *mnt, *parent;
+               struct mountpoint *mp;
+
+               mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
+               CLEAR_MNT_MARK(mnt);
+               list_del_init(&mnt->mnt_umounting);
+
+               /* Should this mount be reparented? */
+               mp = mnt->mnt_mp;
+               parent = mnt->mnt_parent;
+               while (parent->mnt.mnt_flags & MNT_UMOUNT) {
+                       mp = parent->mnt_mp;
+                       parent = parent->mnt_parent;
                }
+               if (parent != mnt->mnt_parent)
+                       mnt_change_mountpoint(parent, mp, mnt);
+       }
+}
+
+static void cleanup_umount_visitations(struct list_head *visited)
+{
+       while (!list_empty(visited)) {
+               struct mount *mnt =
+                       list_first_entry(visited, struct mount, mnt_umounting);
+               list_del_init(&mnt->mnt_umounting);
        }
 }
 
@@ -487,11 +544,68 @@ static void __propagate_umount(struct mount *mnt)
 int propagate_umount(struct list_head *list)
 {
        struct mount *mnt;
+       LIST_HEAD(to_restore);
+       LIST_HEAD(to_umount);
+       LIST_HEAD(visited);
+
+       /* Find candidates for unmounting */
+       list_for_each_entry_reverse(mnt, list, mnt_list) {
+               struct mount *parent = mnt->mnt_parent;
+               struct mount *m;
+
+               /*
+                * If this mount has already been visited, it is known that its
+                * entire peer group and all of their slaves in the propagation
+                * tree for the mountpoint have already been visited, and there is
+                * no need to visit them again.
+                */
+               if (!list_empty(&mnt->mnt_umounting))
+                       continue;
+
+               list_add_tail(&mnt->mnt_umounting, &visited);
+               for (m = propagation_next(parent, parent); m;
+                    m = propagation_next(m, parent)) {
+                       struct mount *child = __lookup_mnt(&m->mnt,
+                                                          mnt->mnt_mountpoint);
+                       if (!child)
+                               continue;
+
+                       if (!list_empty(&child->mnt_umounting)) {
+                               /*
+                                * If the child has already been visited, it is
+                                * known that its entire peer group and all of
+                                * their slaves in the propagation tree for the
+                                * mountpoint have already been visited, and there
+                                * is no need to visit this subtree again.
+                                */
+                               m = skip_propagation_subtree(m, parent);
+                               continue;
+                       } else if (child->mnt.mnt_flags & MNT_UMOUNT) {
+                               /*
+                                * We have come across a partially unmounted
+                                * mount in the list that has not been visited yet.
+                                * Remember it has been visited and continue
+                                * about our merry way.
+                                */
+                               list_add_tail(&child->mnt_umounting, &visited);
+                               continue;
+                       }
+
+                       /* Check the child and parents while progress is made */
+                       while (__propagate_umount(child,
+                                                 &to_umount, &to_restore)) {
+                               /* Is the parent a umount candidate? */
+                               child = child->mnt_parent;
+                               if (list_empty(&child->mnt_umounting))
+                                       break;
+                       }
+               }
+       }
 
-       list_for_each_entry_reverse(mnt, list, mnt_list)
-               mark_umount_candidates(mnt);
+       umount_list(&to_umount, &to_restore);
+       restore_mounts(&to_restore);
+       cleanup_umount_visitations(&visited);
+       list_splice_tail(&to_umount, list);
 
-       list_for_each_entry(mnt, list, mnt_list)
-               __propagate_umount(mnt);
        return 0;
 }
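
The reworked propagate_umount() above is a single pass: a mount whose
mnt_umounting entry is already on a list has been visited before, so its
whole peer group (and their slaves) can be skipped via
skip_propagation_subtree(). A minimal user-space sketch of the "empty
list entry doubles as a visited flag" idiom, assuming a toy intrusive
list (all names below are hypothetical, not the kernel API):

    #include <stdbool.h>

    /* Toy intrusive list entry: one that points at itself is unlinked,
     * which doubles as the "not visited yet" state. */
    struct tlist { struct tlist *next, *prev; };

    static void tlist_init(struct tlist *l) { l->next = l->prev = l; }
    static bool tlist_unlinked(const struct tlist *l) { return l->next == l; }

    static void tlist_push(struct tlist *head, struct tlist *l)
    {
        l->next = head->next;
        l->prev = head;
        head->next->prev = l;
        head->next = l;
    }

    /* First visit links the node onto 'visited'; a later encounter sees
     * a linked entry and skips the subtree. Draining 'visited' at the
     * end (cf. cleanup_umount_visitations()) clears every mark at once. */
    static bool visit_once(struct tlist *entry, struct tlist *visited)
    {
        if (!tlist_unlinked(entry))
            return false;   /* already seen */
        tlist_push(visited, entry);
        return true;
    }
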
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index fa3b155ce7e1..2d65bbd6dbd1 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -236,6 +236,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
                (cpu) = cpumask_next_zero((cpu), (mask)),       \
                (cpu) < nr_cpu_ids;)
 
+extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+
+/**
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_wrap(cpu, mask, start)                                    \
+       for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);      \
+            (cpu) < nr_cpumask_bits;                                           \
+            (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+
 /**
  * for_each_cpu_and - iterate over every cpu in both masks
  * @cpu: the (optionally unsigned) integer iterator
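
A hedged usage sketch for the new iterator: starting the scan at a
caller-chosen CPU keeps concurrent scanners from all converging on the
lowest set bit (try_claim() below is a made-up helper, not a kernel
function):

    int cpu, start = smp_processor_id();

    /* Walk 'mask' beginning at 'start', wrapping around; if nothing is
     * claimed the loop ends with cpu >= nr_cpu_ids. */
    for_each_cpu_wrap(cpu, mask, start) {
        if (try_claim(cpu))
            break;
    }
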
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index cb0ba9f2a9a2..fa7fd03cb5f9 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -44,6 +44,7 @@ struct list_lru_node {
        /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
        struct list_lru_memcg   *memcg_lrus;
 #endif
+       long nr_items;
 } ____cacheline_aligned_in_smp;
 
 struct list_lru {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index e1a903a5bb3e..6a620e01b040 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -788,6 +788,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                        struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2e347d4545cf..2c43993e079c 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -22,6 +22,7 @@ struct route_info {
 #include <net/flow.h>
 #include <net/ip6_fib.h>
 #include <net/sock.h>
+#include <net/lwtunnel.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/route.h>
@@ -232,4 +233,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
                return daddr;
 }
 
+static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
+{
+       return a->dst.dev == b->dst.dev &&
+              a->rt6i_idev == b->rt6i_idev &&
+              ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
+              !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
+}
 #endif
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 308adc4154f4..9fce47e3e13e 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -221,9 +221,17 @@ struct vxlan_config {
        bool                    no_share;
 };
 
+struct vxlan_dev_node {
+       struct hlist_node hlist;
+       struct vxlan_dev *vxlan;
+};
+
 /* Pseudo network device */
 struct vxlan_dev {
-       struct hlist_node hlist;        /* vni hash table */
+       struct vxlan_dev_node hlist4;   /* vni hash table for IPv4 socket */
+#if IS_ENABLED(CONFIG_IPV6)
+       struct vxlan_dev_node hlist6;   /* vni hash table for IPv6 socket */
+#endif
        struct list_head  next;         /* vxlan's per namespace list */
        struct vxlan_sock __rcu *vn4_sock;      /* listening socket for IPv4 */
 #if IS_ENABLED(CONFIG_IPV6)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 44c17f47d94c..8ce679d36c58 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -885,6 +885,11 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
        if (err)
                return err;
 
+       if (is_pointer_value(env, insn->src_reg)) {
+               verbose("R%d leaks addr into mem\n", insn->src_reg);
+               return -EACCES;
+       }
+
        /* check whether atomic_add can read the memory */
        err = check_mem_access(env, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_READ, -1);
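
The added check closes a pointer leak: without it, an unprivileged
program could xadd a pointer-typed register into a map value and read
the address back from user space. A sketch of the instruction pair that
is now rejected (illustrative only; it assumes the insn-building macros
from linux/filter.h and R0 already holding a map value pointer):

    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),          /* r2 = frame pointer (kernel address) */
    BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_2, 0), /* *(u64 *)(r0 + 0) += r2 -> -EACCES   */
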
diff --git a/kernel/extable.c b/kernel/extable.c
index e820ccee9846..4f06fc34313f 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -66,7 +66,7 @@ static inline int init_kernel_text(unsigned long addr)
        return 0;
 }
 
-int core_kernel_text(unsigned long addr)
+int notrace core_kernel_text(unsigned long addr)
 {
        if (addr >= (unsigned long)_stext &&
            addr < (unsigned long)_etext)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 56583e764ebf..e3944c4b072d 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1767,6 +1767,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
        if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
                /* Prior smp_mb__after_atomic() orders against prior enqueue. */
                WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
+               smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
                swake_up(&rdp_leader->nocb_wq);
        }
 }
@@ -2021,6 +2022,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
         * nocb_gp_head, where they await a grace period.
         */
        gotcbs = false;
+       smp_mb(); /* wakeup before ->nocb_head reads. */
        for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
                rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
                if (!rdp->nocb_gp_head)
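
The two smp_mb() calls pair across the leader/follower handoff: the
waker publishes callbacks before clearing ->nocb_leader_sleep, and the
leader must observe the flag before scanning ->nocb_head, otherwise a
callback can be enqueued yet never seen. A loose user-space analog of
the ordering requirement (a sketch only; C11 seq_cst atomics stand in
for the kernel's barriers, and the names are hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic bool leader_sleep = true;
    static void *_Atomic pending_cb;

    /* Waker: publish the callback, then clear the sleep flag (ordering
     * here plays the role of the smp_mb() before swake_up()). */
    static void waker(void *cb)
    {
        atomic_store(&pending_cb, cb);
        atomic_store(&leader_sleep, false);
    }

    /* Leader: check the flag first, then scan for callbacks (ordering
     * here plays the role of the smp_mb() before the ->nocb_head reads). */
    static bool leader_has_work(void)
    {
        if (atomic_load(&leader_sleep))
            return false;                         /* keep sleeping */
        return atomic_load(&pending_cb) != NULL;  /* must see the callback */
    }
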
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 692c948ae333..d177b21d04ce 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6102,6 +6102,9 @@ enum s_alloc {
  * Build an iteration mask that can exclude certain CPUs from the upwards
  * domain traversal.
  *
+ * Only CPUs that can arrive at this group should be considered to continue
+ * balancing.
+ *
  * Asymmetric node setups can result in situations where the domain tree is of
  * unequal depth, make sure to skip domains that already cover the entire
  * range.
@@ -6113,18 +6116,31 @@ enum s_alloc {
  */
 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
 {
-       const struct cpumask *span = sched_domain_span(sd);
+       const struct cpumask *sg_span = sched_group_cpus(sg);
        struct sd_data *sdd = sd->private;
        struct sched_domain *sibling;
        int i;
 
-       for_each_cpu(i, span) {
+       for_each_cpu(i, sg_span) {
                sibling = *per_cpu_ptr(sdd->sd, i);
-               if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+
+               /*
+                * Can happen in the asymmetric case, where these siblings are
+                * unused. The mask will not be empty because those CPUs that
+                * do have the top domain _should_ span the domain.
+                */
+               if (!sibling->child)
+                       continue;
+
+               /* If we would not end up here, we can't continue from here */
+               if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
                        continue;
 
                cpumask_set_cpu(i, sched_group_mask(sg));
        }
+
+       /* We must not have empty masks here */
+       WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
 }
 
 /*
@@ -6148,7 +6164,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
        cpumask_clear(covered);
 
-       for_each_cpu(i, span) {
+       for_each_cpu_wrap(i, span, cpu) {
                struct cpumask *sg_span;
 
                if (cpumask_test_cpu(i, covered))
@@ -7422,22 +7438,6 @@ int sched_cpu_dying(unsigned int cpu)
 }
 #endif
 
-#ifdef CONFIG_SCHED_SMT
-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-
-static void sched_init_smt(void)
-{
-       /*
-        * We've enumerated all CPUs and will assume that if any CPU
-        * has SMT siblings, CPU0 will too.
-        */
-       if (cpumask_weight(cpu_smt_mask(0)) > 1)
-               static_branch_enable(&sched_smt_present);
-}
-#else
-static inline void sched_init_smt(void) { }
-#endif
-
 void __init sched_init_smp(void)
 {
        cpumask_var_t non_isolated_cpus;
@@ -7467,9 +7467,6 @@ void __init sched_init_smp(void)
 
        init_sched_rt_class();
        init_sched_dl_class();
-
-       sched_init_smt();
-
        sched_smp_initialized = true;
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c242944f5cbd..7a68c631d5b5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5310,43 +5310,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
        return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
-/*
- * Implement a for_each_cpu() variant that starts the scan at a given cpu
- * (@start), and wraps around.
- *
- * This is used to scan for idle CPUs; such that not all CPUs looking for an
- * idle CPU find the same CPU. The down-side is that tasks tend to cycle
- * through the LLC domain.
- *
- * Especially tbench is found sensitive to this.
- */
-
-static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
-{
-       int next;
-
-again:
-       next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
-
-       if (*wrapped) {
-               if (next >= start)
-                       return nr_cpumask_bits;
-       } else {
-               if (next >= nr_cpumask_bits) {
-                       *wrapped = 1;
-                       n = -1;
-                       goto again;
-               }
-       }
-
-       return next;
-}
-
-#define for_each_cpu_wrap(cpu, mask, start, wrap)                              \
-       for ((wrap) = 0, (cpu) = (start)-1;                                     \
-               (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)),     \
-               (cpu) < nr_cpumask_bits; )
-
 #ifdef CONFIG_SCHED_SMT
 
 static inline void set_idle_cores(int cpu, int val)
@@ -5376,7 +5339,7 @@ static inline bool test_idle_cores(int cpu, bool def)
  * Since SMT siblings share all cache levels, inspecting this limited remote
  * state should be fairly cheap.
  */
-void __update_idle_core(struct rq *rq)
+void update_idle_core(struct rq *rq)
 {
        int core = cpu_of(rq);
        int cpu;
@@ -5406,17 +5369,14 @@ void __update_idle_core(struct rq *rq)
 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
 {
        struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
-       int core, cpu, wrap;
-
-       if (!static_branch_likely(&sched_smt_present))
-               return -1;
+       int core, cpu;
 
        if (!test_idle_cores(target, false))
                return -1;
 
        cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p));
 
-       for_each_cpu_wrap(core, cpus, target, wrap) {
+       for_each_cpu_wrap(core, cpus, target) {
                bool idle = true;
 
                for_each_cpu(cpu, cpu_smt_mask(core)) {
@@ -5444,9 +5404,6 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 {
        int cpu;
 
-       if (!static_branch_likely(&sched_smt_present))
-               return -1;
-
        for_each_cpu(cpu, cpu_smt_mask(target)) {
                if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                        continue;
@@ -5482,7 +5439,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
        u64 avg_cost, avg_idle = this_rq()->avg_idle;
        u64 time, cost;
        s64 delta;
-       int cpu, wrap;
+       int cpu;
 
        this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
        if (!this_sd)
@@ -5499,7 +5456,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 
        time = local_clock();
 
-       for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+       for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
                if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                        continue;
                if (idle_cpu(cpu))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 055f935d4421..ad77d666583c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -43,6 +43,12 @@ extern void cpu_load_update_active(struct rq *this_rq);
 static inline void cpu_load_update_active(struct rq *this_rq) { }
 #endif
 
+#ifdef CONFIG_SCHED_SMT
+extern void update_idle_core(struct rq *rq);
+#else
+static inline void update_idle_core(struct rq *rq) { }
+#endif
+
 /*
  * Helpers for converting nanosecond timing to jiffy resolution
  */
@@ -731,23 +737,6 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
-
-#ifdef CONFIG_SCHED_SMT
-
-extern struct static_key_false sched_smt_present;
-
-extern void __update_idle_core(struct rq *rq);
-
-static inline void update_idle_core(struct rq *rq)
-{
-       if (static_branch_unlikely(&sched_smt_present))
-               __update_idle_core(rq);
-}
-
-#else
-static inline void update_idle_core(struct rq *rq) { }
-#endif
-
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 #define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 83c60f9013cb..52ee2c51f4b3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1906,7 +1906,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 #endif
                ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
-               ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+               ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
                (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
                (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 }
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 81dedaab36cc..4731a0895760 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 }
 EXPORT_SYMBOL(cpumask_any_but);
 
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+       int next;
+
+again:
+       next = cpumask_next(n, mask);
+
+       if (wrap && n < start && next >= start) {
+               return nr_cpumask_bits;
+
+       } else if (next >= nr_cpumask_bits) {
+               wrap = true;
+               n = -1;
+               goto again;
+       }
+
+       return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
+
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /**
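
The wrap logic above restarts the scan once from bit 0 after falling off
the end, and a wrapped scan terminates as soon as it would cross @start
again. A standalone user-space analog over a plain 32-bit mask (a
sketch; NBITS and next_set_bit() are stand-ins, not the kernel
implementation):

    #include <stdbool.h>

    #define NBITS 32

    static int next_set_bit(unsigned int mask, int n)
    {
        for (int i = n + 1; i < NBITS; i++)
            if (mask & (1u << i))
                return i;
        return NBITS;
    }

    static int next_wrap(int n, unsigned int mask, int start, bool wrap)
    {
        int next;
    again:
        next = next_set_bit(mask, n);
        if (wrap && n < start && next >= start)
            return NBITS;        /* wrapped past start: done */
        if (next >= NBITS) {
            wrap = true;         /* fell off the end: wrap once */
            n = -1;
            goto again;
        }
        return next;
    }
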
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e7d5db958538..8258e9eee806 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1373,8 +1373,8 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                get_page(page);
                spin_unlock(ptl);
                split_huge_page(page);
-               put_page(page);
                unlock_page(page);
+               put_page(page);
                goto out_unlocked;
        }
 
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 234676e31edd..7a40fa2be858 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
                l = list_lru_from_kmem(nlru, item);
                list_add_tail(item, &l->list);
                l->nr_items++;
+               nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
@@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
                l = list_lru_from_kmem(nlru, item);
                list_del_init(item);
                l->nr_items--;
+               nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
@@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);
 
 unsigned long list_lru_count_node(struct list_lru *lru, int nid)
 {
-       long count = 0;
-       int memcg_idx;
+       struct list_lru_node *nlru;
 
-       count += __list_lru_count_one(lru, nid, -1);
-       if (list_lru_memcg_aware(lru)) {
-               for_each_memcg_cache_index(memcg_idx)
-                       count += __list_lru_count_one(lru, nid, memcg_idx);
-       }
-       return count;
+       nlru = &lru->node[nid];
+       return nlru->nr_items;
 }
 EXPORT_SYMBOL_GPL(list_lru_count_node);
 
@@ -226,6 +223,7 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
                        assert_spin_locked(&nlru->lock);
                case LRU_REMOVED:
                        isolated++;
+                       nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
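
Because nlru->nr_items is adjusted under nlru->lock at every add,
delete, and LRU_REMOVED isolation, list_lru_count_node() becomes a
single read instead of a walk over every per-memcg sublist. A toy
analog of the pattern, assuming pthread locking (names hypothetical):

    #include <pthread.h>

    struct toy_lru_node {
        pthread_mutex_t lock;
        long nr_items;      /* mirrors every list mutation under 'lock' */
    };

    static void toy_lru_add(struct toy_lru_node *n /* , item */)
    {
        pthread_mutex_lock(&n->lock);
        /* ... link the item onto the appropriate sublist ... */
        n->nr_items++;
        pthread_mutex_unlock(&n->lock);
    }

    /* A count used only as a shrinker heuristic tolerates a racy,
     * lockless read of the aggregate. */
    static long toy_lru_count(struct toy_lru_node *n)
    {
        return n->nr_items;
    }
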
diff --git a/mm/mmap.c b/mm/mmap.c
index 145d3d5253e8..75d263bd8739 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2228,7 +2228,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 
        /* Guard against exceeding limits of the address space. */
        address &= PAGE_MASK;
-       if (address >= TASK_SIZE)
+       if (address >= (TASK_SIZE & PAGE_MASK))
                return -ENOMEM;
        address += PAGE_SIZE;
 
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 7dbc80d01eb0..6406010e155b 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -323,7 +323,8 @@ static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
                        __mdb_entry_to_br_ip(entry, &complete_info->ip);
                        mdb.obj.complete_priv = complete_info;
                        mdb.obj.complete = br_mdb_complete;
-                       switchdev_port_obj_add(port_dev, &mdb.obj);
+                       if (switchdev_port_obj_add(port_dev, &mdb.obj))
+                               kfree(complete_info);
                }
        } else if (port_dev && type == RTM_DELMDB) {
                switchdev_port_obj_del(port_dev, &mdb.obj);
diff --git a/net/core/dev.c b/net/core/dev.c
index 97f806116ae9..c17952b6e0b6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4641,6 +4641,12 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
+static void napi_skb_free_stolen_head(struct sk_buff *skb)
+{
+       skb_dst_drop(skb);
+       kmem_cache_free(skbuff_head_cache, skb);
+}
+
 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
        switch (ret) {
@@ -4654,12 +4660,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
                break;
 
        case GRO_MERGED_FREE:
-               if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
-                       skb_dst_drop(skb);
-                       kmem_cache_free(skbuff_head_cache, skb);
-               } else {
+               if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+                       napi_skb_free_stolen_head(skb);
+               else
                        __kfree_skb(skb);
-               }
                break;
 
        case GRO_HELD:
@@ -4729,10 +4733,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
                break;
 
        case GRO_DROP:
-       case GRO_MERGED_FREE:
                napi_reuse_skb(napi, skb);
                break;
 
+       case GRO_MERGED_FREE:
+               if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+                       napi_skb_free_stolen_head(skb);
+               else
+                       napi_reuse_skb(napi, skb);
+               break;
+
        case GRO_MERGED:
                break;
        }
@@ -7521,7 +7531,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 {
 #if BITS_PER_LONG == 64
        BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
-       memcpy(stats64, netdev_stats, sizeof(*stats64));
+       memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
        /* zero out counters that only exist in rtnl_link_stats64 */
        memset((char *)stats64 + sizeof(*netdev_stats), 0,
               sizeof(*stats64) - sizeof(*netdev_stats));
@@ -7563,9 +7573,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
        } else {
                netdev_stats_to_stats64(storage, &dev->stats);
        }
-       storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
-       storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
-       storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
+       storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+       storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
+       storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
        return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
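
The netdev_stats_to_stats64() change copies only sizeof(*netdev_stats)
bytes, since rtnl_link_stats64 is a prefix-extension of the legacy
struct and copying sizeof(*stats64) read past the end of the source. A
miniature of the prefix-copy pattern (hypothetical structs; like the
kernel's BITS_PER_LONG == 64 branch it assumes unsigned long is 8
bytes, so the field layouts line up):

    #include <string.h>

    struct legacy_stats { unsigned long rx_packets, tx_packets; };
    struct full_stats   { unsigned long long rx_packets, tx_packets,
                          rx_nohandler; };

    static void to_stats64(struct full_stats *dst,
                           const struct legacy_stats *src)
    {
        /* Copy only what 'src' actually contains... */
        memcpy(dst, src, sizeof(*src));
        /* ...then zero the fields that exist only in the wider struct. */
        memset((char *)dst + sizeof(*src), 0,
               sizeof(*dst) - sizeof(*src));
    }
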
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 86fbf0f3235e..1a4db27f5833 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2300,6 +2300,8 @@ int tcp_disconnect(struct sock *sk, int flags)
        tcp_init_send_head(sk);
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);
+       dst_release(sk->sk_rx_dst);
+       sk->sk_rx_dst = NULL;
        tcp_saved_syn_free(tp);
 
        WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b6f4c42cc8ce..b2cabda72320 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1875,15 +1875,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
        if (dad_failed)
                ifp->flags |= IFA_F_DADFAILED;
 
-       if (ifp->flags&IFA_F_PERMANENT) {
-               spin_lock_bh(&ifp->lock);
-               addrconf_del_dad_work(ifp);
-               ifp->flags |= IFA_F_TENTATIVE;
-               spin_unlock_bh(&ifp->lock);
-               if (dad_failed)
-                       ipv6_ifa_notify(0, ifp);
-               in6_ifa_put(ifp);
-       } else if (ifp->flags&IFA_F_TEMPORARY) {
+       if (ifp->flags&IFA_F_TEMPORARY) {
                struct inet6_ifaddr *ifpub;
                spin_lock_bh(&ifp->lock);
                ifpub = ifp->ifpub;
@@ -1896,6 +1888,14 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
                        spin_unlock_bh(&ifp->lock);
                }
                ipv6_del_addr(ifp);
+       } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
+               spin_lock_bh(&ifp->lock);
+               addrconf_del_dad_work(ifp);
+               ifp->flags |= IFA_F_TENTATIVE;
+               spin_unlock_bh(&ifp->lock);
+               if (dad_failed)
+                       ipv6_ifa_notify(0, ifp);
+               in6_ifa_put(ifp);
        } else {
                ipv6_del_addr(ifp);
        }
@@ -3316,6 +3316,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info;
        struct inet6_dev *idev = __in6_dev_get(dev);
+       struct net *net = dev_net(dev);
        int run_pending = 0;
        int err;
 
@@ -3331,7 +3332,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
        case NETDEV_CHANGEMTU:
                /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
                if (dev->mtu < IPV6_MIN_MTU) {
-                       addrconf_ifdown(dev, 1);
+                       addrconf_ifdown(dev, dev != net->loopback_dev);
                        break;
                }
 
@@ -3447,7 +3448,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                         * IPV6_MIN_MTU stop IPv6 on this interface.
                         */
                        if (dev->mtu < IPV6_MIN_MTU)
-                               addrconf_ifdown(dev, 1);
+                               addrconf_ifdown(dev, dev != net->loopback_dev);
                }
                break;
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 636d4d893085..4345ee39f180 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -771,10 +771,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                                goto next_iter;
                        }
 
-                       if (iter->dst.dev == rt->dst.dev &&
-                           iter->rt6i_idev == rt->rt6i_idev &&
-                           ipv6_addr_equal(&iter->rt6i_gateway,
-                                           &rt->rt6i_gateway)) {
+                       if (rt6_duplicate_nexthop(iter, rt)) {
                                if (rt->rt6i_nsiblings)
                                        rt->rt6i_nsiblings = 0;
                                if (!(iter->rt6i_flags & RTF_EXPIRES))
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b8b475389ae4..5764a84465f8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2953,17 +2953,11 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list,
                                 struct rt6_info *rt, struct fib6_config *r_cfg)
 {
        struct rt6_nh *nh;
-       struct rt6_info *rtnh;
        int err = -EEXIST;
 
        list_for_each_entry(nh, rt6_nh_list, next) {
                /* check if rt6_info already exists */
-               rtnh = nh->rt6_info;
-
-               if (rtnh->dst.dev == rt->dst.dev &&
-                   rtnh->rt6i_idev == rt->rt6i_idev &&
-                   ipv6_addr_equal(&rtnh->rt6i_gateway,
-                                   &rt->rt6i_gateway))
+               if (rt6_duplicate_nexthop(nh->rt6_info, rt))
                        return err;
        }
 
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index e0b23fb5b8d5..525b624fec8b 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -129,7 +129,7 @@ int rds_tcp_accept_one(struct socket *sock)
        if (!sock) /* module unload or netns delete in progress */
                return -ENETUNREACH;
 
-       ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+       ret = sock_create_lite(sock->sk->sk_family,
                               sock->sk->sk_type, sock->sk->sk_protocol,
                               &new_sock);
        if (ret)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 206dc24add3a..ff27a85a71a9 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1008,6 +1008,9 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 
                return sch;
        }
+       /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
+       if (ops->destroy)
+               ops->destroy(sch);
 err_out3:
        dev_put(dev);
        kfree((char *) sch - sch->padded);
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index e3d0458af17b..2fae8b5f1b80 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -627,7 +627,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
                        q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
                                                      sizeof(u32));
                        if (!q->hhf_arrays[i]) {
-                               hhf_destroy(sch);
+                               /* Note: hhf_destroy() will be called
+                                * by our caller.
+                                */
                                return -ENOMEM;
                        }
                }
@@ -638,7 +640,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
                        q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
                                                          BITS_PER_BYTE);
                        if (!q->hhf_valid_bits[i]) {
-                               hhf_destroy(sch);
+                               /* Note: hhf_destroy() will be called
+                                * by our caller.
+                                */
                                return -ENOMEM;
                        }
                }
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 2bc8d7f8df16..20b7f1646f69 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
        /* pre-allocate qdiscs, attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
-       if (priv->qdiscs == NULL)
+       if (!priv->qdiscs)
                return -ENOMEM;
 
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
@@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
                qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(ntx + 1)));
-               if (qdisc == NULL)
-                       goto err;
+               if (!qdisc)
+                       return -ENOMEM;
                priv->qdiscs[ntx] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }
 
        sch->flags |= TCQ_F_MQROOT;
        return 0;
-
-err:
-       mq_destroy(sch);
-       return -ENOMEM;
 }
 
 static void mq_attach(struct Qdisc *sch)
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index b5c502c78143..922683418e53 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -118,10 +118,8 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
        /* pre-allocate qdisc, attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
-       if (priv->qdiscs == NULL) {
-               err = -ENOMEM;
-               goto err;
-       }
+       if (!priv->qdiscs)
+               return -ENOMEM;
 
        for (i = 0; i < dev->num_tx_queues; i++) {
                dev_queue = netdev_get_tx_queue(dev, i);
@@ -129,10 +127,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
                                          get_default_qdisc_ops(dev, i),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(i + 1)));
-               if (qdisc == NULL) {
-                       err = -ENOMEM;
-                       goto err;
-               }
+               if (!qdisc)
+                       return -ENOMEM;
+
                priv->qdiscs[i] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }
@@ -148,7 +145,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
                priv->hw_owned = 1;
                err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
                if (err)
-                       goto err;
+                       return err;
        } else {
                netdev_set_num_tc(dev, qopt->num_tc);
                for (i = 0; i < qopt->num_tc; i++)
@@ -162,10 +159,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
 
        sch->flags |= TCQ_F_MQROOT;
        return 0;
-
-err:
-       mqprio_destroy(sch);
-       return err;
 }
 
 static void mqprio_attach(struct Qdisc *sch)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7f195ed4d568..bc5e99584e41 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -742,9 +742,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
        q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
        if (!q->ht || !q->slots) {
-               sfq_destroy(sch);
+               /* Note: sfq_destroy() will be called by our caller */
                return -ENOMEM;
        }
+
        for (i = 0; i < q->divisor; i++)
                q->ht[i] = SFQ_EMPTY_SLOT;
 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e7a3068a1c3b..e9e9bc5c8773 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -305,8 +305,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
        [NL80211_ATTR_PID] = { .type = NLA_U32 },
        [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
-       [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
-                                .len = WLAN_PMKID_LEN },
+       [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
        [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
        [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
        [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
@@ -362,6 +361,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
        [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
        [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
+       [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
        [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
        [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
        [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
@@ -512,7 +512,7 @@ nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = {
 static const struct nla_policy
 nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = {
        [NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 },
-       [NL80211_NAN_FUNC_SERVICE_ID] = { .type = NLA_BINARY,
+       [NL80211_NAN_FUNC_SERVICE_ID] = {
                                    .len = NL80211_NAN_FUNC_SERVICE_ID_LEN },
        [NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 },
        [NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG },
@@ -6326,6 +6326,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
        struct nlattr *attr1, *attr2;
        int n_channels = 0, tmp1, tmp2;
 
+       nla_for_each_nested(attr1, freqs, tmp1)
+               if (nla_len(attr1) != sizeof(u32))
+                       return 0;
+
        nla_for_each_nested(attr1, freqs, tmp1) {
                n_channels++;
                /*
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index a8368d1c4348..55171647f516 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3499,7 +3499,7 @@ sub process {
                                $fixedline =~ s/\s*=\s*$/ = {/;
                                fix_insert_line($fixlinenr, $fixedline);
                                $fixedline = $line;
-                               $fixedline =~ s/^(.\s*){\s*/$1/;
+                               $fixedline =~ s/^(.\s*)\{\s*/$1/;
                                fix_insert_line($fixlinenr, $fixedline);
                        }
                }
@@ -3840,7 +3840,7 @@ sub process {
                                my $fixedline = rtrim($prevrawline) . " {";
                                fix_insert_line($fixlinenr, $fixedline);
                                $fixedline = $rawline;
-                               $fixedline =~ s/^(.\s*){\s*/$1\t/;
+                               $fixedline =~ s/^(.\s*)\{\s*/$1\t/;
                                if ($fixedline !~ /^\+\s*$/) {
                                        fix_insert_line($fixlinenr, $fixedline);
                                }
@@ -4329,7 +4329,7 @@ sub process {
                        if (ERROR("SPACING",
                                  "space required before the open brace '{'\n" 
. $herecurr) &&
                            $fix) {
-                               $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/;
+                               $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/;
                        }
                }
 
diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h
index c808c7d02d21..d30214221920 100644
--- a/tools/lib/lockdep/uinclude/linux/lockdep.h
+++ b/tools/lib/lockdep/uinclude/linux/lockdep.h
@@ -8,7 +8,7 @@
 #include <linux/utsname.h>
 #include <linux/compiler.h>
 
-#define MAX_LOCK_DEPTH 2000UL
+#define MAX_LOCK_DEPTH 63UL
 
 #define asmlinkage
 #define __visible
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
index 10a21a958aaf..763f37fecfb8 100644
--- a/tools/testing/selftests/capabilities/test_execve.c
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void)
 
        if (chdir(cwd) != 0)
                err(1, "chdir to private tmpfs");
-
-       if (umount2(".", MNT_DETACH) != 0)
-               err(1, "detach private tmpfs");
 }
 
 static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
@@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path)
                        err(1, "chown");
                if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
                        err(1, "chmod");
-}
+       }
 
        capng_get_caps_process();
 
@@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path)
        } else {
                printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
                exec_other_validate_cap("./validate_cap_sgidnonroot",
-                                               false, false, true, false);
+                                       false, false, true, false);
 
                if (fork_wait()) {
                        printf("[RUN]\tNon-root +ia, sgidroot => i\n");
