commit:     30030103dd1826abf4c3dbb3f9c6ba981a93dbfa
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 30 10:06:33 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 30 10:06:33 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=30030103
Linux patch 4.9.46 0000_README | 4 + 1045_linux-4.9.46.patch | 3114 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 3118 insertions(+) diff --git a/0000_README b/0000_README index e142b57..9af1e8a 100644 --- a/0000_README +++ b/0000_README @@ -223,6 +223,10 @@ Patch: 1044_linux-4.9.45.patch From: http://www.kernel.org Desc: Linux 4.9.45 +Patch: 1045_linux-4.9.46.patch +From: http://www.kernel.org +Desc: Linux 4.9.46 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1045_linux-4.9.46.patch b/1045_linux-4.9.46.patch new file mode 100644 index 0000000..72e724f --- /dev/null +++ b/1045_linux-4.9.46.patch @@ -0,0 +1,3114 @@ +diff --git a/Makefile b/Makefile +index ccd6d91f616e..846ef1b57a02 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 45 ++SUBLEVEL = 46 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h +index b3410ff6a62d..4fd6272e6c01 100644 +--- a/arch/arc/include/asm/cache.h ++++ b/arch/arc/include/asm/cache.h +@@ -89,7 +89,9 @@ extern unsigned long perip_base, perip_end; + #define ARC_REG_SLC_FLUSH 0x904 + #define ARC_REG_SLC_INVALIDATE 0x905 + #define ARC_REG_SLC_RGN_START 0x914 ++#define ARC_REG_SLC_RGN_START1 0x915 + #define ARC_REG_SLC_RGN_END 0x916 ++#define ARC_REG_SLC_RGN_END1 0x917 + + /* Bit val in SLC_CONTROL */ + #define SLC_CTRL_IM 0x040 +diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c +index 8147583c4434..bbdfeb31dee6 100644 +--- a/arch/arc/mm/cache.c ++++ b/arch/arc/mm/cache.c +@@ -562,6 +562,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) + static DEFINE_SPINLOCK(lock); + unsigned long flags; + unsigned int ctrl; ++ phys_addr_t end; + + spin_lock_irqsave(&lock, flags); + +@@ -591,8 +592,16 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) + * END needs to be setup before START (latter triggers the operation) + * END can't be same as START, so add (l2_line_sz - 1) to sz + */ +- write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1)); +- write_aux_reg(ARC_REG_SLC_RGN_START, paddr); ++ end = paddr + sz + l2_line_sz - 1; ++ if (is_pae40_enabled()) ++ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end)); ++ ++ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end)); ++ ++ if (is_pae40_enabled()) ++ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr)); ++ ++ write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr)); + + while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); + +diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h +index 0012f0353fd6..fe208b70b8b1 100644 +--- a/arch/powerpc/include/asm/mmu_context.h ++++ b/arch/powerpc/include/asm/mmu_context.h +@@ -75,9 +75,27 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, + struct task_struct *tsk) + { + /* Mark this context has been used on the new CPU */ +- if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) ++ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) { + cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); + ++ /* ++ * This full barrier orders the store to the cpumask above vs ++ * a subsequent operation which allows this CPU to begin loading ++ * translations for next. ++ * ++ * When using the radix MMU that operation is the load of the ++ * MMU context id, which is then moved to SPRN_PID. 
++ * ++ * For the hash MMU it is either the first load from slb_cache ++ * in switch_slb(), and/or the store of paca->mm_ctx_id in ++ * copy_mm_to_paca(). ++ * ++ * On the read side the barrier is in pte_xchg(), which orders ++ * the store to the PTE vs the load of mm_cpumask. ++ */ ++ smp_mb(); ++ } ++ + /* 32-bit keeps track of the current PGDIR in the thread struct */ + #ifdef CONFIG_PPC32 + tsk->thread.pgdir = next->pgd; +diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h +index 49c0a5a80efa..68e087e807f8 100644 +--- a/arch/powerpc/include/asm/pgtable-be-types.h ++++ b/arch/powerpc/include/asm/pgtable-be-types.h +@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new) + unsigned long *p = (unsigned long *)ptep; + __be64 prev; + ++ /* See comment in switch_mm_irqs_off() */ + prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old), + (__force unsigned long)pte_raw(new)); + +diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h +index e7f4f3e0fcde..41e9d0a6cbeb 100644 +--- a/arch/powerpc/include/asm/pgtable-types.h ++++ b/arch/powerpc/include/asm/pgtable-types.h +@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new) + { + unsigned long *p = (unsigned long *)ptep; + ++ /* See comment in switch_mm_irqs_off() */ + return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new)); + } + #endif +diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c +index 05c98bb853cf..2f04ad1ea01c 100644 +--- a/arch/s390/kvm/sthyi.c ++++ b/arch/s390/kvm/sthyi.c +@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr) + "srl %[cc],28\n" + : [cc] "=d" (cc) + : [code] "d" (code), [addr] "a" (addr) +- : "memory", "cc"); ++ : "3", "memory", "cc"); + return cc; + } + +@@ -422,7 +422,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu) + VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr); + trace_kvm_s390_handle_sthyi(vcpu, code, addr); + +- if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK) ++ if (reg1 == reg2 || reg1 & 1 || reg2 & 1) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); + + if (code & 0xffff) { +@@ -430,6 +430,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu) + goto out; + } + ++ if (addr & ~PAGE_MASK) ++ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); ++ + /* + * If the page has not yet been faulted in, we want to do that + * now and not after all the expensive calculations. +diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c +index 06981cc716b6..d04111a5c615 100644 +--- a/arch/sparc/kernel/pci_sun4v.c ++++ b/arch/sparc/kernel/pci_sun4v.c +@@ -1240,8 +1240,6 @@ static int pci_sun4v_probe(struct platform_device *op) + * ATU group, but ATU hcalls won't be available. 
+ */ + hv_atu = false; +- pr_err(PFX "Could not register hvapi ATU err=%d\n", +- err); + } else { + pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n", + vatu_major, vatu_minor); +diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c +index 970c1de3b86e..4c1b7ea18541 100644 +--- a/arch/x86/events/intel/rapl.c ++++ b/arch/x86/events/intel/rapl.c +@@ -161,7 +161,13 @@ static u64 rapl_timer_ms; + + static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu) + { +- return rapl_pmus->pmus[topology_logical_package_id(cpu)]; ++ unsigned int pkgid = topology_logical_package_id(cpu); ++ ++ /* ++ * The unsigned check also catches the '-1' return value for non ++ * existent mappings in the topology map. ++ */ ++ return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL; + } + + static inline u64 rapl_read_counter(struct perf_event *event) +@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event) + + /* must be done before validate_group */ + pmu = cpu_to_rapl_pmu(event->cpu); ++ if (!pmu) ++ return -EINVAL; + event->cpu = pmu->cpu; + event->pmu_private = pmu; + event->hw.event_base = msr; +@@ -585,6 +593,19 @@ static int rapl_cpu_online(unsigned int cpu) + struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); + int target; + ++ if (!pmu) { ++ pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); ++ if (!pmu) ++ return -ENOMEM; ++ ++ raw_spin_lock_init(&pmu->lock); ++ INIT_LIST_HEAD(&pmu->active_list); ++ pmu->pmu = &rapl_pmus->pmu; ++ pmu->timer_interval = ms_to_ktime(rapl_timer_ms); ++ rapl_hrtimer_init(pmu); ++ ++ rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu; ++ } + /* + * Check if there is an online cpu in the package which collects rapl + * events already. +@@ -598,27 +619,6 @@ static int rapl_cpu_online(unsigned int cpu) + return 0; + } + +-static int rapl_cpu_prepare(unsigned int cpu) +-{ +- struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); +- +- if (pmu) +- return 0; +- +- pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); +- if (!pmu) +- return -ENOMEM; +- +- raw_spin_lock_init(&pmu->lock); +- INIT_LIST_HEAD(&pmu->active_list); +- pmu->pmu = &rapl_pmus->pmu; +- pmu->timer_interval = ms_to_ktime(rapl_timer_ms); +- pmu->cpu = -1; +- rapl_hrtimer_init(pmu); +- rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu; +- return 0; +-} +- + static int rapl_check_hw_unit(bool apply_quirk) + { + u64 msr_rapl_power_unit_bits; +@@ -804,28 +804,21 @@ static int __init rapl_pmu_init(void) + * Install callbacks. Core will call them for each online cpu. 
+ */ + +- ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP", +- rapl_cpu_prepare, NULL); +- if (ret) +- goto out; +- + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE, + "AP_PERF_X86_RAPL_ONLINE", + rapl_cpu_online, rapl_cpu_offline); + if (ret) +- goto out1; ++ goto out; + + ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); + if (ret) +- goto out2; ++ goto out1; + + rapl_advertise(); + return 0; + +-out2: +- cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE); + out1: +- cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP); ++ cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE); + out: + pr_warn("Initialization failed (%d), disabled\n", ret); + cleanup_rapl_pmus(); +@@ -836,7 +829,6 @@ module_init(rapl_pmu_init); + static void __exit intel_rapl_exit(void) + { + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE); +- cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP); + perf_pmu_unregister(&rapl_pmus->pmu); + cleanup_rapl_pmus(); + } +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h +index 8e0a9fe86de4..f9dd22469388 100644 +--- a/arch/x86/include/asm/mmu_context.h ++++ b/arch/x86/include/asm/mmu_context.h +@@ -116,9 +116,7 @@ static inline int init_new_context(struct task_struct *tsk, + mm->context.execute_only_pkey = -1; + } + #endif +- init_new_context_ldt(tsk, mm); +- +- return 0; ++ return init_new_context_ldt(tsk, mm); + } + static inline void destroy_context(struct mm_struct *mm) + { +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index 649d8f2c1e40..91af75e37306 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -456,7 +456,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, + entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; + cpuid_mask(&entry->ecx, CPUID_7_ECX); + /* PKU is not yet implemented for shadow paging. */ +- if (!tdp_enabled) ++ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) + entry->ecx &= ~F(PKU); + } else { + entry->ebx = 0; +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c +index e53bef6cf53c..0375c6024062 100644 +--- a/drivers/acpi/apei/ghes.c ++++ b/drivers/acpi/apei/ghes.c +@@ -1072,6 +1072,7 @@ static int ghes_remove(struct platform_device *ghes_dev) + if (list_empty(&ghes_sci)) + unregister_acpi_hed_notifier(&ghes_notifier_sci); + mutex_unlock(&ghes_list_mutex); ++ synchronize_rcu(); + break; + case ACPI_HEST_NOTIFY_NMI: + ghes_nmi_remove(ghes); +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c +index 79152dbc5528..51874695a730 100644 +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -1728,7 +1728,7 @@ int __init acpi_ec_dsdt_probe(void) + * functioning ECDT EC first in order to handle the events. 
+ * https://bugzilla.kernel.org/show_bug.cgi?id=115021 + */ +-int __init acpi_ec_ecdt_start(void) ++static int __init acpi_ec_ecdt_start(void) + { + acpi_handle handle; + +@@ -1959,20 +1959,17 @@ static inline void acpi_ec_query_exit(void) + int __init acpi_ec_init(void) + { + int result; ++ int ecdt_fail, dsdt_fail; + + /* register workqueue for _Qxx evaluations */ + result = acpi_ec_query_init(); + if (result) +- goto err_exit; +- /* Now register the driver for the EC */ +- result = acpi_bus_register_driver(&acpi_ec_driver); +- if (result) +- goto err_exit; ++ return result; + +-err_exit: +- if (result) +- acpi_ec_query_exit(); +- return result; ++ /* Drivers must be started after acpi_ec_query_init() */ ++ ecdt_fail = acpi_ec_ecdt_start(); ++ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); ++ return ecdt_fail && dsdt_fail ? -ENODEV : 0; + } + + /* EC driver currently not unloadable */ +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h +index 219b90bc0922..08b3ca0ead69 100644 +--- a/drivers/acpi/internal.h ++++ b/drivers/acpi/internal.h +@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data); + int acpi_ec_init(void); + int acpi_ec_ecdt_probe(void); + int acpi_ec_dsdt_probe(void); +-int acpi_ec_ecdt_start(void); + void acpi_ec_block_transactions(void); + void acpi_ec_unblock_transactions(void); + int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, +diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c +index 6d7ce6e12aaa..5e18ccf5ab57 100644 +--- a/drivers/acpi/ioapic.c ++++ b/drivers/acpi/ioapic.c +@@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data) + struct resource *res = data; + struct resource_win win; + ++ /* ++ * We might assign this to 'res' later, make sure all pointers are ++ * cleared before the resource is added to the global list ++ */ ++ memset(&win, 0, sizeof(win)); ++ + res->flags = 0; + if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM)) + return AE_OK; +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index dd3786acba89..cf725d581cae 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -2051,7 +2051,6 @@ int __init acpi_scan_init(void) + + acpi_gpe_apply_masked_gpes(); + acpi_update_all_gpes(); +- acpi_ec_ecdt_start(); + + acpi_scan_initialized = true; + +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index 3c71b982bf2a..15009b2b33c7 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -1724,8 +1724,12 @@ static void binder_transaction(struct binder_proc *proc, + list_add_tail(&t->work.entry, target_list); + tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; + list_add_tail(&tcomplete->entry, &thread->todo); +- if (target_wait) +- wake_up_interruptible(target_wait); ++ if (target_wait) { ++ if (reply || !(t->flags & TF_ONE_WAY)) ++ wake_up_interruptible_sync(target_wait); ++ else ++ wake_up_interruptible(target_wait); ++ } + return; + + err_get_unused_fd_failed: +@@ -2760,10 +2764,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + /*pr_info("binder_ioctl: %d:%d %x %lx\n", + proc->pid, current->pid, cmd, arg);*/ + +- if (unlikely(current->mm != proc->vma_vm_mm)) { +- pr_err("current mm mismatch proc mm\n"); +- return -EINVAL; +- } + trace_binder_ioctl(cmd, arg); + + ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); +@@ -2875,7 +2875,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) + const char *failure_string; + 
struct binder_buffer *buffer; + +- if (proc->tsk != current) ++ if (proc->tsk != current->group_leader) + return -EINVAL; + + if ((vma->vm_end - vma->vm_start) > SZ_4M) +@@ -2976,9 +2976,8 @@ static int binder_open(struct inode *nodp, struct file *filp) + proc = kzalloc(sizeof(*proc), GFP_KERNEL); + if (proc == NULL) + return -ENOMEM; +- get_task_struct(current); +- proc->tsk = current; +- proc->vma_vm_mm = current->mm; ++ get_task_struct(current->group_leader); ++ proc->tsk = current->group_leader; + INIT_LIST_HEAD(&proc->todo); + init_waitqueue_head(&proc->wait); + proc->default_priority = task_nice(current); +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c +index 4e19bde4bbff..34adde169a78 100644 +--- a/drivers/gpu/drm/drm_atomic.c ++++ b/drivers/gpu/drm/drm_atomic.c +@@ -1386,6 +1386,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state) + if (config->funcs->atomic_check) + ret = config->funcs->atomic_check(state->dev, state); + ++ if (ret) ++ return ret; ++ + if (!state->allow_modeset) { + for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (drm_atomic_crtc_needs_modeset(crtc_state)) { +@@ -1396,7 +1399,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state) + } + } + +- return ret; ++ return 0; + } + EXPORT_SYMBOL(drm_atomic_check_only); + +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index 465bacd0a630..48e99ab525c3 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ b/drivers/gpu/drm/drm_gem.c +@@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) + struct drm_gem_object *obj = ptr; + struct drm_device *dev = obj->dev; + ++ if (dev->driver->gem_close_object) ++ dev->driver->gem_close_object(obj, file_priv); ++ + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_gem_remove_prime_handles(obj, file_priv); + drm_vma_node_revoke(&obj->vma_node, file_priv); + +- if (dev->driver->gem_close_object) +- dev->driver->gem_close_object(obj, file_priv); +- + drm_gem_object_handle_unreference_unlocked(obj); + + return 0; +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +index 7316fc7fa0bd..a2ec6d8796a0 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +@@ -149,8 +149,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) + rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0); + + /* Signal polarities */ +- value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL) +- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL) ++ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0) ++ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 
DSMR_HSL : 0) + | DSMR_DIPM_DISP | DSMR_CSPM; + rcar_du_crtc_write(rcrtc, DSMR, value); + +@@ -172,7 +172,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) + mode->crtc_vsync_start - 1); + rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1); + +- rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start); ++ rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1); + rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay); + } + +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c +index cfc302c65b0b..c58602b638e4 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c +@@ -453,13 +453,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, + } + + ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector); +- of_node_put(encoder); +- of_node_put(connector); +- + if (ret && ret != -EPROBE_DEFER) + dev_warn(rcdu->dev, +- "failed to initialize encoder %s (%d), skipping\n", +- encoder->full_name, ret); ++ "failed to initialize encoder %s on output %u (%d), skipping\n", ++ of_node_full_name(encoder), output, ret); ++ ++ of_node_put(encoder); ++ of_node_put(connector); + + return ret; + } +diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c +index 0b42a12171f3..b42d95f09c68 100644 +--- a/drivers/i2c/busses/i2c-designware-platdrv.c ++++ b/drivers/i2c/busses/i2c-designware-platdrv.c +@@ -319,7 +319,7 @@ static void dw_i2c_plat_complete(struct device *dev) + #endif + + #ifdef CONFIG_PM +-static int dw_i2c_plat_suspend(struct device *dev) ++static int dw_i2c_plat_runtime_suspend(struct device *dev) + { + struct platform_device *pdev = to_platform_device(dev); + struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); +@@ -343,11 +343,21 @@ static int dw_i2c_plat_resume(struct device *dev) + return 0; + } + ++#ifdef CONFIG_PM_SLEEP ++static int dw_i2c_plat_suspend(struct device *dev) ++{ ++ pm_runtime_resume(dev); ++ return dw_i2c_plat_runtime_suspend(dev); ++} ++#endif ++ + static const struct dev_pm_ops dw_i2c_dev_pm_ops = { + .prepare = dw_i2c_plat_prepare, + .complete = dw_i2c_plat_complete, + SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) +- SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL) ++ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, ++ dw_i2c_plat_resume, ++ NULL) + }; + + #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops) +diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +index 60829340a82e..b60e5d87c257 100644 +--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c ++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +@@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) + s32 poll_value = 0; + + if (state) { +- if (!atomic_read(&st->user_requested_state)) +- return 0; + if (sensor_hub_device_open(st->hsdev)) + return -EIO; + +@@ -86,6 +84,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) + &report_val); + } + ++ pr_debug("HID_SENSOR %s set power_state %d report_state %d\n", ++ st->pdev->name, state_val, report_val); ++ + sensor_hub_get_feature(st->hsdev, st->power_state.report_id, + st->power_state.index, + sizeof(state_val), &state_val); +@@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) + ret = pm_runtime_get_sync(&st->pdev->dev); + else { + 
pm_runtime_mark_last_busy(&st->pdev->dev); ++ pm_runtime_use_autosuspend(&st->pdev->dev); + ret = pm_runtime_put_autosuspend(&st->pdev->dev); + } + if (ret < 0) { +@@ -201,8 +203,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, + /* Default to 3 seconds, but can be changed from sysfs */ + pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, + 3000); +- pm_runtime_use_autosuspend(&attrb->pdev->dev); +- + return ret; + error_unreg_trigger: + iio_trigger_unregister(trig); +diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c +index 8cf84d3488b2..12898424d838 100644 +--- a/drivers/iio/imu/adis16480.c ++++ b/drivers/iio/imu/adis16480.c +@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { + .gyro_max_val = IIO_RAD_TO_DEGREE(22500), + .gyro_max_scale = 450, + .accel_max_val = IIO_M_S_2_TO_G(12500), +- .accel_max_scale = 5, ++ .accel_max_scale = 10, + }, + [ADIS16485] = { + .channels = adis16485_channels, +diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c +index 518e8a7bd5f9..f26807c75be4 100644 +--- a/drivers/input/mouse/alps.c ++++ b/drivers/input/mouse/alps.c +@@ -1212,14 +1212,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f, + + case SS4_PACKET_ID_TWO: + if (priv->flags & ALPS_BUTTONPAD) { +- f->mt[0].x = SS4_BTL_MF_X_V2(p, 0); ++ if (IS_SS4PLUS_DEV(priv->dev_id)) { ++ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); ++ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); ++ } else { ++ f->mt[0].x = SS4_BTL_MF_X_V2(p, 0); ++ f->mt[1].x = SS4_BTL_MF_X_V2(p, 1); ++ } + f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0); +- f->mt[1].x = SS4_BTL_MF_X_V2(p, 1); + f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1); + } else { +- f->mt[0].x = SS4_STD_MF_X_V2(p, 0); ++ if (IS_SS4PLUS_DEV(priv->dev_id)) { ++ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); ++ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); ++ } else { ++ f->mt[0].x = SS4_STD_MF_X_V2(p, 0); ++ f->mt[1].x = SS4_STD_MF_X_V2(p, 1); ++ } + f->mt[0].y = SS4_STD_MF_Y_V2(p, 0); +- f->mt[1].x = SS4_STD_MF_X_V2(p, 1); + f->mt[1].y = SS4_STD_MF_Y_V2(p, 1); + } + f->pressure = SS4_MF_Z_V2(p, 0) ? 
0x30 : 0; +@@ -1236,16 +1246,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f, + + case SS4_PACKET_ID_MULTI: + if (priv->flags & ALPS_BUTTONPAD) { +- f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); ++ if (IS_SS4PLUS_DEV(priv->dev_id)) { ++ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); ++ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); ++ } else { ++ f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); ++ f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); ++ } ++ + f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0); +- f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); + f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1); + no_data_x = SS4_MFPACKET_NO_AX_BL; + no_data_y = SS4_MFPACKET_NO_AY_BL; + } else { +- f->mt[2].x = SS4_STD_MF_X_V2(p, 0); ++ if (IS_SS4PLUS_DEV(priv->dev_id)) { ++ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); ++ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); ++ } else { ++ f->mt[0].x = SS4_STD_MF_X_V2(p, 0); ++ f->mt[1].x = SS4_STD_MF_X_V2(p, 1); ++ } + f->mt[2].y = SS4_STD_MF_Y_V2(p, 0); +- f->mt[3].x = SS4_STD_MF_X_V2(p, 1); + f->mt[3].y = SS4_STD_MF_Y_V2(p, 1); + no_data_x = SS4_MFPACKET_NO_AX; + no_data_y = SS4_MFPACKET_NO_AY; +@@ -2535,8 +2556,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, + + memset(otp, 0, sizeof(otp)); + +- if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) || +- alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0])) ++ if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) || ++ alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0])) + return -1; + + alps_update_device_area_ss4_v2(otp, priv); +diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h +index dbfd26073e1a..793123717145 100644 +--- a/drivers/input/mouse/alps.h ++++ b/drivers/input/mouse/alps.h +@@ -91,6 +91,10 @@ enum SS4_PACKET_ID { + ((_b[1 + _i * 3] << 5) & 0x1F00) \ + ) + ++#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \ ++ ((_b[1 + (_i) * 3] << 4) & 0x0F80) \ ++ ) ++ + #define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \ + ((_b[2 + (_i) * 3] << 5) & 0x01E0) | \ + ((_b[2 + (_i) * 3] << 4) & 0x0E00) \ +@@ -100,6 +104,10 @@ enum SS4_PACKET_ID { + ((_b[0 + (_i) * 3] >> 3) & 0x0010) \ + ) + ++#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \ ++ ((_b[0 + (_i) * 3] >> 4) & 0x0008) \ ++ ) ++ + #define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \ + ((_b[0 + (_i) * 3] >> 3) & 0x0008) \ + ) +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c +index 98d4e515587a..681dce15fbc8 100644 +--- a/drivers/input/mouse/elan_i2c_core.c ++++ b/drivers/input/mouse/elan_i2c_core.c +@@ -1234,6 +1234,7 @@ static const struct acpi_device_id elan_acpi_id[] = { + { "ELAN0000", 0 }, + { "ELAN0100", 0 }, + { "ELAN0600", 0 }, ++ { "ELAN0602", 0 }, + { "ELAN0605", 0 }, + { "ELAN0608", 0 }, + { "ELAN0605", 0 }, +diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c +index 354d47ecd66a..ce6ff9b301bb 100644 +--- a/drivers/input/mouse/trackpoint.c ++++ b/drivers/input/mouse/trackpoint.c +@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir + if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) + return -1; + +- if (param[0] != TP_MAGIC_IDENT) ++ /* add new TP ID. 
*/ ++ if (!(param[0] & TP_MAGIC_IDENT)) + return -1; + + if (firmware_id) +diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h +index 5617ed3a7d7a..88055755f82e 100644 +--- a/drivers/input/mouse/trackpoint.h ++++ b/drivers/input/mouse/trackpoint.h +@@ -21,8 +21,9 @@ + #define TP_COMMAND 0xE2 /* Commands start with this */ + + #define TP_READ_ID 0xE1 /* Sent for device identification */ +-#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */ ++#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */ + /* by the firmware ID */ ++ /* Firmware ID includes 0x1, 0x2, 0x3 */ + + + /* +diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c +index c9f386213e9e..410c39c62dc7 100644 +--- a/drivers/leds/trigger/ledtrig-heartbeat.c ++++ b/drivers/leds/trigger/ledtrig-heartbeat.c +@@ -19,7 +19,6 @@ + #include <linux/sched.h> + #include <linux/leds.h> + #include <linux/reboot.h> +-#include <linux/suspend.h> + #include "../leds.h" + + static int panic_heartbeats; +@@ -155,30 +154,6 @@ static struct led_trigger heartbeat_led_trigger = { + .deactivate = heartbeat_trig_deactivate, + }; + +-static int heartbeat_pm_notifier(struct notifier_block *nb, +- unsigned long pm_event, void *unused) +-{ +- int rc; +- +- switch (pm_event) { +- case PM_SUSPEND_PREPARE: +- case PM_HIBERNATION_PREPARE: +- case PM_RESTORE_PREPARE: +- led_trigger_unregister(&heartbeat_led_trigger); +- break; +- case PM_POST_SUSPEND: +- case PM_POST_HIBERNATION: +- case PM_POST_RESTORE: +- rc = led_trigger_register(&heartbeat_led_trigger); +- if (rc) +- pr_err("could not re-register heartbeat trigger\n"); +- break; +- default: +- break; +- } +- return NOTIFY_DONE; +-} +- + static int heartbeat_reboot_notifier(struct notifier_block *nb, + unsigned long code, void *unused) + { +@@ -193,10 +168,6 @@ static int heartbeat_panic_notifier(struct notifier_block *nb, + return NOTIFY_DONE; + } + +-static struct notifier_block heartbeat_pm_nb = { +- .notifier_call = heartbeat_pm_notifier, +-}; +- + static struct notifier_block heartbeat_reboot_nb = { + .notifier_call = heartbeat_reboot_notifier, + }; +@@ -213,14 +184,12 @@ static int __init heartbeat_trig_init(void) + atomic_notifier_chain_register(&panic_notifier_list, + &heartbeat_panic_nb); + register_reboot_notifier(&heartbeat_reboot_nb); +- register_pm_notifier(&heartbeat_pm_nb); + } + return rc; + } + + static void __exit heartbeat_trig_exit(void) + { +- unregister_pm_notifier(&heartbeat_pm_nb); + unregister_reboot_notifier(&heartbeat_reboot_nb); + atomic_notifier_chain_unregister(&panic_notifier_list, + &heartbeat_panic_nb); +diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c +index 551786f58e59..ba652d8a2b93 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/main.c ++++ b/drivers/net/ethernet/mellanox/mlx4/main.c +@@ -430,7 +430,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) + /* Virtual PCI function needs to determine UAR page size from + * firmware. 
Only master PCI function can set the uar page size + */ +- if (enable_4k_uar) ++ if (enable_4k_uar || !dev->persist->num_vfs) + dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; + else + dev->uar_page_shift = PAGE_SHIFT; +@@ -2269,7 +2269,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) + + dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; + +- if (enable_4k_uar) { ++ if (enable_4k_uar || !dev->persist->num_vfs) { + init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + + PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; + init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +index aee3fd2b6538..4ca82bd8c4f0 100644 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +@@ -871,8 +871,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) + return NETDEV_TX_OK; + + err_unmap: +- --f; +- while (f >= 0) { ++ while (--f >= 0) { + frag = &skb_shinfo(skb)->frags[f]; + dma_unmap_page(&nn->pdev->dev, + tx_ring->txbufs[wr_idx].dma_addr, +diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c +index c234ee43b6ef..24222a5d8df2 100644 +--- a/drivers/ntb/ntb_transport.c ++++ b/drivers/ntb/ntb_transport.c +@@ -176,14 +176,12 @@ struct ntb_transport_qp { + u64 rx_err_ver; + u64 rx_memcpy; + u64 rx_async; +- u64 dma_rx_prep_err; + u64 tx_bytes; + u64 tx_pkts; + u64 tx_ring_full; + u64 tx_err_no_buf; + u64 tx_memcpy; + u64 tx_async; +- u64 dma_tx_prep_err; + }; + + struct ntb_transport_mw { +@@ -256,8 +254,6 @@ enum { + #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) + #define NTB_QP_DEF_NUM_ENTRIES 100 + #define NTB_LINK_DOWN_TIMEOUT 10 +-#define DMA_RETRIES 20 +-#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) + + static void ntb_transport_rxc_db(unsigned long data); + static const struct ntb_ctx_ops ntb_transport_ops; +@@ -518,12 +514,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count, + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "free tx - \t%u\n", + ntb_transport_tx_free_entry(qp)); +- out_offset += snprintf(buf + out_offset, out_count - out_offset, +- "DMA tx prep err - \t%llu\n", +- qp->dma_tx_prep_err); +- out_offset += snprintf(buf + out_offset, out_count - out_offset, +- "DMA rx prep err - \t%llu\n", +- qp->dma_rx_prep_err); + + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "\n"); +@@ -625,7 +615,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, + if (!mw->virt_addr) + return -ENOMEM; + +- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) ++ if (mw_num < qp_count % mw_count) + num_qps_mw = qp_count / mw_count + 1; + else + num_qps_mw = qp_count / mw_count; +@@ -770,8 +760,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) + qp->tx_err_no_buf = 0; + qp->tx_memcpy = 0; + qp->tx_async = 0; +- qp->dma_tx_prep_err = 0; +- qp->dma_rx_prep_err = 0; + } + + static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) +@@ -933,10 +921,8 @@ static void ntb_transport_link_work(struct work_struct *work) + ntb_free_mw(nt, i); + + /* if there's an actual failure, we should just bail */ +- if (rc < 0) { +- ntb_link_disable(ndev); ++ if (rc < 0) + return; +- } + + out: + if (ntb_link_is_up(ndev, NULL, NULL) == 1) +@@ -1002,7 +988,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, + qp->event_handler = NULL; + ntb_qp_link_down_reset(qp); + +- if 
(qp_count % mw_count && mw_num + 1 < qp_count / mw_count) ++ if (mw_num < qp_count % mw_count) + num_qps_mw = qp_count / mw_count + 1; + else + num_qps_mw = qp_count / mw_count; +@@ -1125,8 +1111,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) + qp_count = ilog2(qp_bitmap); + if (max_num_clients && max_num_clients < qp_count) + qp_count = max_num_clients; +- else if (mw_count < qp_count) +- qp_count = mw_count; ++ else if (nt->mw_count < qp_count) ++ qp_count = nt->mw_count; + + qp_bitmap &= BIT_ULL(qp_count) - 1; + +@@ -1314,7 +1300,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset) + struct dmaengine_unmap_data *unmap; + dma_cookie_t cookie; + void *buf = entry->buf; +- int retries = 0; + + len = entry->len; + device = chan->device; +@@ -1343,22 +1328,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset) + + unmap->from_cnt = 1; + +- for (retries = 0; retries < DMA_RETRIES; retries++) { +- txd = device->device_prep_dma_memcpy(chan, +- unmap->addr[1], +- unmap->addr[0], len, +- DMA_PREP_INTERRUPT); +- if (txd) +- break; +- +- set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(DMA_OUT_RESOURCE_TO); +- } +- +- if (!txd) { +- qp->dma_rx_prep_err++; ++ txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], ++ unmap->addr[0], len, ++ DMA_PREP_INTERRUPT); ++ if (!txd) + goto err_get_unmap; +- } + + txd->callback_result = ntb_rx_copy_callback; + txd->callback_param = entry; +@@ -1603,7 +1577,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp, + struct dmaengine_unmap_data *unmap; + dma_addr_t dest; + dma_cookie_t cookie; +- int retries = 0; + + device = chan->device; + dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; +@@ -1625,21 +1598,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp, + + unmap->to_cnt = 1; + +- for (retries = 0; retries < DMA_RETRIES; retries++) { +- txd = device->device_prep_dma_memcpy(chan, dest, +- unmap->addr[0], len, +- DMA_PREP_INTERRUPT); +- if (txd) +- break; +- +- set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(DMA_OUT_RESOURCE_TO); +- } +- +- if (!txd) { +- qp->dma_tx_prep_err++; ++ txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, ++ DMA_PREP_INTERRUPT); ++ if (!txd) + goto err_get_unmap; +- } + + txd->callback_result = ntb_tx_copy_callback; + txd->callback_param = entry; +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +index b432153a6c5a..0f63a36a519e 100644 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +@@ -45,6 +45,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ + {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ ++ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ + {} /* Terminating entry */ + }; + +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index 789ff1df2d8d..581712534c93 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -183,15 +183,20 @@ build_path_from_dentry(struct dentry *direntry) + } + + /* ++ * Don't allow path components longer than the server max. + * Don't allow the separator character in a path component. + * The VFS will not allow "/", but "\" is allowed by posix. 
+ */ + static int +-check_name(struct dentry *direntry) ++check_name(struct dentry *direntry, struct cifs_tcon *tcon) + { + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); + int i; + ++ if (unlikely(direntry->d_name.len > ++ tcon->fsAttrInfo.MaxPathNameComponentLength)) ++ return -ENAMETOOLONG; ++ + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { + for (i = 0; i < direntry->d_name.len; i++) { + if (direntry->d_name.name[i] == '\\') { +@@ -489,10 +494,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, + return finish_no_open(file, res); + } + +- rc = check_name(direntry); +- if (rc) +- return rc; +- + xid = get_xid(); + + cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", +@@ -505,6 +506,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, + } + + tcon = tlink_tcon(tlink); ++ ++ rc = check_name(direntry, tcon); ++ if (rc) ++ goto out_free_xid; ++ + server = tcon->ses->server; + + if (server->ops->new_lease_key) +@@ -765,7 +771,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, + } + pTcon = tlink_tcon(tlink); + +- rc = check_name(direntry); ++ rc = check_name(direntry, pTcon); + if (rc) + goto lookup_out; + +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 7c1c6c39d582..0437e5fdba56 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -2930,8 +2930,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf, + kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * + le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); + kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); +- kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits); +- kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); ++ kst->f_bfree = kst->f_bavail = ++ le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); + return; + } + +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index 4e7a56a0a9b6..2c4f7a22e128 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -129,7 +129,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp) + argp->p = page_address(argp->pagelist[0]); + argp->pagelist++; + if (argp->pagelen < PAGE_SIZE) { +- argp->end = argp->p + (argp->pagelen>>2); ++ argp->end = argp->p + XDR_QUADLEN(argp->pagelen); + argp->pagelen = 0; + } else { + argp->end = argp->p + (PAGE_SIZE>>2); +@@ -1246,9 +1246,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) + argp->pagelen -= pages * PAGE_SIZE; + len -= pages * PAGE_SIZE; + +- argp->p = (__be32 *)page_address(argp->pagelist[0]); +- argp->pagelist++; +- argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE); ++ next_decode_page(argp); + } + argp->p += XDR_QUADLEN(len); + +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h +index 31e1d639abed..dc81e5287ebf 100644 +--- a/include/asm-generic/vmlinux.lds.h ++++ b/include/asm-generic/vmlinux.lds.h +@@ -59,6 +59,22 @@ + /* Align . to a 8 byte boundary equals to maximum function alignment. */ + #define ALIGN_FUNCTION() . = ALIGN(8) + ++/* ++ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which ++ * generates .data.identifier sections, which need to be pulled in with ++ * .data. We don't want to pull in .data..other sections, which Linux ++ * has defined. Same for text and bss. 
++ */ ++#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION ++#define TEXT_MAIN .text .text.[0-9a-zA-Z_]* ++#define DATA_MAIN .data .data.[0-9a-zA-Z_]* ++#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* ++#else ++#define TEXT_MAIN .text ++#define DATA_MAIN .data ++#define BSS_MAIN .bss ++#endif ++ + /* + * Align to a 32 byte boundary equal to the + * alignment gcc 4.5 uses for a struct +@@ -198,12 +214,9 @@ + + /* + * .data section +- * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates +- * .data.identifier which needs to be pulled in with .data, but don't want to +- * pull in .data..stuff which has its own requirements. Same for bss. + */ + #define DATA_DATA \ +- *(.data .data.[0-9a-zA-Z_]*) \ ++ *(DATA_MAIN) \ + *(.ref.data) \ + *(.data..shared_aligned) /* percpu related */ \ + MEM_KEEP(init.data) \ +@@ -436,16 +449,17 @@ + VMLINUX_SYMBOL(__security_initcall_end) = .; \ + } + +-/* .text section. Map to function alignment to avoid address changes ++/* ++ * .text section. Map to function alignment to avoid address changes + * during second ld run in second ld pass when generating System.map +- * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates +- * .text.identifier which needs to be pulled in with .text , but some +- * architectures define .text.foo which is not intended to be pulled in here. +- * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have +- * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */ ++ * ++ * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead ++ * code elimination is enabled, so these sections should be converted ++ * to use ".." first. ++ */ + #define TEXT_TEXT \ + ALIGN_FUNCTION(); \ +- *(.text.hot .text .text.fixup .text.unlikely) \ ++ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ + *(.ref.text) \ + MEM_KEEP(init.text) \ + MEM_KEEP(exit.text) \ +@@ -613,7 +627,7 @@ + BSS_FIRST_SECTIONS \ + *(.bss..page_aligned) \ + *(.dynbss) \ +- *(.bss .bss.[0-9a-zA-Z_]*) \ ++ *(BSS_MAIN) \ + *(COMMON) \ + } + +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h +index a13b031dc6b8..3101141661a1 100644 +--- a/include/linux/bpf_verifier.h ++++ b/include/linux/bpf_verifier.h +@@ -40,6 +40,7 @@ struct bpf_reg_state { + */ + s64 min_value; + u64 max_value; ++ bool value_from_signed; + }; + + enum bpf_stack_slot_type { +diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h +index ba1cad7b97cf..965cc5693a46 100644 +--- a/include/linux/cpuhotplug.h ++++ b/include/linux/cpuhotplug.h +@@ -10,7 +10,6 @@ enum cpuhp_state { + CPUHP_PERF_X86_PREPARE, + CPUHP_PERF_X86_UNCORE_PREP, + CPUHP_PERF_X86_AMD_UNCORE_PREP, +- CPUHP_PERF_X86_RAPL_PREP, + CPUHP_PERF_BFIN, + CPUHP_PERF_POWER, + CPUHP_PERF_SUPERH, +diff --git a/include/linux/fs.h b/include/linux/fs.h +index 2f63d44368bd..dd88ded27fc8 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -941,9 +941,9 @@ static inline struct file *get_file(struct file *f) + /* Page cache limit. The filesystems should put that into their s_maxbytes + limits, otherwise bad things can happen in VM. 
*/ + #if BITS_PER_LONG==32 +-#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1) ++#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT) + #elif BITS_PER_LONG==64 +-#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) ++#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX) + #endif + + #define FL_POSIX 1 +diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h +index 6c70444da3b9..b83507c0640c 100644 +--- a/include/linux/ptr_ring.h ++++ b/include/linux/ptr_ring.h +@@ -340,9 +340,9 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r) + __PTR_RING_PEEK_CALL_v; \ + }) + +-static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp) ++static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) + { +- return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp); ++ return kcalloc(size, sizeof(void *), gfp); + } + + static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) +@@ -417,7 +417,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ +-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, ++static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, ++ unsigned int nrings, + int size, + gfp_t gfp, void (*destroy)(void *)) + { +@@ -425,7 +426,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, + void ***queues; + int i; + +- queues = kmalloc(nrings * sizeof *queues, gfp); ++ queues = kmalloc_array(nrings, sizeof(*queues), gfp); + if (!queues) + goto noqueues; + +diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h +index f4dfade428f0..be8b902b5845 100644 +--- a/include/linux/skb_array.h ++++ b/include/linux/skb_array.h +@@ -162,7 +162,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) + } + + static inline int skb_array_resize_multiple(struct skb_array **rings, +- int nrings, int size, gfp_t gfp) ++ int nrings, unsigned int size, ++ gfp_t gfp) + { + BUILD_BUG_ON(offsetof(struct skb_array, ring)); + return ptr_ring_resize_multiple((struct ptr_ring **)rings, +diff --git a/include/net/ip.h b/include/net/ip.h +index d3a107850a41..51c6b9786c46 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -339,7 +339,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, + !forwarding) + return dst_mtu(dst); + +- return min(dst->dev->mtu, IP_MAX_MTU); ++ return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); + } + + static inline unsigned int ip_skb_dst_mtu(struct sock *sk, +@@ -351,7 +351,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk, + return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); + } + +- return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU); ++ return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); + } + + u32 ip_idents_reserve(u32 hash, int segs); +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h +index e6aa0a249672..f18fc1a0321f 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -768,8 +768,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, + old = *pold; + *pold = new; + if (old != NULL) { +- qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); ++ unsigned int qlen = old->q.qlen; ++ unsigned int backlog = old->qstats.backlog; ++ + qdisc_reset(old); ++ qdisc_tree_reduce_backlog(old, qlen, backlog); + } + 
sch_tree_unlock(sch); + +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 8ce679d36c58..779c871c5dcd 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -139,7 +139,7 @@ struct bpf_verifier_stack_elem { + struct bpf_verifier_stack_elem *next; + }; + +-#define BPF_COMPLEXITY_LIMIT_INSNS 65536 ++#define BPF_COMPLEXITY_LIMIT_INSNS 98304 + #define BPF_COMPLEXITY_LIMIT_STACK 1024 + + struct bpf_call_arg_meta { +@@ -682,12 +682,13 @@ static int check_ctx_access(struct bpf_verifier_env *env, int off, int size, + return -EACCES; + } + +-static bool is_pointer_value(struct bpf_verifier_env *env, int regno) ++static bool __is_pointer_value(bool allow_ptr_leaks, ++ const struct bpf_reg_state *reg) + { +- if (env->allow_ptr_leaks) ++ if (allow_ptr_leaks) + return false; + +- switch (env->cur_state.regs[regno].type) { ++ switch (reg->type) { + case UNKNOWN_VALUE: + case CONST_IMM: + return false; +@@ -696,6 +697,11 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno) + } + } + ++static bool is_pointer_value(struct bpf_verifier_env *env, int regno) ++{ ++ return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]); ++} ++ + static int check_ptr_alignment(struct bpf_verifier_env *env, + struct bpf_reg_state *reg, int off, int size) + { +@@ -1467,6 +1473,65 @@ static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn) + return 0; + } + ++static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env, ++ struct bpf_insn *insn) ++{ ++ struct bpf_reg_state *regs = env->cur_state.regs; ++ struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; ++ struct bpf_reg_state *src_reg = ®s[insn->src_reg]; ++ u8 opcode = BPF_OP(insn->code); ++ s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm); ++ ++ /* BPF_X code with src_reg->type UNKNOWN_VALUE here. */ ++ if (src_reg->imm > 0 && dst_reg->imm) { ++ switch (opcode) { ++ case BPF_ADD: ++ /* dreg += sreg ++ * where both have zero upper bits. Adding them ++ * can only result making one more bit non-zero ++ * in the larger value. ++ * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47) ++ * 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47) ++ */ ++ dst_reg->imm = min(src_reg->imm, 63 - imm_log2); ++ dst_reg->imm--; ++ break; ++ case BPF_AND: ++ /* dreg &= sreg ++ * AND can not extend zero bits only shrink ++ * Ex. 0x00..00ffffff ++ * & 0x0f..ffffffff ++ * ---------------- ++ * 0x00..00ffffff ++ */ ++ dst_reg->imm = max(src_reg->imm, 63 - imm_log2); ++ break; ++ case BPF_OR: ++ /* dreg |= sreg ++ * OR can only extend zero bits ++ * Ex. 0x00..00ffffff ++ * | 0x0f..ffffffff ++ * ---------------- ++ * 0x0f..00ffffff ++ */ ++ dst_reg->imm = min(src_reg->imm, 63 - imm_log2); ++ break; ++ case BPF_SUB: ++ case BPF_MUL: ++ case BPF_RSH: ++ case BPF_LSH: ++ /* These may be flushed out later */ ++ default: ++ mark_reg_unknown_value(regs, insn->dst_reg); ++ } ++ } else { ++ mark_reg_unknown_value(regs, insn->dst_reg); ++ } ++ ++ dst_reg->type = UNKNOWN_VALUE; ++ return 0; ++} ++ + static int evaluate_reg_imm_alu(struct bpf_verifier_env *env, + struct bpf_insn *insn) + { +@@ -1475,6 +1540,9 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env, + struct bpf_reg_state *src_reg = ®s[insn->src_reg]; + u8 opcode = BPF_OP(insn->code); + ++ if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE) ++ return evaluate_reg_imm_alu_unknown(env, insn); ++ + /* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn. 
+ * Don't care about overflow or negative values, just add them + */ +@@ -1530,10 +1598,24 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, + } + + /* We don't know anything about what was done to this register, mark it +- * as unknown. ++ * as unknown. Also, if both derived bounds came from signed/unsigned ++ * mixed compares and one side is unbounded, we cannot really do anything ++ * with them as boundaries cannot be trusted. Thus, arithmetic of two ++ * regs of such kind will get invalidated bounds on the dst side. + */ +- if (min_val == BPF_REGISTER_MIN_RANGE && +- max_val == BPF_REGISTER_MAX_RANGE) { ++ if ((min_val == BPF_REGISTER_MIN_RANGE && ++ max_val == BPF_REGISTER_MAX_RANGE) || ++ (BPF_SRC(insn->code) == BPF_X && ++ ((min_val != BPF_REGISTER_MIN_RANGE && ++ max_val == BPF_REGISTER_MAX_RANGE) || ++ (min_val == BPF_REGISTER_MIN_RANGE && ++ max_val != BPF_REGISTER_MAX_RANGE) || ++ (dst_reg->min_value != BPF_REGISTER_MIN_RANGE && ++ dst_reg->max_value == BPF_REGISTER_MAX_RANGE) || ++ (dst_reg->min_value == BPF_REGISTER_MIN_RANGE && ++ dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) && ++ regs[insn->dst_reg].value_from_signed != ++ regs[insn->src_reg].value_from_signed)) { + reset_reg_range_values(regs, insn->dst_reg); + return; + } +@@ -1542,10 +1624,12 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, + * do our normal operations to the register, we need to set the values + * to the min/max since they are undefined. + */ +- if (min_val == BPF_REGISTER_MIN_RANGE) +- dst_reg->min_value = BPF_REGISTER_MIN_RANGE; +- if (max_val == BPF_REGISTER_MAX_RANGE) +- dst_reg->max_value = BPF_REGISTER_MAX_RANGE; ++ if (opcode != BPF_SUB) { ++ if (min_val == BPF_REGISTER_MIN_RANGE) ++ dst_reg->min_value = BPF_REGISTER_MIN_RANGE; ++ if (max_val == BPF_REGISTER_MAX_RANGE) ++ dst_reg->max_value = BPF_REGISTER_MAX_RANGE; ++ } + + switch (opcode) { + case BPF_ADD: +@@ -1555,10 +1639,17 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, + dst_reg->max_value += max_val; + break; + case BPF_SUB: ++ /* If one of our values was at the end of our ranges, then the ++ * _opposite_ value in the dst_reg goes to the end of our range. ++ */ ++ if (min_val == BPF_REGISTER_MIN_RANGE) ++ dst_reg->max_value = BPF_REGISTER_MAX_RANGE; ++ if (max_val == BPF_REGISTER_MAX_RANGE) ++ dst_reg->min_value = BPF_REGISTER_MIN_RANGE; + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) +- dst_reg->min_value -= min_val; ++ dst_reg->min_value -= max_val; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) +- dst_reg->max_value -= max_val; ++ dst_reg->max_value -= min_val; + break; + case BPF_MUL: + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) +@@ -1808,6 +1899,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) + * register as unknown. + */ + if (env->allow_ptr_leaks && ++ BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD && + (dst_reg->type == PTR_TO_MAP_VALUE || + dst_reg->type == PTR_TO_MAP_VALUE_ADJ)) + dst_reg->type = PTR_TO_MAP_VALUE_ADJ; +@@ -1876,38 +1968,63 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, + struct bpf_reg_state *false_reg, u64 val, + u8 opcode) + { ++ bool value_from_signed = true; ++ bool is_range = true; ++ + switch (opcode) { + case BPF_JEQ: + /* If this is false then we know nothing Jon Snow, but if it is + * true then we know for sure. 
+ */ + true_reg->max_value = true_reg->min_value = val; ++ is_range = false; + break; + case BPF_JNE: + /* If this is true we know nothing Jon Snow, but if it is false + * we know the value for sure; + */ + false_reg->max_value = false_reg->min_value = val; ++ is_range = false; + break; + case BPF_JGT: +- /* Unsigned comparison, the minimum value is 0. */ +- false_reg->min_value = 0; ++ value_from_signed = false; ++ /* fallthrough */ + case BPF_JSGT: ++ if (true_reg->value_from_signed != value_from_signed) ++ reset_reg_range_values(true_reg, 0); ++ if (false_reg->value_from_signed != value_from_signed) ++ reset_reg_range_values(false_reg, 0); ++ if (opcode == BPF_JGT) { ++ /* Unsigned comparison, the minimum value is 0. */ ++ false_reg->min_value = 0; ++ } + /* If this is false then we know the maximum val is val, + * otherwise we know the min val is val+1. + */ + false_reg->max_value = val; ++ false_reg->value_from_signed = value_from_signed; + true_reg->min_value = val + 1; ++ true_reg->value_from_signed = value_from_signed; + break; + case BPF_JGE: +- /* Unsigned comparison, the minimum value is 0. */ +- false_reg->min_value = 0; ++ value_from_signed = false; ++ /* fallthrough */ + case BPF_JSGE: ++ if (true_reg->value_from_signed != value_from_signed) ++ reset_reg_range_values(true_reg, 0); ++ if (false_reg->value_from_signed != value_from_signed) ++ reset_reg_range_values(false_reg, 0); ++ if (opcode == BPF_JGE) { ++ /* Unsigned comparison, the minimum value is 0. */ ++ false_reg->min_value = 0; ++ } + /* If this is false then we know the maximum value is val - 1, + * otherwise we know the mimimum value is val. + */ + false_reg->max_value = val - 1; ++ false_reg->value_from_signed = value_from_signed; + true_reg->min_value = val; ++ true_reg->value_from_signed = value_from_signed; + break; + default: + break; +@@ -1915,6 +2032,12 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, + + check_reg_overflow(false_reg); + check_reg_overflow(true_reg); ++ if (is_range) { ++ if (__is_pointer_value(false, false_reg)) ++ reset_reg_range_values(false_reg, 0); ++ if (__is_pointer_value(false, true_reg)) ++ reset_reg_range_values(true_reg, 0); ++ } + } + + /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg +@@ -1924,39 +2047,64 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, + struct bpf_reg_state *false_reg, u64 val, + u8 opcode) + { ++ bool value_from_signed = true; ++ bool is_range = true; ++ + switch (opcode) { + case BPF_JEQ: + /* If this is false then we know nothing Jon Snow, but if it is + * true then we know for sure. + */ + true_reg->max_value = true_reg->min_value = val; ++ is_range = false; + break; + case BPF_JNE: + /* If this is true we know nothing Jon Snow, but if it is false + * we know the value for sure; + */ + false_reg->max_value = false_reg->min_value = val; ++ is_range = false; + break; + case BPF_JGT: +- /* Unsigned comparison, the minimum value is 0. */ +- true_reg->min_value = 0; ++ value_from_signed = false; ++ /* fallthrough */ + case BPF_JSGT: ++ if (true_reg->value_from_signed != value_from_signed) ++ reset_reg_range_values(true_reg, 0); ++ if (false_reg->value_from_signed != value_from_signed) ++ reset_reg_range_values(false_reg, 0); ++ if (opcode == BPF_JGT) { ++ /* Unsigned comparison, the minimum value is 0. */ ++ true_reg->min_value = 0; ++ } + /* + * If this is false, then the val is <= the register, if it is + * true the register <= to the val. 
+ */ + false_reg->min_value = val; ++ false_reg->value_from_signed = value_from_signed; + true_reg->max_value = val - 1; ++ true_reg->value_from_signed = value_from_signed; + break; + case BPF_JGE: +- /* Unsigned comparison, the minimum value is 0. */ +- true_reg->min_value = 0; ++ value_from_signed = false; ++ /* fallthrough */ + case BPF_JSGE: ++ if (true_reg->value_from_signed != value_from_signed) ++ reset_reg_range_values(true_reg, 0); ++ if (false_reg->value_from_signed != value_from_signed) ++ reset_reg_range_values(false_reg, 0); ++ if (opcode == BPF_JGE) { ++ /* Unsigned comparison, the minimum value is 0. */ ++ true_reg->min_value = 0; ++ } + /* If this is false then constant < register, if it is true then + * the register < constant. + */ + false_reg->min_value = val + 1; ++ false_reg->value_from_signed = value_from_signed; + true_reg->max_value = val; ++ true_reg->value_from_signed = value_from_signed; + break; + default: + break; +@@ -1964,6 +2112,12 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, + + check_reg_overflow(false_reg); + check_reg_overflow(true_reg); ++ if (is_range) { ++ if (__is_pointer_value(false, false_reg)) ++ reset_reg_range_values(false_reg, 0); ++ if (__is_pointer_value(false, true_reg)) ++ reset_reg_range_values(true_reg, 0); ++ } + } + + static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, +@@ -2390,6 +2544,7 @@ static int check_cfg(struct bpf_verifier_env *env) + env->explored_states[t + 1] = STATE_LIST_MARK; + } else { + /* conditional jump with two edges */ ++ env->explored_states[t] = STATE_LIST_MARK; + ret = push_insn(t, t + 1, FALLTHROUGH, env); + if (ret == 1) + goto peek_stack; +@@ -2548,6 +2703,12 @@ static bool states_equal(struct bpf_verifier_env *env, + rcur->type != NOT_INIT)) + continue; + ++ /* Don't care about the reg->id in this case. */ ++ if (rold->type == PTR_TO_MAP_VALUE_OR_NULL && ++ rcur->type == PTR_TO_MAP_VALUE_OR_NULL && ++ rold->map_ptr == rcur->map_ptr) ++ continue; ++ + if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && + compare_ptrs_to_packet(rold, rcur)) + continue; +@@ -2682,6 +2843,9 @@ static int do_check(struct bpf_verifier_env *env) + goto process_bpf_exit; + } + ++ if (need_resched()) ++ cond_resched(); ++ + if (log_level && do_print_state) { + verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx); + print_verifier_state(&env->cur_state); +diff --git a/kernel/events/core.c b/kernel/events/core.c +index f5a693589d66..c774773ac3a4 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -9786,28 +9786,27 @@ SYSCALL_DEFINE5(perf_event_open, + goto err_context; + + /* +- * Do not allow to attach to a group in a different +- * task or CPU context: ++ * Make sure we're both events for the same CPU; ++ * grouping events for different CPUs is broken; since ++ * you can never concurrently schedule them anyhow. + */ +- if (move_group) { +- /* +- * Make sure we're both on the same task, or both +- * per-cpu events. +- */ +- if (group_leader->ctx->task != ctx->task) +- goto err_context; ++ if (group_leader->cpu != event->cpu) ++ goto err_context; + +- /* +- * Make sure we're both events for the same CPU; +- * grouping events for different CPUs is broken; since +- * you can never concurrently schedule them anyhow. +- */ +- if (group_leader->cpu != event->cpu) +- goto err_context; +- } else { +- if (group_leader->ctx != ctx) +- goto err_context; +- } ++ /* ++ * Make sure we're both on the same task, or both ++ * per-CPU events. 
++ */ ++ if (group_leader->ctx->task != ctx->task) ++ goto err_context; ++ ++ /* ++ * Do not allow to attach to a group in a different task ++ * or CPU context. If we're moving SW events, we'll fix ++ * this up later, so allow that. ++ */ ++ if (!move_group && group_leader->ctx != ctx) ++ goto err_context; + + /* + * Only a group leader can be exclusive or pinned +diff --git a/kernel/fork.c b/kernel/fork.c +index 59faac4de181..50bf262cc427 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -766,6 +766,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, + mm_init_cpumask(mm); + mm_init_aio(mm); + mm_init_owner(mm, p); ++ RCU_INIT_POINTER(mm->exe_file, NULL); + mmu_notifier_mm_init(mm); + clear_tlb_flush_pending(mm); + #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index 944ad64277a6..df445cde8a1e 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -201,6 +201,7 @@ struct timer_base { + bool migration_enabled; + bool nohz_active; + bool is_idle; ++ bool must_forward_clk; + DECLARE_BITMAP(pending_map, WHEEL_SIZE); + struct hlist_head vectors[WHEEL_SIZE]; + } ____cacheline_aligned; +@@ -891,13 +892,19 @@ get_target_base(struct timer_base *base, unsigned tflags) + + static inline void forward_timer_base(struct timer_base *base) + { +- unsigned long jnow = READ_ONCE(jiffies); ++ unsigned long jnow; + + /* +- * We only forward the base when it's idle and we have a delta between +- * base clock and jiffies. ++ * We only forward the base when we are idle or have just come out of ++ * idle (must_forward_clk logic), and have a delta between base clock ++ * and jiffies. In the common case, run_timers will take care of it. + */ +- if (!base->is_idle || (long) (jnow - base->clk) < 2) ++ if (likely(!base->must_forward_clk)) ++ return; ++ ++ jnow = READ_ONCE(jiffies); ++ base->must_forward_clk = base->is_idle; ++ if ((long)(jnow - base->clk) < 2) + return; + + /* +@@ -973,6 +980,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) + * same array bucket then just return: + */ + if (timer_pending(timer)) { ++ /* ++ * The downside of this optimization is that it can result in ++ * larger granularity than you would get from adding a new ++ * timer with this expiry. ++ */ + if (timer->expires == expires) + return 1; + +@@ -983,6 +995,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) + * dequeue/enqueue dance. 
+ */ + base = lock_timer_base(timer, &flags); ++ forward_timer_base(base); + + clk = base->clk; + idx = calc_wheel_index(expires, clk); +@@ -999,6 +1012,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) + } + } else { + base = lock_timer_base(timer, &flags); ++ forward_timer_base(base); + } + + timer_stats_timer_set_start_info(timer); +@@ -1028,12 +1042,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) + spin_lock(&base->lock); + WRITE_ONCE(timer->flags, + (timer->flags & ~TIMER_BASEMASK) | base->cpu); ++ forward_timer_base(base); + } + } + +- /* Try to forward a stale timer base clock */ +- forward_timer_base(base); +- + timer->expires = expires; + /* + * If 'idx' was calculated above and the base time did not advance +@@ -1150,6 +1162,7 @@ void add_timer_on(struct timer_list *timer, int cpu) + WRITE_ONCE(timer->flags, + (timer->flags & ~TIMER_BASEMASK) | cpu); + } ++ forward_timer_base(base); + + debug_activate(timer, timer->expires); + internal_add_timer(base, timer); +@@ -1538,10 +1551,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) + if (!is_max_delta) + expires = basem + (u64)(nextevt - basej) * TICK_NSEC; + /* +- * If we expect to sleep more than a tick, mark the base idle: ++ * If we expect to sleep more than a tick, mark the base idle. ++ * Also the tick is stopped so any added timer must forward ++ * the base clk itself to keep granularity small. This idle ++ * logic is only maintained for the BASE_STD base, deferrable ++ * timers may still see large granularity skew (by design). + */ +- if ((expires - basem) > TICK_NSEC) ++ if ((expires - basem) > TICK_NSEC) { ++ base->must_forward_clk = true; + base->is_idle = true; ++ } + } + spin_unlock(&base->lock); + +@@ -1651,6 +1670,19 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) + { + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); + ++ /* ++ * must_forward_clk must be cleared before running timers so that any ++ * timer functions that call mod_timer will not try to forward the ++ * base. idle tracking / clock forwarding logic is only used with ++ * BASE_STD timers. ++ * ++ * The deferrable base does not do idle tracking at all, so we do ++ * not forward it. This can result in very large variations in ++ * granularity for deferrable timers, but they can be deferred for ++ * long periods due to idle. ++ */ ++ base->must_forward_clk = false; ++ + __run_timers(base); + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) + __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c +index 5dcb99281259..41805fb3c661 100644 +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -203,10 +203,36 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, + fmt_cnt++; + } + +- return __trace_printk(1/* fake ip will not be printed */, fmt, +- mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1, +- mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2, +- mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3); ++/* Horrid workaround for getting va_list handling working with different ++ * argument type combinations generically for 32 and 64 bit archs. ++ */ ++#define __BPF_TP_EMIT() __BPF_ARG3_TP() ++#define __BPF_TP(...) \ ++ __trace_printk(1 /* Fake ip will not be printed. */, \ ++ fmt, ##__VA_ARGS__) ++ ++#define __BPF_ARG1_TP(...) \ ++ ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \ ++ ?
__BPF_TP(arg1, ##__VA_ARGS__) \ ++ : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \ ++ ? __BPF_TP((long)arg1, ##__VA_ARGS__) \ ++ : __BPF_TP((u32)arg1, ##__VA_ARGS__))) ++ ++#define __BPF_ARG2_TP(...) \ ++ ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \ ++ ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \ ++ : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \ ++ ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \ ++ : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__))) ++ ++#define __BPF_ARG3_TP(...) \ ++ ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \ ++ ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \ ++ : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \ ++ ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \ ++ : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__))) ++ ++ return __BPF_TP_EMIT(); + } + + static const struct bpf_func_proto bpf_trace_printk_proto = { +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index 4f7ea8446bb5..6e432ed7d0fe 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -876,6 +876,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace) + + function_profile_call(trace->func, 0, NULL, NULL); + ++ /* If function graph is shutting down, ret_stack can be NULL */ ++ if (!current->ret_stack) ++ return 0; ++ + if (index >= 0 && index < FTRACE_RETFUNC_DEPTH) + current->ret_stack[index].subtime = 0; + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 53c308068e39..7379f735a9f4 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -7767,4 +7767,4 @@ __init static int clear_boot_tracer(void) + } + + fs_initcall(tracer_init_tracefs); +-late_initcall(clear_boot_tracer); ++late_initcall_sync(clear_boot_tracer); +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c +index 9daa9b3bc6d9..0193f58c45f0 100644 +--- a/kernel/trace/trace_events_filter.c ++++ b/kernel/trace/trace_events_filter.c +@@ -1926,6 +1926,10 @@ static int create_filter(struct trace_event_call *call, + if (err && set_str) + append_filter_err(ps, filter); + } ++ if (err && !set_str) { ++ free_event_filter(filter); ++ filter = NULL; ++ } + create_filter_finish(ps); + + *filterp = filter; +diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c +index 0a689bbb78ef..305039b122fa 100644 +--- a/kernel/trace/tracing_map.c ++++ b/kernel/trace/tracing_map.c +@@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a) + if (!a) + return; + +- if (!a->pages) { +- kfree(a); +- return; +- } ++ if (!a->pages) ++ goto free; + + for (i = 0; i < a->n_pages; i++) { + if (!a->pages[i]) + break; + free_page((unsigned long)a->pages[i]); + } ++ ++ kfree(a->pages); ++ ++ free: ++ kfree(a); + } + + struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts, +diff --git a/mm/madvise.c b/mm/madvise.c +index 253b1533fba5..63a12162f4c6 100644 +--- a/mm/madvise.c ++++ b/mm/madvise.c +@@ -331,8 +331,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, + pte_offset_map_lock(mm, pmd, addr, &ptl); + goto out; + } +- put_page(page); + unlock_page(page); ++ put_page(page); + pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + pte--; + addr -= PAGE_SIZE; +diff --git a/mm/memblock.c b/mm/memblock.c +index ccec42c12ba8..42b98af6a415 100644 +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -311,7 +311,7 @@ void __init memblock_discard(void) + __memblock_free_late(addr, size); + } + +- if (memblock.memory.regions == memblock_memory_init_regions) { ++ if (memblock.memory.regions != 
memblock_memory_init_regions) { + addr = __pa(memblock.memory.regions); + size = PAGE_ALIGN(sizeof(struct memblock_region) * + memblock.memory.max); +diff --git a/mm/shmem.c b/mm/shmem.c +index 7ee5444ffb6d..004e0f87e8a8 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -3810,7 +3810,7 @@ int __init shmem_init(void) + } + + #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +- if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY) ++ if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) + SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; + else + shmem_huge = 0; /* just in case it was patched */ +@@ -3871,7 +3871,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, + return -EINVAL; + + shmem_huge = huge; +- if (shmem_huge < SHMEM_HUGE_DENY) ++ if (shmem_huge > SHMEM_HUGE_DENY) + SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; + return count; + } +diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c +index fbf251fef70f..4d6b94d7ce5f 100644 +--- a/net/bluetooth/bnep/core.c ++++ b/net/bluetooth/bnep/core.c +@@ -484,16 +484,16 @@ static int bnep_session(void *arg) + struct net_device *dev = s->dev; + struct sock *sk = s->sock->sk; + struct sk_buff *skb; +- wait_queue_t wait; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + + BT_DBG(""); + + set_user_nice(current, -15); + +- init_waitqueue_entry(&wait, current); + add_wait_queue(sk_sleep(sk), &wait); + while (1) { +- set_current_state(TASK_INTERRUPTIBLE); ++ /* Ensure session->terminate is updated */ ++ smp_mb__before_atomic(); + + if (atomic_read(&s->terminate)) + break; +@@ -515,9 +515,8 @@ static int bnep_session(void *arg) + break; + netif_wake_queue(dev); + +- schedule(); ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } +- __set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + + /* Cleanup session */ +@@ -666,7 +665,7 @@ int bnep_del_connection(struct bnep_conndel_req *req) + s = __bnep_get_session(req->dst); + if (s) { + atomic_inc(&s->terminate); +- wake_up_process(s->task); ++ wake_up_interruptible(sk_sleep(s->sock->sk)); + } else + err = -ENOENT; + +diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c +index 9e59b6654126..1152ce34dad4 100644 +--- a/net/bluetooth/cmtp/core.c ++++ b/net/bluetooth/cmtp/core.c +@@ -280,16 +280,16 @@ static int cmtp_session(void *arg) + struct cmtp_session *session = arg; + struct sock *sk = session->sock->sk; + struct sk_buff *skb; +- wait_queue_t wait; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + + BT_DBG("session %p", session); + + set_user_nice(current, -15); + +- init_waitqueue_entry(&wait, current); + add_wait_queue(sk_sleep(sk), &wait); + while (1) { +- set_current_state(TASK_INTERRUPTIBLE); ++ /* Ensure session->terminate is updated */ ++ smp_mb__before_atomic(); + + if (atomic_read(&session->terminate)) + break; +@@ -306,9 +306,8 @@ static int cmtp_session(void *arg) + + cmtp_process_transmit(session); + +- schedule(); ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } +- __set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + + down_write(&cmtp_session_sem); +@@ -393,7 +392,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) + err = cmtp_attach_device(session); + if (err < 0) { + atomic_inc(&session->terminate); +- wake_up_process(session->task); ++ wake_up_interruptible(sk_sleep(session->sock->sk)); + up_write(&cmtp_session_sem); + return err; + } +@@ -431,7 +430,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req) + + /* Stop session 
thread */ + atomic_inc(&session->terminate); +- wake_up_process(session->task); ++ ++ /* Ensure session->terminate is updated */ ++ smp_mb__after_atomic(); ++ ++ wake_up_interruptible(sk_sleep(session->sock->sk)); + } else + err = -ENOENT; + +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c +index 0bec4588c3c8..1fc076420d1e 100644 +--- a/net/bluetooth/hidp/core.c ++++ b/net/bluetooth/hidp/core.c +@@ -36,6 +36,7 @@ + #define VERSION "1.2" + + static DECLARE_RWSEM(hidp_session_sem); ++static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq); + static LIST_HEAD(hidp_session_list); + + static unsigned char hidp_keycode[256] = { +@@ -1068,12 +1069,12 @@ static int hidp_session_start_sync(struct hidp_session *session) + * Wake up session thread and notify it to stop. This is asynchronous and + * returns immediately. Call this whenever a runtime error occurs and you want + * the session to stop. +- * Note: wake_up_process() performs any necessary memory-barriers for us. ++ * Note: wake_up_interruptible() performs any necessary memory-barriers for us. + */ + static void hidp_session_terminate(struct hidp_session *session) + { + atomic_inc(&session->terminate); +- wake_up_process(session->task); ++ wake_up_interruptible(&hidp_session_wq); + } + + /* +@@ -1180,7 +1181,9 @@ static void hidp_session_run(struct hidp_session *session) + struct sock *ctrl_sk = session->ctrl_sock->sk; + struct sock *intr_sk = session->intr_sock->sk; + struct sk_buff *skb; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + ++ add_wait_queue(&hidp_session_wq, &wait); + for (;;) { + /* + * This thread can be woken up two ways: +@@ -1188,12 +1191,10 @@ static void hidp_session_run(struct hidp_session *session) + * session->terminate flag and wakes this thread up. + * - Via modifying the socket state of ctrl/intr_sock. This + * thread is woken up by ->sk_state_changed(). +- * +- * Note: set_current_state() performs any necessary +- * memory-barriers for us. + */ +- set_current_state(TASK_INTERRUPTIBLE); + ++ /* Ensure session->terminate is updated */ ++ smp_mb__before_atomic(); + if (atomic_read(&session->terminate)) + break; + +@@ -1227,11 +1228,22 @@ static void hidp_session_run(struct hidp_session *session) + hidp_process_transmit(session, &session->ctrl_transmit, + session->ctrl_sock); + +- schedule(); ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } ++ remove_wait_queue(&hidp_session_wq, &wait); + + atomic_inc(&session->terminate); +- set_current_state(TASK_RUNNING); ++ ++ /* Ensure session->terminate is updated */ ++ smp_mb__after_atomic(); ++} ++ ++static int hidp_session_wake_function(wait_queue_t *wait, ++ unsigned int mode, ++ int sync, void *key) ++{ ++ wake_up_interruptible(&hidp_session_wq); ++ return false; + } + + /* +@@ -1244,7 +1256,8 @@ static void hidp_session_run(struct hidp_session *session) + static int hidp_session_thread(void *arg) + { + struct hidp_session *session = arg; +- wait_queue_t ctrl_wait, intr_wait; ++ DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function); ++ DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function); + + BT_DBG("session %p", session); + +@@ -1254,8 +1267,6 @@ static int hidp_session_thread(void *arg) + set_user_nice(current, -15); + hidp_set_timer(session); + +- init_waitqueue_entry(&ctrl_wait, current); +- init_waitqueue_entry(&intr_wait, current); + add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); + add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); + /* This memory barrier is paired with wq_has_sleeper(). 
See +diff --git a/net/dccp/proto.c b/net/dccp/proto.c +index 9fe25bf63296..b68168fcc06a 100644 +--- a/net/dccp/proto.c ++++ b/net/dccp/proto.c +@@ -24,6 +24,7 @@ + #include <net/checksum.h> + + #include <net/inet_sock.h> ++#include <net/inet_common.h> + #include <net/sock.h> + #include <net/xfrm.h> + +@@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type) + + EXPORT_SYMBOL_GPL(dccp_packet_name); + ++static void dccp_sk_destruct(struct sock *sk) ++{ ++ struct dccp_sock *dp = dccp_sk(sk); ++ ++ ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); ++ dp->dccps_hc_tx_ccid = NULL; ++ inet_sock_destruct(sk); ++} ++ + int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) + { + struct dccp_sock *dp = dccp_sk(sk); +@@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) + icsk->icsk_syn_retries = sysctl_dccp_request_retries; + sk->sk_state = DCCP_CLOSED; + sk->sk_write_space = dccp_write_space; ++ sk->sk_destruct = dccp_sk_destruct; + icsk->icsk_sync_mss = dccp_sync_mss; + dp->dccps_mss_cache = 536; + dp->dccps_rate_last = jiffies; +@@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk) + { + struct dccp_sock *dp = dccp_sk(sk); + +- /* +- * DCCP doesn't use sk_write_queue, just sk_send_head +- * for retransmissions +- */ ++ __skb_queue_purge(&sk->sk_write_queue); + if (sk->sk_send_head != NULL) { + kfree_skb(sk->sk_send_head); + sk->sk_send_head = NULL; +@@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk) + dp->dccps_hc_rx_ackvec = NULL; + } + ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); +- ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); +- dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL; ++ dp->dccps_hc_rx_ccid = NULL; + + /* clean up feature negotiation state */ + dccp_feat_list_purge(&dp->dccps_featneg); +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index 7563831fa432..38c1c979ecb1 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -1044,15 +1044,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg) + fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); + if (!fi) + goto failure; +- fib_info_cnt++; + if (cfg->fc_mx) { + fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL); +- if (!fi->fib_metrics) +- goto failure; ++ if (unlikely(!fi->fib_metrics)) { ++ kfree(fi); ++ return ERR_PTR(err); ++ } + atomic_set(&fi->fib_metrics->refcnt, 1); +- } else ++ } else { + fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; +- ++ } ++ fib_info_cnt++; + fi->fib_net = net; + fi->fib_protocol = cfg->fc_protocol; + fi->fib_scope = cfg->fc_scope; +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 6cd49fd17ac0..6a5b7783932e 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -1247,7 +1247,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) + if (mtu) + return mtu; + +- mtu = dst->dev->mtu; ++ mtu = READ_ONCE(dst->dev->mtu); + + if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { + if (rt->rt_uses_gateway && mtu > 576) +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 32c540145c17..c03850771a4e 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -3036,8 +3036,7 @@ void tcp_rearm_rto(struct sock *sk) + /* delta may not be positive if the socket is locked + * when the retrans timer fires and is rescheduled. 
+ */ +- if (delta > 0) +- rto = delta; ++ rto = max(delta, 1); + } + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, + TCP_RTO_MAX); +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index 4345ee39f180..ff389591a340 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -897,6 +897,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, + } + nsiblings = iter->rt6i_nsiblings; + fib6_purge_rt(iter, fn, info->nl_net); ++ if (fn->rr_ptr == iter) ++ fn->rr_ptr = NULL; + rt6_release(iter); + + if (nsiblings) { +@@ -909,6 +911,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, + if (rt6_qualify_for_ecmp(iter)) { + *ins = iter->dst.rt6_next; + fib6_purge_rt(iter, fn, info->nl_net); ++ if (fn->rr_ptr == iter) ++ fn->rr_ptr = NULL; + rt6_release(iter); + nsiblings--; + } else { +@@ -997,7 +1001,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + /* Create subtree root node */ + sfn = node_alloc(); + if (!sfn) +- goto st_failure; ++ goto failure; + + sfn->leaf = info->nl_net->ipv6.ip6_null_entry; + atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref); +@@ -1013,12 +1017,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + + if (IS_ERR(sn)) { + /* If it is failed, discard just allocated +- root, and then (in st_failure) stale node ++ root, and then (in failure) stale node + in main tree. + */ + node_free(sfn); + err = PTR_ERR(sn); +- goto st_failure; ++ goto failure; + } + + /* Now link new subtree to main tree */ +@@ -1032,7 +1036,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + + if (IS_ERR(sn)) { + err = PTR_ERR(sn); +- goto st_failure; ++ goto failure; + } + } + +@@ -1074,22 +1078,22 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + atomic_inc(&pn->leaf->rt6i_ref); + } + #endif +- if (!(rt->dst.flags & DST_NOCACHE)) +- dst_free(&rt->dst); ++ goto failure; + } + return err; + +-#ifdef CONFIG_IPV6_SUBTREES +- /* Subtree creation failed, probably main tree node +- is orphan. If it is, shoot it. ++failure: ++ /* fn->leaf could be NULL if fn is an intermediate node and we ++ * failed to add the new route to it in both subtree creation ++ * failure and fib6_add_rt2node() failure case. ++ * In both cases, fib6_repair_tree() should be called to fix ++ * fn->leaf. 
+ */ +-st_failure: + if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) + fib6_repair_tree(info->nl_net, fn); + if (!(rt->dst.flags & DST_NOCACHE)) + dst_free(&rt->dst); + return err; +-#endif + } + + /* +diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c +index 391c3cbd2eed..101ed6c42808 100644 +--- a/net/irda/af_irda.c ++++ b/net/irda/af_irda.c +@@ -2223,7 +2223,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname, + { + struct sock *sk = sock->sk; + struct irda_sock *self = irda_sk(sk); +- struct irda_device_list list; ++ struct irda_device_list list = { 0 }; + struct irda_device_info *discoveries; + struct irda_ias_set * ias_opt; /* IAS get/query params */ + struct ias_object * ias_obj; /* Object in IAS */ +diff --git a/net/key/af_key.c b/net/key/af_key.c +index 2e1050ec2cf0..94bf810ad242 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, + #define BROADCAST_ONE 1 + #define BROADCAST_REGISTERED 2 + #define BROADCAST_PROMISC_ONLY 4 +-static int pfkey_broadcast(struct sk_buff *skb, ++static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, + int broadcast_flags, struct sock *one_sk, + struct net *net) + { +@@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb, + rcu_read_unlock(); + + if (one_sk != NULL) +- err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk); ++ err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); + + kfree_skb(skb2); + kfree_skb(skb); +@@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk) + hdr = (struct sadb_msg *) pfk->dump.skb->data; + hdr->sadb_msg_seq = 0; + hdr->sadb_msg_errno = rc; +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, + &pfk->sk, sock_net(&pfk->sk)); + pfk->dump.skb = NULL; + } +@@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk) + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / + sizeof(uint64_t)); + +- pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); ++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk)); + + return 0; + } +@@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_ + + xfrm_state_put(x); + +- pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net); ++ pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net); + + return 0; + } +@@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c) + hdr->sadb_msg_seq = c->seq; + hdr->sadb_msg_pid = c->portid; + +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x)); ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x)); + + return 0; + } +@@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg + out_hdr->sadb_msg_reserved = 0; + out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; + out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; +- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk)); + + return 0; + } +@@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad + return -ENOBUFS; + } + +- pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk)); +- ++ pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, ++ sock_net(sk)); + return 0; + } + +@@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr) 
+ hdr->sadb_msg_errno = (uint8_t) 0; + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + +- return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, ++ sock_net(sk)); + } + + static int key_notify_sa_flush(const struct km_event *c) +@@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c) + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + hdr->sadb_msg_reserved = 0; + +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net); ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); + + return 0; + } +@@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr) + out_hdr->sadb_msg_pid = pfk->dump.msg_portid; + + if (pfk->dump.skb) +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, + &pfk->sk, sock_net(&pfk->sk)); + pfk->dump.skb = out_skb; + +@@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb + new_hdr->sadb_msg_errno = 0; + } + +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk)); ++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk)); + return 0; + } + +@@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev + out_hdr->sadb_msg_errno = 0; + out_hdr->sadb_msg_seq = c->seq; + out_hdr->sadb_msg_pid = c->portid; +- pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); + return 0; + + } +@@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc + out_hdr->sadb_msg_errno = 0; + out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; + out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; +- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp)); + err = 0; + + out: +@@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) + out_hdr->sadb_msg_pid = pfk->dump.msg_portid; + + if (pfk->dump.skb) +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, + &pfk->sk, sock_net(&pfk->sk)); + pfk->dump.skb = out_skb; + +@@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c) + hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + hdr->sadb_msg_reserved = 0; +- pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net); ++ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); + return 0; + + } +@@ -2814,7 +2815,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb + void *ext_hdrs[SADB_EXT_MAX]; + int err; + +- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), ++ pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL, + BROADCAST_PROMISC_ONLY, NULL, sock_net(sk)); + + memset(ext_hdrs, 0, sizeof(ext_hdrs)); +@@ -3036,7 +3037,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c) + out_hdr->sadb_msg_seq = 0; + out_hdr->sadb_msg_pid = 0; + +- pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, ++ xs_net(x)); + return 0; + } + +@@ -3226,7 +3228,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct + xfrm_ctx->ctx_len); + } + +- return pfkey_broadcast(skb, 
BROADCAST_REGISTERED, NULL, xs_net(x)); ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, ++ xs_net(x)); + } + + static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, +@@ -3424,7 +3427,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, + n_port->sadb_x_nat_t_port_port = sport; + n_port->sadb_x_nat_t_port_reserved = 0; + +- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x)); ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, ++ xs_net(x)); + } + + #ifdef CONFIG_NET_KEY_MIGRATE +@@ -3616,7 +3620,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, + } + + /* broadcast migrate message to sockets */ +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net); ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net); + + return 0; + +diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c +index 5b9c884a452e..dde64c4565d2 100644 +--- a/net/netfilter/nf_nat_core.c ++++ b/net/netfilter/nf_nat_core.c +@@ -225,20 +225,21 @@ find_appropriate_src(struct net *net, + .tuple = tuple, + .zone = zone + }; +- struct rhlist_head *hl; ++ struct rhlist_head *hl, *h; + + hl = rhltable_lookup(&nf_nat_bysource_table, &key, + nf_nat_bysource_params); +- if (!hl) +- return 0; + +- ct = container_of(hl, typeof(*ct), nat_bysource); ++ rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) { ++ nf_ct_invert_tuplepr(result, ++ &ct->tuplehash[IP_CT_DIR_REPLY].tuple); ++ result->dst = tuple->dst; + +- nf_ct_invert_tuplepr(result, +- &ct->tuplehash[IP_CT_DIR_REPLY].tuple); +- result->dst = tuple->dst; ++ if (in_range(l3proto, l4proto, result, range)) ++ return 1; ++ } + +- return in_range(l3proto, l4proto, result, range); ++ return 0; + } + + /* For [FUTURE] fragmentation handling, we want the least-used +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c +index 4e03f64709bc..05d9f42fc309 100644 +--- a/net/openvswitch/actions.c ++++ b/net/openvswitch/actions.c +@@ -1240,6 +1240,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, + goto out; + } + ++ OVS_CB(skb)->acts_origlen = acts->orig_len; + err = do_execute_actions(dp, skb, key, + acts->actions, acts->actions_len); + +diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c +index 4d67ea856067..453f806afe6e 100644 +--- a/net/openvswitch/datapath.c ++++ b/net/openvswitch/datapath.c +@@ -383,7 +383,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, + } + + static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info, +- unsigned int hdrlen) ++ unsigned int hdrlen, int actions_attrlen) + { + size_t size = NLMSG_ALIGN(sizeof(struct ovs_header)) + + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */ +@@ -400,7 +400,7 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info, + + /* OVS_PACKET_ATTR_ACTIONS */ + if (upcall_info->actions_len) +- size += nla_total_size(upcall_info->actions_len); ++ size += nla_total_size(actions_attrlen); + + /* OVS_PACKET_ATTR_MRU */ + if (upcall_info->mru) +@@ -467,7 +467,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, + else + hlen = skb->len; + +- len = upcall_msg_size(upcall_info, hlen - cutlen); ++ len = upcall_msg_size(upcall_info, hlen - cutlen, ++ OVS_CB(skb)->acts_origlen); + user_skb = genlmsg_new(len, GFP_ATOMIC); + if (!user_skb) { + err = -ENOMEM; +diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h +index 
ab85c1cae255..e19ace428e38 100644 +--- a/net/openvswitch/datapath.h ++++ b/net/openvswitch/datapath.h +@@ -100,12 +100,14 @@ struct datapath { + * @input_vport: The original vport packet came in on. This value is cached + * when a packet is received by OVS. + * @mru: The maximum received fragement size; 0 if the packet is not ++ * @acts_origlen: The netlink size of the flow actions applied to this skb. + * @cutlen: The number of bytes from the packet end to be removed. + * fragmented. + */ + struct ovs_skb_cb { + struct vport *input_vport; + u16 mru; ++ u16 acts_origlen; + u32 cutlen; + }; + #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb) +diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c +index a1aec0a6c789..50030519a89b 100644 +--- a/net/sched/act_ipt.c ++++ b/net/sched/act_ipt.c +@@ -41,6 +41,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, + { + struct xt_tgchk_param par; + struct xt_target *target; ++ struct ipt_entry e = {}; + int ret = 0; + + target = xt_request_find_target(AF_INET, t->u.user.name, +@@ -51,6 +52,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, + t->u.kernel.target = target; + memset(&par, 0, sizeof(par)); + par.table = table; ++ par.entryinfo = &e; + par.target = target; + par.targinfo = t->data; + par.hook_mask = hook; +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index ff27a85a71a9..195a3b2d9afc 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -277,9 +277,6 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) + void qdisc_hash_add(struct Qdisc *q) + { + if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { +- struct Qdisc *root = qdisc_dev(q)->qdisc; +- +- WARN_ON_ONCE(root == &noop_qdisc); + ASSERT_RTNL(); + hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle); + } +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c +index bc5e99584e41..ea8a56f76b32 100644 +--- a/net/sched/sch_sfq.c ++++ b/net/sched/sch_sfq.c +@@ -434,6 +434,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) + qdisc_drop(head, sch, to_free); + + slot_queue_add(slot, skb); ++ qdisc_tree_reduce_backlog(sch, 0, delta); + return NET_XMIT_CN; + } + +@@ -465,8 +466,10 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) + /* Return Congestion Notification only if we dropped a packet + * from this flow. + */ +- if (qlen != slot->qlen) ++ if (qlen != slot->qlen) { ++ qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb)); + return NET_XMIT_CN; ++ } + + /* As we dropped a packet, better let upper stack know this */ + qdisc_tree_reduce_backlog(sch, 1, dropped); +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c +index 0c090600f377..ca4a63e3eadd 100644 +--- a/net/sctp/ipv6.c ++++ b/net/sctp/ipv6.c +@@ -512,7 +512,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, + { + addr->sa.sa_family = AF_INET6; + addr->v6.sin6_port = port; ++ addr->v6.sin6_flowinfo = 0; + addr->v6.sin6_addr = *saddr; ++ addr->v6.sin6_scope_id = 0; + } + + /* Compare addresses exactly. +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c +index a4bc98265d88..266a30c8b88b 100644 +--- a/net/sunrpc/svcsock.c ++++ b/net/sunrpc/svcsock.c +@@ -408,6 +408,9 @@ static void svc_data_ready(struct sock *sk) + dprintk("svc: socket %p(inet %p), busy=%d\n", + svsk, sk, + test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); ++ ++ /* Refer to svc_setup_socket() for details. 
*/ ++ rmb(); + svsk->sk_odata(sk); + if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags)) + svc_xprt_enqueue(&svsk->sk_xprt); +@@ -424,6 +427,9 @@ static void svc_write_space(struct sock *sk) + if (svsk) { + dprintk("svc: socket %p(inet %p), write_space busy=%d\n", + svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); ++ ++ /* Refer to svc_setup_socket() for details. */ ++ rmb(); + svsk->sk_owspace(sk); + svc_xprt_enqueue(&svsk->sk_xprt); + } +@@ -748,8 +754,12 @@ static void svc_tcp_listen_data_ready(struct sock *sk) + dprintk("svc: socket %p TCP (listen) state change %d\n", + sk, sk->sk_state); + +- if (svsk) ++ if (svsk) { ++ /* Refer to svc_setup_socket() for details. */ ++ rmb(); + svsk->sk_odata(sk); ++ } ++ + /* + * This callback may called twice when a new connection + * is established as a child socket inherits everything +@@ -782,6 +792,8 @@ static void svc_tcp_state_change(struct sock *sk) + if (!svsk) + printk("svc: socket %p: no user data\n", sk); + else { ++ /* Refer to svc_setup_socket() for details. */ ++ rmb(); + svsk->sk_ostate(sk); + if (sk->sk_state != TCP_ESTABLISHED) { + set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); +@@ -1368,12 +1380,18 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, + return ERR_PTR(err); + } + +- inet->sk_user_data = svsk; + svsk->sk_sock = sock; + svsk->sk_sk = inet; + svsk->sk_ostate = inet->sk_state_change; + svsk->sk_odata = inet->sk_data_ready; + svsk->sk_owspace = inet->sk_write_space; ++ /* ++ * This barrier is necessary in order to prevent race condition ++ * with svc_data_ready(), svc_listen_data_ready() and others ++ * when calling callbacks above. ++ */ ++ wmb(); ++ inet->sk_user_data = svsk; + + /* Initialize the socket */ + if (sock->type == SOCK_DGRAM) +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c +index 1fd464764765..aedc476fac02 100644 +--- a/net/tipc/netlink_compat.c ++++ b/net/tipc/netlink_compat.c +@@ -258,13 +258,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, + arg = nlmsg_new(0, GFP_KERNEL); + if (!arg) { + kfree_skb(msg->rep); ++ msg->rep = NULL; + return -ENOMEM; + } + + err = __tipc_nl_compat_dumpit(cmd, msg, arg); +- if (err) ++ if (err) { + kfree_skb(msg->rep); +- ++ msg->rep = NULL; ++ } + kfree_skb(arg); + + return err; +diff --git a/sound/core/control.c b/sound/core/control.c +index fb096cb20a80..995cde48c1be 100644 +--- a/sound/core/control.c ++++ b/sound/core/control.c +@@ -1156,7 +1156,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol, + mutex_lock(&ue->card->user_ctl_lock); + change = ue->tlv_data_size != size; + if (!change) +- change = memcmp(ue->tlv_data, new_data, size); ++ change = memcmp(ue->tlv_data, new_data, size) != 0; + kfree(ue->tlv_data); + ue->tlv_data = new_data; + ue->tlv_data_size = size; +diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c +index f0e4d502d604..066b5df666f4 100644 +--- a/sound/firewire/iso-resources.c ++++ b/sound/firewire/iso-resources.c +@@ -210,9 +210,14 @@ EXPORT_SYMBOL(fw_iso_resources_update); + */ + void fw_iso_resources_free(struct fw_iso_resources *r) + { +- struct fw_card *card = fw_parent_device(r->unit)->card; ++ struct fw_card *card; + int bandwidth, channel; + ++ /* Not initialized. 
*/ ++ if (r->unit == NULL) ++ return; ++ card = fw_parent_device(r->unit)->card; ++ + mutex_lock(&r->mutex); + + if (r->allocated) { +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index c15c51bea26d..f2e4e99ce651 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -854,6 +854,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { + SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), ++ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI), + SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004), +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index 95c2749ac8a3..286efc3a6116 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -1309,10 +1309,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, + && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) + mdelay(20); + +- /* Zoom R16/24 needs a tiny delay here, otherwise requests like +- * get/set frequency return as failed despite actually succeeding. ++ /* Zoom R16/24, Logitech H650e, Jabra 550a need a tiny delay here, ++ * otherwise requests like get/set frequency return as failed despite ++ * actually succeeding. + */ +- if (chip->usb_id == USB_ID(0x1686, 0x00dd) && ++ if ((chip->usb_id == USB_ID(0x1686, 0x00dd) || ++ chip->usb_id == USB_ID(0x046d, 0x0a46) || ++ chip->usb_id == USB_ID(0x0b0e, 0x0349)) && + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) + mdelay(1); + } +diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c +index 6c50d9f8e210..6a6f44dd594b 100644 +--- a/tools/perf/util/probe-event.c ++++ b/tools/perf/util/probe-event.c +@@ -163,7 +163,7 @@ static struct map *kernel_get_module_map(const char *module) + + /* A file path -- this is an offline module */ + if (module && strchr(module, '/')) +- return machine__findnew_module_map(host_machine, 0, module); ++ return dso__new_map(module); + + if (!module) + module = "kernel"; +@@ -173,6 +173,7 @@ static struct map *kernel_get_module_map(const char *module) + if (strncmp(pos->dso->short_name + 1, module, + pos->dso->short_name_len - 2) == 0 && + module[pos->dso->short_name_len - 2] == '\0') { ++ map__get(pos); + return pos; + } + } +@@ -188,15 +189,6 @@ struct map *get_target_map(const char *target, bool user) + return kernel_get_module_map(target); + } + +-static void put_target_map(struct map *map, bool user) +-{ +- if (map && user) { +- /* Only the user map needs to be released */ +- map__put(map); +- } +-} +- +- + static int convert_exec_to_group(const char *exec, char **result) + { + char *ptr1, *ptr2, *exec_copy; +@@ -412,7 +404,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo, + } + + out: +- put_target_map(map, uprobes); ++ map__put(map); + return ret; + + } +@@ -2944,7 +2936,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, + } + + out: +- put_target_map(map, pev->uprobes); ++ map__put(map); + free(syms); + return ret; + +@@ -3437,10 +3429,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter, + return ret; + + /* Get a symbol map */ +- if (user) +- map = dso__new_map(target); +- else +- map = kernel_get_module_map(target); ++ map =
get_target_map(target, user); + if (!map) { + pr_err("Failed to get a map for %s\n", (target) ? : "kernel"); + return -EINVAL; +@@ -3472,9 +3461,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter, + } + + end: +- if (user) { +- map__put(map); +- } ++ map__put(map); + exit_probe_symbol_maps(); + + return ret; +diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh +index a676d3eefefb..b3c48fc6ea4b 100755 +--- a/tools/testing/selftests/ntb/ntb_test.sh ++++ b/tools/testing/selftests/ntb/ntb_test.sh +@@ -305,7 +305,7 @@ function perf_test() + echo "Running remote perf test $WITH DMA" + write_file "" $REMOTE_PERF/run + echo -n " " +- read_file $LOCAL_PERF/run ++ read_file $REMOTE_PERF/run + echo " Passed" + + _modprobe -r ntb_perf +@@ -326,6 +326,10 @@ function ntb_tool_tests() + link_test $LOCAL_TOOL $REMOTE_TOOL + link_test $REMOTE_TOOL $LOCAL_TOOL + ++ #Ensure the link is up on both sides before continuing ++ write_file Y $LOCAL_TOOL/link_event ++ write_file Y $REMOTE_TOOL/link_event ++ + for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do + PT=$(basename $PEER_TRANS) + write_file $MW_SIZE $LOCAL_TOOL/$PT