diff --git a/Makefile b/Makefile
index 72ed8ff90329..8ec52cd19526 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 142
+SUBLEVEL = 143
 EXTRAVERSION =
 NAME = Roaring Lionus
 
@@ -509,6 +509,39 @@ ifneq ($(filter install,$(MAKECMDGOALS)),)
         endif
 endif
 
+ifeq ($(cc-name),clang)
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET   := -target $(notdir $(CROSS_COMPILE:%-=%))
+GCC_TOOLCHAIN  := $(realpath $(dir $(shell which $(LD)))/..)
+endif
+ifneq ($(GCC_TOOLCHAIN),)
+CLANG_GCC_TC   := -gcc-toolchain $(GCC_TOOLCHAIN)
+endif
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
+KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+# Quiet clang warning: comparison of unsigned expression < 0 is always false
+KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
+# source of a reference will be _MergedGlobals and not one of the whitelisted names.
+# See modpost pattern 2
+KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+else
+
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.build)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+endif
+
+
 ifeq ($(mixed-targets),1)
 # ===========================================================================
 # We're called with mixed targets (*config and build targets).
@@ -704,38 +737,6 @@ ifdef CONFIG_CC_STACKPROTECTOR
 endif
 KBUILD_CFLAGS += $(stackp-flag)
 
-ifeq ($(cc-name),clang)
-ifneq ($(CROSS_COMPILE),)
-CLANG_TARGET   := -target $(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN  := $(realpath $(dir $(shell which $(LD)))/..)
-endif
-ifneq ($(GCC_TOOLCHAIN),)
-CLANG_GCC_TC   := -gcc-toolchain $(GCC_TOOLCHAIN)
-endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
-KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
-# Quiet clang warning: comparison of unsigned expression < 0 is always false
-KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
-# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
-# source of a reference will be _MergedGlobals and not one of the whitelisted names.
-# See modpost pattern 2
-KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
-KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
-else
-
-# These warnings generated too much noise in a regular build.
-# Use make W=1 to enable them (see scripts/Makefile.build)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
-endif
-
 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS  += -fno-omit-frame-pointer -fno-optimize-sibling-calls
 else
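
The relocated clang block configures cross-compilation (-target/-gcc-toolchain) and silences warning classes that fire all over generic kernel code. One of them, tautological-compare, triggers on comparisons that are always false for unsigned types, typically produced by type-generic range-check macros. A minimal standalone illustration of the warning, not taken from the kernel (hypothetical file and function names):

    /* tautological.c: clang warns "comparison of unsigned expression
     * < 0 is always false"; gcc only does so with -Wextra (-Wtype-limits).
     * Build: clang -c tautological.c
     */
    unsigned int clamp_floor(unsigned int v)
    {
            if (v < 0)      /* always false: v is unsigned */
                    return 0;
            return v;
    }
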
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 6be9ee148b78..e14ddca59d02 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -104,7 +104,7 @@ tune-$(CONFIG_CPU_V6K)              =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
 tune-y := $(tune-y)
 
 ifeq ($(CONFIG_AEABI),y)
-CFLAGS_ABI     :=-mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp
+CFLAGS_ABI     :=-mabi=aapcs-linux -mfpu=vfp
 else
 CFLAGS_ABI     :=$(call cc-option,-mapcs-32,-mabi=apcs-gnu) $(call cc-option,-mno-thumb-interwork,)
 endif
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index d50430c40045..552c7d7f84ce 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -112,7 +112,7 @@ CFLAGS_fdt_ro.o := $(nossp_flags)
 CFLAGS_fdt_rw.o := $(nossp_flags)
 CFLAGS_fdt_wip.o := $(nossp_flags)
 
-ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
+ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
diff --git a/arch/arm/firmware/trusted_foundations.c b/arch/arm/firmware/trusted_foundations.c
index 3fb1b5a1dce9..689e6565abfc 100644
--- a/arch/arm/firmware/trusted_foundations.c
+++ b/arch/arm/firmware/trusted_foundations.c
@@ -31,21 +31,25 @@
 
 static unsigned long cpu_boot_addr;
 
-static void __naked tf_generic_smc(u32 type, u32 arg1, u32 arg2)
+static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
 {
+       register u32 r0 asm("r0") = type;
+       register u32 r1 asm("r1") = arg1;
+       register u32 r2 asm("r2") = arg2;
+
        asm volatile(
                ".arch_extension        sec\n\t"
-               "stmfd  sp!, {r4 - r11, lr}\n\t"
+               "stmfd  sp!, {r4 - r11}\n\t"
                __asmeq("%0", "r0")
                __asmeq("%1", "r1")
                __asmeq("%2", "r2")
                "mov    r3, #0\n\t"
                "mov    r4, #0\n\t"
                "smc    #0\n\t"
-               "ldmfd  sp!, {r4 - r11, pc}"
+               "ldmfd  sp!, {r4 - r11}\n\t"
                :
-               : "r" (type), "r" (arg1), "r" (arg2)
-               : "memory");
+               : "r" (r0), "r" (r1), "r" (r2)
+               : "memory", "r3", "r12", "lr");
 }
 
 static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
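
The rewritten tf_generic_smc() drops __naked, which clang does not support for functions whose bodies contain C code, and instead pins each argument to the register the secure-monitor calling convention expects via local register variables, saving r4-r11 explicitly and declaring what the SMC may clobber. A reduced sketch of the idiom, assuming an ARM EABI target (hypothetical function name):

    static void smc_call3(unsigned int a, unsigned int b, unsigned int c)
    {
            /* Bind each argument to the exact register the monitor
             * reads; the compiler keeps the values live in r0-r2
             * across the asm statement.
             */
            register unsigned int r0 asm("r0") = a;
            register unsigned int r1 asm("r1") = b;
            register unsigned int r2 asm("r2") = c;

            asm volatile(".arch_extension sec\n\t"
                         "smc    #0"
                         : : "r" (r0), "r" (r1), "r" (r2)
                         : "memory", "r3", "r12", "lr");
    }
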
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 655a65eaf105..cadf99923600 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -437,26 +437,6 @@ int x86_setup_perfctr(struct perf_event *event)
        if (config == -1LL)
                return -EINVAL;
 
-       /*
-        * Branch tracing:
-        */
-       if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
-           !attr->freq && hwc->sample_period == 1) {
-               /* BTS is not supported by this architecture. */
-               if (!x86_pmu.bts_active)
-                       return -EOPNOTSUPP;
-
-               /* BTS is currently only allowed for user-mode. */
-               if (!attr->exclude_kernel)
-                       return -EOPNOTSUPP;
-
-               /* disallow bts if conflicting events are present */
-               if (x86_add_exclusive(x86_lbr_exclusive_lbr))
-                       return -EBUSY;
-
-               event->destroy = hw_perf_lbr_event_destroy;
-       }
-
        hwc->config |= config;
 
        return 0;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 815039327932..4f8560774082 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2198,16 +2198,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 static struct event_constraint *
 intel_bts_constraints(struct perf_event *event)
 {
-       struct hw_perf_event *hwc = &event->hw;
-       unsigned int hw_event, bts_event;
-
-       if (event->attr.freq)
-               return NULL;
-
-       hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
-       bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
-
-       if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
+       if (unlikely(intel_pmu_has_bts(event)))
                return &bts_constraint;
 
        return NULL;
@@ -2822,10 +2813,47 @@ static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
        return flags;
 }
 
+static int intel_pmu_bts_config(struct perf_event *event)
+{
+       struct perf_event_attr *attr = &event->attr;
+
+       if (unlikely(intel_pmu_has_bts(event))) {
+               /* BTS is not supported by this architecture. */
+               if (!x86_pmu.bts_active)
+                       return -EOPNOTSUPP;
+
+               /* BTS is currently only allowed for user-mode. */
+               if (!attr->exclude_kernel)
+                       return -EOPNOTSUPP;
+
+               /* disallow bts if conflicting events are present */
+               if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+                       return -EBUSY;
+
+               event->destroy = hw_perf_lbr_event_destroy;
+       }
+
+       return 0;
+}
+
+static int core_pmu_hw_config(struct perf_event *event)
+{
+       int ret = x86_pmu_hw_config(event);
+
+       if (ret)
+               return ret;
+
+       return intel_pmu_bts_config(event);
+}
+
 static int intel_pmu_hw_config(struct perf_event *event)
 {
        int ret = x86_pmu_hw_config(event);
 
+       if (ret)
+               return ret;
+
+       ret = intel_pmu_bts_config(event);
        if (ret)
                return ret;
 
@@ -2848,7 +2876,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
                /*
                 * BTS is set up earlier in this path, so don't account twice
                 */
-               if (!intel_pmu_has_bts(event)) {
+               if (!unlikely(intel_pmu_has_bts(event))) {
                        /* disallow lbr if conflicting events are present */
                        if (x86_add_exclusive(x86_lbr_exclusive_lbr))
                                return -EBUSY;
@@ -3265,7 +3293,7 @@ static __initconst const struct x86_pmu core_pmu = {
        .enable_all             = core_pmu_enable_all,
        .enable                 = core_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
-       .hw_config              = x86_pmu_hw_config,
+       .hw_config              = core_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 1bfebbc4d156..7ace39c51ff7 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -835,11 +835,16 @@ static inline int amd_pmu_init(void)
 
 static inline bool intel_pmu_has_bts(struct perf_event *event)
 {
-       if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
-           !event->attr.freq && event->hw.sample_period == 1)
-               return true;
+       struct hw_perf_event *hwc = &event->hw;
+       unsigned int hw_event, bts_event;
+
+       if (event->attr.freq)
+               return false;
+
+       hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+       bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
-       return false;
+       return hw_event == bts_event && hwc->sample_period == 1;
 }
 
 int intel_pmu_save_and_restart(struct perf_event *event);
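
For orientation, the predicate above selects the BTS path for an event that asks for branch instructions at a fixed sample period of 1 with frequency mode off. A hedged user-space illustration of such an attribute (not part of the patch; error handling elided):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_bts_event(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
            attr.sample_period = 1;    /* period 1, freq off => BTS */
            attr.freq = 0;
            attr.exclude_kernel = 1;   /* BTS is user-mode only here */

            /* self, any CPU, no group leader, no flags */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }
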
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8a4d6bc8fed0..676edfc19a95 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4297,9 +4297,9 @@ static bool need_remote_flush(u64 old, u64 new)
 }
 
 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
-                                   const u8 *new, int *bytes)
+                                   int *bytes)
 {
-       u64 gentry;
+       u64 gentry = 0;
        int r;
 
        /*
@@ -4311,22 +4311,12 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
                *gpa &= ~(gpa_t)7;
                *bytes = 8;
-               r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
-               if (r)
-                       gentry = 0;
-               new = (const u8 *)&gentry;
        }
 
-       switch (*bytes) {
-       case 4:
-               gentry = *(const u32 *)new;
-               break;
-       case 8:
-               gentry = *(const u64 *)new;
-               break;
-       default:
-               gentry = 0;
-               break;
+       if (*bytes == 4 || *bytes == 8) {
+               r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
+               if (r)
+                       gentry = 0;
        }
 
        return gentry;
@@ -4437,8 +4427,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
-       gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
-
        /*
         * No need to care whether allocation memory is successful
         * or not since pte prefetch is skiped if it does not have
@@ -4447,6 +4435,9 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        mmu_topup_memory_caches(vcpu);
 
        spin_lock(&vcpu->kvm->mmu_lock);
+
+       gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
+
        ++vcpu->kvm->stat.mmu_pte_write;
        kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
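
Two coupled fixes above: gentry now defaults to 0 so a failed or skipped read cannot leave it uninitialized, and the guest PTE is fetched with kvm_vcpu_read_guest_atomic() only after mmu_lock is held, so the value steering the shadow-page update cannot be overwritten between read and use. The rule generalizes; a standalone sketch with hypothetical names:

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t shadow_entry;

    static void update_shadow(const uint64_t *guest_entry)
    {
            uint64_t snapshot;

            pthread_mutex_lock(&table_lock);
            /* Read and consume under one lock: a snapshot taken before
             * locking could be stale by the time it is applied.
             */
            snapshot = *guest_entry;
            shadow_entry = snapshot;
            pthread_mutex_unlock(&table_lock);
    }
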
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5f44d63a9d69..4bc35ac28d11 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1672,21 +1672,31 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
        return ERR_PTR(err);
 }
 
+static void svm_clear_current_vmcb(struct vmcb *vmcb)
+{
+       int i;
+
+       for_each_online_cpu(i)
+               cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
+}
+
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       /*
+        * The vmcb page can be recycled, causing a false negative in
+        * svm_vcpu_load(). So, ensure that no logical CPU has this
+        * vmcb page recorded as its current vmcb.
+        */
+       svm_clear_current_vmcb(svm->vmcb);
+
        __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        __free_page(virt_to_page(svm->nested.hsave));
        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, svm);
-       /*
-        * The vmcb page can be recycled, causing a false negative in
-        * svm_vcpu_load(). So do a full IBPB now.
-        */
-       indirect_branch_prediction_barrier();
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
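
svm_clear_current_vmcb() uses cmpxchg() rather than a plain store so that each per-CPU current_vmcb slot is cleared only if it still names the vmcb being freed; a CPU that has since loaded another vCPU's vmcb keeps its pointer, and the expensive unconditional IBPB on every vCPU free becomes unnecessary. The conditional-clear semantics in a minimal C11 sketch (hypothetical names):

    #include <stdatomic.h>
    #include <stddef.h>

    struct vmcb;
    static _Atomic(struct vmcb *) current_vmcb;

    static void clear_if_current(struct vmcb *dying)
    {
            struct vmcb *expected = dying;

            /* Stores NULL only while current_vmcb == dying; otherwise
             * the newer pointer is left untouched.
             */
            atomic_compare_exchange_strong(&current_vmcb, &expected, NULL);
    }
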
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5013ef165f44..27d13b870e07 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6661,7 +6661,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
        else {
                if (vcpu->arch.apicv_active)
                        kvm_x86_ops->sync_pir_to_irr(vcpu);
-               kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
+               if (ioapic_in_kernel(vcpu->kvm))
+                       kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
        }
        bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
                  vcpu_to_synic(vcpu)->vec_bitmap, 256);
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 8e10e357ee32..f1af06b8f3cd 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -91,14 +91,14 @@ int main(void)
        DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
        DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
 #if XTENSA_HAVE_COPROCESSORS
-       DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
+       DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
+       DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
+       DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
+       DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
+       DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
+       DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
+       DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
+       DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
 #endif
        DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
        DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index e0ded48561db..570307c91846 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -85,18 +85,21 @@ void coprocessor_release_all(struct thread_info *ti)
 
 void coprocessor_flush_all(struct thread_info *ti)
 {
-       unsigned long cpenable;
+       unsigned long cpenable, old_cpenable;
        int i;
 
        preempt_disable();
 
+       RSR_CPENABLE(old_cpenable);
        cpenable = ti->cpenable;
+       WSR_CPENABLE(cpenable);
 
        for (i = 0; i < XCHAL_CP_MAX; i++) {
                if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
                        coprocessor_flush(ti, i);
                cpenable >>= 1;
        }
+       WSR_CPENABLE(old_cpenable);
 
        preempt_enable();
 }
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 10f56133b281..8e08cb4fd7df 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -2103,8 +2103,6 @@ asmlinkage void __naked cci_enable_port_for_self(void)
        [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
        [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
        [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
-
-       unreachable();
 }
 
 /**
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index e2cec5b357fd..a32cd71f94bb 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1774,6 +1774,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
        atchan->descs_allocated = 0;
        atchan->status = 0;
 
+       /*
+        * Free atslave allocated in at_dma_xlate()
+        */
+       kfree(chan->private);
+       chan->private = NULL;
+
        dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }
 
@@ -1808,7 +1814,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
 
-       atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
+       atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
        if (!atslave)
                return NULL;
 
@@ -2139,6 +2145,8 @@ static int at_dma_remove(struct platform_device *pdev)
        struct resource         *io;
 
        at_dma_off(atdma);
+       if (pdev->dev.of_node)
+               of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&atdma->dma_common);
 
        dma_pool_destroy(atdma->memset_pool);
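
The devm_kzalloc() to kzalloc() switch is about ownership: a devm allocation is tied to the controller device and released only at driver detach, while the slave data created by at_dma_xlate() belongs to a single channel translation and must die with the channel, hence the paired kfree(chan->private) added to atc_free_chan_resources(). The pairing, reduced to a userspace sketch (hypothetical names):

    #include <stdlib.h>

    struct chan { void *private; };
    struct slave_cfg { int per_id; };

    /* xlate-time: allocate configuration owned by this channel */
    static int chan_bind(struct chan *c, int per_id)
    {
            struct slave_cfg *cfg = calloc(1, sizeof(*cfg));

            if (!cfg)
                    return -1;
            cfg->per_id = per_id;
            c->private = cfg;
            return 0;
    }

    /* release-time: free exactly what bind allocated */
    static void chan_release(struct chan *c)
    {
            free(c->private);
            c->private = NULL;
    }
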
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 2cd9496eb696..310f8feb5174 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -12,7 +12,8 @@ cflags-$(CONFIG_X86)          += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
 
 cflags-$(CONFIG_ARM64)         := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
 cflags-$(CONFIG_ARM)           := $(subst -pg,,$(KBUILD_CFLAGS)) \
-                                  -fno-builtin -fpic -mno-single-pic-base
+                                  -fno-builtin -fpic \
+                                  $(call cc-option,-mno-single-pic-base)
 
 cflags-$(CONFIG_EFI_ARMSTUB)   += -I$(srctree)/scripts/dtc/libfdt
 
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index aded10662020..09d10dcf1fc6 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -355,6 +355,14 @@ efi_status_t efi_parse_options(char *cmdline)
 {
        char *str;
 
+       /*
+        * Currently, the only efi= option we look for is 'nochunk', which
+        * is intended to work around known issues on certain x86 UEFI
+        * versions. So ignore for now on other architectures.
+        */
+       if (!IS_ENABLED(CONFIG_X86))
+               return EFI_SUCCESS;
+
        /*
         * If no EFI parameters were specified on the cmdline we've got
         * nothing to do.
@@ -528,7 +536,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
                        size = files[j].size;
                        while (size) {
                                unsigned long chunksize;
-                               if (size > __chunk_size)
+
+                               if (IS_ENABLED(CONFIG_X86) && size > __chunk_size)
                                        chunksize = __chunk_size;
                                else
                                        chunksize = size;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 1606e7f08f4b..784c45484825 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -448,6 +448,14 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
        }
        wait_for_completion(&msginfo->waitevent);
 
+       if (msginfo->response.gpadl_created.creation_status != 0) {
+               pr_err("Failed to establish GPADL: err = 0x%x\n",
+                      msginfo->response.gpadl_created.creation_status);
+
+               ret = -EDQUOT;
+               goto cleanup;
+       }
+
        if (channel->rescind) {
                ret = -ENODEV;
                goto cleanup;
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
index 0a9e8fadfa9d..37ab30566464 100644
--- a/drivers/iio/magnetometer/st_magn_buffer.c
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -30,11 +30,6 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state)
        return st_sensors_set_dataready_irq(indio_dev, state);
 }
 
-static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
-{
-       return st_sensors_set_enable(indio_dev, true);
-}
-
 static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
 {
        int err;
@@ -50,7 +45,7 @@ static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
        if (err < 0)
                goto st_magn_buffer_postenable_error;
 
-       return err;
+       return st_sensors_set_enable(indio_dev, true);
 
 st_magn_buffer_postenable_error:
        kfree(mdata->buffer_data);
@@ -63,11 +58,11 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
        int err;
        struct st_sensor_data *mdata = iio_priv(indio_dev);
 
-       err = iio_triggered_buffer_predisable(indio_dev);
+       err = st_sensors_set_enable(indio_dev, false);
        if (err < 0)
                goto st_magn_buffer_predisable_error;
 
-       err = st_sensors_set_enable(indio_dev, false);
+       err = iio_triggered_buffer_predisable(indio_dev);
 
 st_magn_buffer_predisable_error:
        kfree(mdata->buffer_data);
@@ -75,7 +70,6 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
 }
 
 static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
-       .preenable = &st_magn_buffer_preenable,
        .postenable = &st_magn_buffer_postenable,
        .predisable = &st_magn_buffer_predisable,
 };
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 8cedef0daae4..b0aea48907b7 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -2016,6 +2016,8 @@ static int em28xx_dvb_fini(struct em28xx *dev)
                }
        }
 
+       em28xx_unregister_dvb(dvb);
+
        /* remove I2C SEC */
        client = dvb->i2c_client_sec;
        if (client) {
@@ -2037,7 +2039,6 @@ static int em28xx_dvb_fini(struct em28xx *dev)
                i2c_unregister_device(client);
        }
 
-       em28xx_unregister_dvb(dvb);
        kfree(dvb);
        dev->dvb = NULL;
        kref_put(&dev->ref, em28xx_free_device);
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index f806a4471eb9..32ab0f43f506 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -414,7 +414,7 @@ static int scif_create_remote_lookup(struct scif_dev *remote_dev,
                if (err)
                        goto error_window;
                err = scif_map_page(&window->num_pages_lookup.lookup[j],
-                                   vmalloc_dma_phys ?
+                                   vmalloc_num_pages ?
                                    vmalloc_to_page(&window->num_pages[i]) :
                                    virt_to_page(&window->num_pages[i]),
                                    remote_dev);
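
The one-identifier scif fix matters because page lookup must match the allocator: virt_to_page() is valid only for the kernel's linear mapping, while vmalloc memory must be translated with vmalloc_to_page(). The window->num_pages array is the buffer guarded by vmalloc_num_pages, so testing vmalloc_dma_phys consulted the wrong buffer's allocation type. The selection pattern as a hedged kernel-style helper (hypothetical name):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Translate a kernel virtual address to its struct page, honouring
     * how the backing buffer was allocated; handing a vmalloc address
     * to virt_to_page() yields a bogus page pointer.
     */
    static struct page *addr_to_page(void *addr, bool is_vmalloc)
    {
            return is_vmalloc ? vmalloc_to_page(addr) : virt_to_page(addr);
    }
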
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index a31f4610b493..2c2604e3f633 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -216,9 +216,9 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                         * it just report sending a packet to the target
                         * (without actual packet transfer).
                         */
-                       dev_kfree_skb_any(skb);
                        ndev->stats.tx_packets++;
                        ndev->stats.tx_bytes += skb->len;
+                       dev_kfree_skb_any(skb);
                }
        }
 
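
The rionet reorder fixes a use-after-free: the byte counter read skb->len after dev_kfree_skb_any() had released the skb. Anything needed from an skb must be captured before it is freed. A reduced kernel-style sketch (hypothetical function name):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void account_and_free(struct net_device *ndev, struct sk_buff *skb)
    {
            unsigned int len = skb->len;    /* capture before freeing */

            ndev->stats.tx_packets++;
            ndev->stats.tx_bytes += len;
            dev_kfree_skb_any(skb);         /* skb is invalid past this point */
    }
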
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 76465b117b72..f1f8227e7342 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -140,7 +140,6 @@ struct ipheth_device {
        struct usb_device *udev;
        struct usb_interface *intf;
        struct net_device *net;
-       struct sk_buff *tx_skb;
        struct urb *tx_urb;
        struct urb *rx_urb;
        unsigned char *tx_buf;
@@ -229,6 +228,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
        case -ENOENT:
        case -ECONNRESET:
        case -ESHUTDOWN:
+       case -EPROTO:
                return;
        case 0:
                break;
@@ -280,7 +280,6 @@ static void ipheth_sndbulk_callback(struct urb *urb)
                dev_err(&dev->intf->dev, "%s: urb status: %d\n",
                __func__, status);
 
-       dev_kfree_skb_irq(dev->tx_skb);
        netif_wake_queue(dev->net);
 }
 
@@ -410,7 +409,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
        if (skb->len > IPHETH_BUF_SIZE) {
                WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
                dev->net->stats.tx_dropped++;
-               dev_kfree_skb_irq(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -430,12 +429,11 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
                dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
                        __func__, retval);
                dev->net->stats.tx_errors++;
-               dev_kfree_skb_irq(skb);
+               dev_kfree_skb_any(skb);
        } else {
-               dev->tx_skb = skb;
-
                dev->net->stats.tx_packets++;
                dev->net->stats.tx_bytes += skb->len;
+               dev_consume_skb_any(skb);
                netif_stop_queue(net);
        }
 
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 96f83f09b8c5..7f4da727bb7b 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -35,7 +35,6 @@
 #include "wl12xx_80211.h"
 #include "cmd.h"
 #include "event.h"
-#include "ps.h"
 #include "tx.h"
 #include "hw_ops.h"
 
@@ -192,10 +191,6 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
 
        timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
 
-       ret = wl1271_ps_elp_wakeup(wl);
-       if (ret < 0)
-               return ret;
-
        do {
                if (time_after(jiffies, timeout_time)) {
                        wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
@@ -227,7 +222,6 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
        } while (!event);
 
 out:
-       wl1271_ps_elp_sleep(wl);
        kfree(events_vector);
        return ret;
 }
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index a5e603062ee0..8f77fc0630ce 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4540,8 +4540,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
 {
        struct qeth_ipa_cmd *cmd;
        struct qeth_arp_query_info *qinfo;
-       struct qeth_snmp_cmd *snmp;
        unsigned char *data;
+       void *snmp_data;
        __u16 data_len;
 
        QETH_CARD_TEXT(card, 3, "snpcmdcb");
@@ -4549,7 +4549,6 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
        cmd = (struct qeth_ipa_cmd *) sdata;
        data = (unsigned char *)((char *)cmd - reply->offset);
        qinfo = (struct qeth_arp_query_info *) reply->param;
-       snmp = &cmd->data.setadapterparms.data.snmp;
 
        if (cmd->hdr.return_code) {
                QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
@@ -4562,10 +4561,15 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
                return 0;
        }
        data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
-       if (cmd->data.setadapterparms.hdr.seq_no == 1)
-               data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
-       else
-               data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
+       if (cmd->data.setadapterparms.hdr.seq_no == 1) {
+               snmp_data = &cmd->data.setadapterparms.data.snmp;
+               data_len -= offsetof(struct qeth_ipa_cmd,
+                                    data.setadapterparms.data.snmp);
+       } else {
+               snmp_data = &cmd->data.setadapterparms.data.snmp.request;
+               data_len -= offsetof(struct qeth_ipa_cmd,
+                                    data.setadapterparms.data.snmp.request);
+       }
 
        /* check if there is enough room in userspace */
        if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
@@ -4578,16 +4582,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
        QETH_CARD_TEXT_(card, 4, "sseqn%i",
                cmd->data.setadapterparms.hdr.seq_no);
        /*copy entries to user buffer*/
-       if (cmd->data.setadapterparms.hdr.seq_no == 1) {
-               memcpy(qinfo->udata + qinfo->udata_offset,
-                      (char *)snmp,
-                      data_len + offsetof(struct qeth_snmp_cmd, data));
-               qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
-       } else {
-               memcpy(qinfo->udata + qinfo->udata_offset,
-                      (char *)&snmp->request, data_len);
-       }
+       memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
        qinfo->udata_offset += data_len;
+
        /* check if all replies received ... */
                QETH_CARD_TEXT_(card, 4, "srtot%i",
                               cmd->data.setadapterparms.hdr.used_total);
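
The qeth rework replaces pointer subtraction such as (char *)&snmp->data - (char *)cmd with offsetof() over the nested member path, computing the same byte distance directly from the type instead of through fabricated intermediate pointers. A standalone illustration that offsetof() accepts nested member designators (hypothetical structs):

    #include <stddef.h>
    #include <stdio.h>

    struct inner { int request; char data[8]; };
    struct outer { int hdr; struct inner snmp; };

    int main(void)
    {
            /* Same distance the old code derived by pointer arithmetic */
            printf("%zu\n", offsetof(struct outer, snmp.request));
            return 0;
    }
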
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 1e8f68960014..808437c5ec49 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -64,6 +64,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Microsoft LifeCam-VX700 v2.0 */
        { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
+       { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
        { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 26efe8c7535f..ed6b9bfe3759 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1280,9 +1280,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
                unsigned transfer_in_flight;
                unsigned started;
 
-               if (dep->flags & DWC3_EP_STALL)
-                       return 0;
-
                if (dep->number > 1)
                        trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
                else
@@ -1307,8 +1304,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
                else
                        dep->flags |= DWC3_EP_STALL;
        } else {
-               if (!(dep->flags & DWC3_EP_STALL))
-                       return 0;
 
                ret = dwc3_send_clear_stall_ep_cmd(dep);
                if (ret)
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index 8fe624ad302a..7ca779493671 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -39,4 +39,14 @@ UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
                "USB Card Reader",
                USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
 
+UNUSUAL_DEV(0x0bda, 0x0177, 0x0000, 0x9999,
+               "Realtek",
+               "USB Card Reader",
+               USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
+UNUSUAL_DEV(0x0bda, 0x0184, 0x0000, 0x9999,
+               "Realtek",
+               "USB Card Reader",
+               USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
 #endif  /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index f6e111984ce2..a7b69deb6d70 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2226,6 +2226,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
        vol = memdup_user((void __user *)arg, sizeof(*vol));
        if (IS_ERR(vol))
                return PTR_ERR(vol);
+       vol->name[BTRFS_PATH_NAME_MAX] = '\0';
 
        switch (cmd) {
        case BTRFS_IOC_SCAN_DEV:
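
The single line added to btrfs_control_ioctl() closes a classic hole: memdup_user() copies a fixed-size argument whose name field is user-controlled and not guaranteed to be NUL-terminated, so later string handling could run past the array. Pinning a terminator at the last index bounds every subsequent str* call. The defensive pattern, reduced to a userspace sketch (the size is a stand-in, not the real BTRFS_PATH_NAME_MAX):

    #include <string.h>

    #define NAME_MAX_LEN 255               /* stand-in size */

    struct vol_args {
            long long fd;
            char name[NAME_MAX_LEN + 1];
    };

    /* Call right after copying 'vol' from an untrusted source so that
     * strlen()/strcmp() on vol->name cannot overrun the array.
     */
    static void sanitize_vol_args(struct vol_args *vol)
    {
            vol->name[NAME_MAX_LEN] = '\0';
    }
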
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 9517de0e668c..fd6c74662e9a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1924,6 +1924,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                return ret;
        }
 
+       btrfs_trans_release_metadata(trans, root);
+       trans->block_rsv = NULL;
+
        /* make a pass through all the delayed refs we have so far
         * any runnings procs may add more while we are here
         */
@@ -1933,9 +1936,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                return ret;
        }
 
-       btrfs_trans_release_metadata(trans, root);
-       trans->block_rsv = NULL;
-
        cur_trans = trans->transaction;
 
        /*
diff --git a/fs/direct-io.c b/fs/direct-io.c
index c6220a2daefd..07cc38ec66ca 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -278,8 +278,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
                 */
                dio->iocb->ki_pos += transferred;
 
-               if (dio->op == REQ_OP_WRITE)
-                       ret = generic_write_sync(dio->iocb,  transferred);
+               if (ret > 0 && dio->op == REQ_OP_WRITE)
+                       ret = generic_write_sync(dio->iocb, ret);
                dio->iocb->ki_complete(dio->iocb, ret, 0);
        }
 
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index fbdb8f171893..babef30d440b 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -609,9 +609,9 @@ bad_block:          ext2_error(sb, "ext2_xattr_set",
        }
 
 cleanup:
-       brelse(bh);
        if (!(bh && header == HDR(bh)))
                kfree(header);
+       brelse(bh);
        up_write(&EXT2_I(inode)->xattr_sem);
 
        return error;
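
Like the rionet change, this ext2 fix is pure ordering: HDR(bh) dereferences bh->b_data, so the header == HDR(bh) test must run while the buffer reference is still held, and brelse(bh) must come last. The corrected shape as a hedged kernel-style sketch (hypothetical helper):

    #include <linux/buffer_head.h>
    #include <linux/slab.h>

    /* Free 'header' only when it is a private copy rather than a view
     * into the buffer; decide while bh is still valid, release last.
     */
    static void release_xattr_header(struct buffer_head *bh, void *header)
    {
            if (!(bh && header == (void *)bh->b_data))
                    kfree(header);
            brelse(bh);
    }
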
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 1def337b16d4..8e880f7f67b2 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -106,9 +106,9 @@ struct work_struct {
 #endif
 };
 
-#define WORK_DATA_INIT()       ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
+#define WORK_DATA_INIT()       ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
 #define WORK_DATA_STATIC_INIT()        \
-       ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
+       ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))
 
 struct delayed_work {
        struct work_struct work;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9f7bba700e4e..7ea8da990b9d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1839,7 +1839,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
        }
 }
 
-static void freeze_page(struct page *page)
+static void unmap_page(struct page *page)
 {
        enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
                TTU_RMAP_LOCKED;
@@ -1862,7 +1862,7 @@ static void freeze_page(struct page *page)
        VM_BUG_ON_PAGE(ret, page + i - 1);
 }
 
-static void unfreeze_page(struct page *page)
+static void remap_page(struct page *page)
 {
        int i;
 
@@ -1876,26 +1876,13 @@ static void __split_huge_page_tail(struct page *head, int tail,
        struct page *page_tail = head + tail;
 
        VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
-       VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail);
 
        /*
-        * tail_page->_refcount is zero and not changing from under us. But
-        * get_page_unless_zero() may be running from under us on the
-        * tail_page. If we used atomic_set() below instead of atomic_inc() or
-        * atomic_add(), we would then run atomic_set() concurrently with
-        * get_page_unless_zero(), and atomic_set() is implemented in C not
-        * using locked ops. spin_unlock on x86 sometime uses locked ops
-        * because of PPro errata 66, 92, so unless somebody can guarantee
-        * atomic_set() here would be safe on all archs (and not only on x86),
-        * it's safer to use atomic_inc()/atomic_add().
+        * Clone page flags before unfreezing refcount.
+        *
+        * After successful get_page_unless_zero() might follow flags change,
+        * for example lock_page() which sets PG_waiters.
         */
-       if (PageAnon(head)) {
-               page_ref_inc(page_tail);
-       } else {
-               /* Additional pin to radix tree */
-               page_ref_add(page_tail, 2);
-       }
-
        page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        page_tail->flags |= (head->flags &
                        ((1L << PG_referenced) |
@@ -1907,36 +1894,42 @@ static void __split_huge_page_tail(struct page *head, int tail,
                         (1L << PG_unevictable) |
                         (1L << PG_dirty)));
 
-       /*
-        * After clearing PageTail the gup refcount can be released.
-        * Page flags also must be visible before we make the page non-compound.
-        */
+       /* ->mapping in first tail page is compound_mapcount */
+       VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
+                       page_tail);
+       page_tail->mapping = head->mapping;
+       page_tail->index = head->index + tail;
+
+       /* Page flags must be visible before we make the page non-compound. */
        smp_wmb();
 
+       /*
+        * Clear PageTail before unfreezing page refcount.
+        *
+        * After successful get_page_unless_zero() might follow put_page()
+        * which needs correct compound_head().
+        */
        clear_compound_head(page_tail);
 
+       /* Finally unfreeze refcount. Additional reference from page cache. */
+       page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
+                                         PageSwapCache(head)));
+
        if (page_is_young(head))
                set_page_young(page_tail);
        if (page_is_idle(head))
                set_page_idle(page_tail);
 
-       /* ->mapping in first tail page is compound_mapcount */
-       VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
-                       page_tail);
-       page_tail->mapping = head->mapping;
-
-       page_tail->index = head->index + tail;
        page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
        lru_add_page_tail(head, page_tail, lruvec, list);
 }
 
 static void __split_huge_page(struct page *page, struct list_head *list,
-               unsigned long flags)
+               pgoff_t end, unsigned long flags)
 {
        struct page *head = compound_head(page);
        struct zone *zone = page_zone(head);
        struct lruvec *lruvec;
-       pgoff_t end = -1;
        int i;
 
        lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
@@ -1944,9 +1937,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
        /* complete memcg works before add pages to LRU */
        mem_cgroup_split_huge_fixup(head);
 
-       if (!PageAnon(page))
-               end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
-
        for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
                __split_huge_page_tail(head, i, lruvec, list);
                /* Some pages can be beyond i_size: drop them from page cache */
@@ -1971,7 +1961,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
        spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
 
-       unfreeze_page(head);
+       remap_page(head);
 
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                struct page *subpage = head + i;
@@ -2099,6 +2089,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        int count, mapcount, extra_pins, ret;
        bool mlocked;
        unsigned long flags;
+       pgoff_t end;
 
        VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -2120,6 +2111,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                        goto out;
                }
                extra_pins = 0;
+               end = -1;
                mapping = NULL;
                anon_vma_lock_write(anon_vma);
        } else {
@@ -2135,10 +2127,19 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                extra_pins = HPAGE_PMD_NR;
                anon_vma = NULL;
                i_mmap_lock_read(mapping);
+
+               /*
+                *__split_huge_page() may need to trim off pages beyond EOF:
+                * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
+                * which cannot be nested inside the page tree lock. So note
+                * end now: i_size itself may be changed at any moment, but
+                * head page lock is good enough to serialize the trimming.
+                */
+               end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
        }
 
        /*
-        * Racy check if we can split the page, before freeze_page() will
+        * Racy check if we can split the page, before unmap_page() will
         * split PMDs
         */
        if (total_mapcount(head) != page_count(head) - extra_pins - 1) {
@@ -2147,7 +2148,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        }
 
        mlocked = PageMlocked(page);
-       freeze_page(head);
+       unmap_page(head);
        VM_BUG_ON_PAGE(compound_mapcount(head), head);
 
        /* Make sure the page is not on per-CPU pagevec as it takes pin */
@@ -2184,7 +2185,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                if (mapping)
                        __dec_node_page_state(page, NR_SHMEM_THPS);
                spin_unlock(&pgdata->split_queue_lock);
-               __split_huge_page(page, list, flags);
+               __split_huge_page(page, list, end, flags);
                ret = 0;
        } else {
                if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
@@ -2199,7 +2200,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 fail:          if (mapping)
                        spin_unlock(&mapping->tree_lock);
                spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
-               unfreeze_page(head);
+               remap_page(head);
                ret = -EBUSY;
        }
 
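
The reordering in __split_huge_page_tail() enforces a publish-then-unfreeze discipline: every tail-page field (flags, mapping, index) and the cleared compound state must be visible before page_ref_unfreeze() allows get_page_unless_zero() to succeed. The same shape in a standalone C11 sketch (hypothetical names):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct obj {
            int field_a;
            int field_b;
            atomic_int refcount;    /* 0 == frozen, invisible to readers */
    };

    static void publish(struct obj *o, int a, int b)
    {
            o->field_a = a;         /* initialize everything first */
            o->field_b = b;

            /* Release store: a reader that later wins a reference is
             * guaranteed to observe the fields written above.
             */
            atomic_store_explicit(&o->refcount, 1, memory_order_release);
    }

    /* Analogous to get_page_unless_zero() */
    static bool try_get(struct obj *o)
    {
            int c = atomic_load_explicit(&o->refcount, memory_order_acquire);

            return c != 0 &&
                   atomic_compare_exchange_strong(&o->refcount, &c, c + 1);
    }
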
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 1df37ee996d5..e0cfc3a54b6a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1286,7 +1286,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
  *
  * Basic scheme is simple, details are more complex:
- *  - allocate and freeze a new huge page;
+ *  - allocate and lock a new huge page;
  *  - scan over radix tree replacing old pages the new one
  *    + swap in pages if necessary;
  *    + fill in gaps;
@@ -1294,11 +1294,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  *  - if replacing succeed:
  *    + copy data over;
  *    + free old pages;
- *    + unfreeze huge page;
+ *    + unlock huge page;
  *  - if replacing failed;
  *    + put all pages back and unfreeze them;
  *    + restore gaps in the radix-tree;
- *    + free huge page;
+ *    + unlock and free huge page;
  */
 static void collapse_shmem(struct mm_struct *mm,
                struct address_space *mapping, pgoff_t start,
@@ -1332,18 +1332,15 @@ static void collapse_shmem(struct mm_struct *mm,
                goto out;
        }
 
+       __SetPageLocked(new_page);
+       __SetPageSwapBacked(new_page);
        new_page->index = start;
        new_page->mapping = mapping;
-       __SetPageSwapBacked(new_page);
-       __SetPageLocked(new_page);
-       BUG_ON(!page_ref_freeze(new_page, 1));
-
 
        /*
-        * At this point the new_page is 'frozen' (page_count() is zero), locked
-        * and not up-to-date. It's safe to insert it into radix tree, because
-        * nobody would be able to map it or use it in other way until we
-        * unfreeze it.
+        * At this point the new_page is locked and not up-to-date.
+        * It's safe to insert it into the page cache, because nobody would
+        * be able to map it or use it in another way until we unlock it.
         */
 
        index = start;
@@ -1351,19 +1348,29 @@ static void collapse_shmem(struct mm_struct *mm,
        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
                int n = min(iter.index, end) - index;
 
+               /*
+                * Stop if extent has been hole-punched, and is now completely
+                * empty (the more obvious i_size_read() check would take an
+                * irq-unsafe seqlock on 32-bit).
+                */
+               if (n >= HPAGE_PMD_NR) {
+                       result = SCAN_TRUNCATED;
+                       goto tree_locked;
+               }
+
                /*
                 * Handle holes in the radix tree: charge it from shmem and
                 * insert relevant subpage of new_page into the radix-tree.
                 */
                if (n && !shmem_charge(mapping->host, n)) {
                        result = SCAN_FAIL;
-                       break;
+                       goto tree_locked;
                }
-               nr_none += n;
                for (; index < min(iter.index, end); index++) {
                        radix_tree_insert(&mapping->page_tree, index,
                                        new_page + (index % HPAGE_PMD_NR));
                }
+               nr_none += n;
 
                /* We are done. */
                if (index >= end)
@@ -1379,12 +1386,12 @@ static void collapse_shmem(struct mm_struct *mm,
                                result = SCAN_FAIL;
                                goto tree_unlocked;
                        }
-                       spin_lock_irq(&mapping->tree_lock);
                } else if (trylock_page(page)) {
                        get_page(page);
+                       spin_unlock_irq(&mapping->tree_lock);
                } else {
                        result = SCAN_PAGE_LOCK;
-                       break;
+                       goto tree_locked;
                }
 
                /*
@@ -1393,17 +1400,24 @@ static void collapse_shmem(struct mm_struct *mm,
                 */
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(!PageUptodate(page), page);
-               VM_BUG_ON_PAGE(PageTransCompound(page), page);
+
+               /*
+                * If file was truncated then extended, or hole-punched, before
+                * we locked the first page, then a THP might be there already.
+                */
+               if (PageTransCompound(page)) {
+                       result = SCAN_PAGE_COMPOUND;
+                       goto out_unlock;
+               }
 
                if (page_mapping(page) != mapping) {
                        result = SCAN_TRUNCATED;
                        goto out_unlock;
                }
-               spin_unlock_irq(&mapping->tree_lock);
 
                if (isolate_lru_page(page)) {
                        result = SCAN_DEL_PAGE_LRU;
-                       goto out_isolate_failed;
+                       goto out_unlock;
                }
 
                if (page_mapped(page))
@@ -1425,7 +1439,9 @@ static void collapse_shmem(struct mm_struct *mm,
                 */
                if (!page_ref_freeze(page, 3)) {
                        result = SCAN_PAGE_COUNT;
-                       goto out_lru;
+                       spin_unlock_irq(&mapping->tree_lock);
+                       putback_lru_page(page);
+                       goto out_unlock;
                }
 
                /*
@@ -1441,17 +1457,10 @@ static void collapse_shmem(struct mm_struct *mm,
                slot = radix_tree_iter_next(&iter);
                index++;
                continue;
-out_lru:
-               spin_unlock_irq(&mapping->tree_lock);
-               putback_lru_page(page);
-out_isolate_failed:
-               unlock_page(page);
-               put_page(page);
-               goto tree_unlocked;
 out_unlock:
                unlock_page(page);
                put_page(page);
-               break;
+               goto tree_unlocked;
        }
 
        /*
@@ -1459,14 +1468,18 @@ static void collapse_shmem(struct mm_struct *mm,
         * This code only triggers if there's nothing in radix tree
         * beyond 'end'.
         */
-       if (result == SCAN_SUCCEED && index < end) {
+       if (index < end) {
                int n = end - index;
 
+               /* Stop if extent has been truncated, and is now empty */
+               if (n >= HPAGE_PMD_NR) {
+                       result = SCAN_TRUNCATED;
+                       goto tree_locked;
+               }
                if (!shmem_charge(mapping->host, n)) {
                        result = SCAN_FAIL;
                        goto tree_locked;
                }
-
                for (; index < end; index++) {
                        radix_tree_insert(&mapping->page_tree, index,
                                        new_page + (index % HPAGE_PMD_NR));
@@ -1474,57 +1487,62 @@ static void collapse_shmem(struct mm_struct *mm,
                nr_none += n;
        }
 
+       __inc_node_page_state(new_page, NR_SHMEM_THPS);
+       if (nr_none) {
+               struct zone *zone = page_zone(new_page);
+
+               __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
+               __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
+       }
+
 tree_locked:
        spin_unlock_irq(&mapping->tree_lock);
 tree_unlocked:
 
        if (result == SCAN_SUCCEED) {
-               unsigned long flags;
-               struct zone *zone = page_zone(new_page);
-
                /*
                 * Replacing old pages with new one has succeed, now we need to
                 * copy the content and free old pages.
                 */
+               index = start;
                list_for_each_entry_safe(page, tmp, &pagelist, lru) {
+                       while (index < page->index) {
+                               clear_highpage(new_page + (index % HPAGE_PMD_NR));
+                               index++;
+                       }
                        copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
                                        page);
                        list_del(&page->lru);
-                       unlock_page(page);
-                       page_ref_unfreeze(page, 1);
                        page->mapping = NULL;
+                       page_ref_unfreeze(page, 1);
                        ClearPageActive(page);
                        ClearPageUnevictable(page);
+                       unlock_page(page);
                        put_page(page);
+                       index++;
                }
-
-               local_irq_save(flags);
-               __inc_node_page_state(new_page, NR_SHMEM_THPS);
-               if (nr_none) {
-                       __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
-                       __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
+               while (index < end) {
+                       clear_highpage(new_page + (index % HPAGE_PMD_NR));
+                       index++;
                }
-               local_irq_restore(flags);
 
-               /*
-                * Remove pte page tables, so we can re-faulti
-                * the page as huge.
-                */
-               retract_page_tables(mapping, start);
-
-               /* Everything is ready, let's unfreeze the new_page */
-               set_page_dirty(new_page);
                SetPageUptodate(new_page);
-               page_ref_unfreeze(new_page, HPAGE_PMD_NR);
+               page_ref_add(new_page, HPAGE_PMD_NR - 1);
+               set_page_dirty(new_page);
                mem_cgroup_commit_charge(new_page, memcg, false, true);
                lru_cache_add_anon(new_page);
-               unlock_page(new_page);
 
+               /*
+                * Remove pte page tables, so we can re-fault the page as huge.
+                */
+               retract_page_tables(mapping, start);
                *hpage = NULL;
        } else {
                /* Something went wrong: rollback changes to the radix-tree */
-               shmem_uncharge(mapping->host, nr_none);
                spin_lock_irq(&mapping->tree_lock);
+               mapping->nrpages -= nr_none;
+               shmem_uncharge(mapping->host, nr_none);
+
                radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
                                start) {
                        if (iter.index >= end)
@@ -1549,20 +1567,19 @@ static void collapse_shmem(struct mm_struct *mm,
                        page_ref_unfreeze(page, 2);
                        radix_tree_replace_slot(slot, page);
                        spin_unlock_irq(&mapping->tree_lock);
-                       putback_lru_page(page);
                        unlock_page(page);
+                       putback_lru_page(page);
                        spin_lock_irq(&mapping->tree_lock);
                        slot = radix_tree_iter_next(&iter);
                }
                VM_BUG_ON(nr_none);
                spin_unlock_irq(&mapping->tree_lock);
 
-               /* Unfreeze new_page, caller would take care about freeing it */
-               page_ref_unfreeze(new_page, 1);
                mem_cgroup_cancel_charge(new_page, memcg, true);
-               unlock_page(new_page);
                new_page->mapping = NULL;
        }
+
+       unlock_page(new_page);
 out:
        VM_BUG_ON(!list_empty(&pagelist));
        /* TODO: tracepoints */
diff --git a/mm/shmem.c b/mm/shmem.c
index 358a92be43eb..9b17bd4cbc5e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -181,6 +181,38 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
                vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
 }
 
+static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
+{
+       struct shmem_inode_info *info = SHMEM_I(inode);
+       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+
+       if (shmem_acct_block(info->flags, pages))
+               return false;
+
+       if (sbinfo->max_blocks) {
+               if (percpu_counter_compare(&sbinfo->used_blocks,
+                                          sbinfo->max_blocks - pages) > 0)
+                       goto unacct;
+               percpu_counter_add(&sbinfo->used_blocks, pages);
+       }
+
+       return true;
+
+unacct:
+       shmem_unacct_blocks(info->flags, pages);
+       return false;
+}
+
+static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
+{
+       struct shmem_inode_info *info = SHMEM_I(inode);
+       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+
+       if (sbinfo->max_blocks)
+               percpu_counter_sub(&sbinfo->used_blocks, pages);
+       shmem_unacct_blocks(info->flags, pages);
+}
+
 static const struct super_operations shmem_ops;
 static const struct address_space_operations shmem_aops;
 static const struct file_operations shmem_file_operations;
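
These two helpers fold together what every call site used to open-code:
charge the VM accounting first, then reserve against the filesystem-wide
block limit, rolling the first step back if the limit is exceeded. A minimal
userspace model of that reserve-then-undo shape, with plain longs standing
in for the percpu counter and the per-inode flags:

    #include <stdbool.h>
    #include <stdio.h>

    static long used_blocks;
    static const long max_blocks = 1024;    /* 0 would mean "no limit" */

    static bool acct_block(long pages)
    {
            /* step 1 would be shmem_acct_block(); assume it succeeds */
            if (max_blocks) {
                    if (used_blocks > max_blocks - pages)
                            return false;   /* step 1 rolled back here */
                    used_blocks += pages;
            }
            return true;
    }

    static void unacct_blocks(long pages)
    {
            if (max_blocks)
                    used_blocks -= pages;
    }

    int main(void)
    {
            printf("charge 512: %s\n", acct_block(512) ? "ok" : "full");
            printf("charge 600: %s\n", acct_block(600) ? "ok" : "full");
            unacct_blocks(512);
            return 0;
    }

The real helper's percpu_counter_compare(&used_blocks, max_blocks - pages) > 0
is the same test as used_blocks > max_blocks - pages here.
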
@@ -237,61 +269,46 @@ static void shmem_recalc_inode(struct inode *inode)
 
        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
-               struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
-               if (sbinfo->max_blocks)
-                       percpu_counter_add(&sbinfo->used_blocks, -freed);
                info->alloced -= freed;
                inode->i_blocks -= freed * BLOCKS_PER_PAGE;
-               shmem_unacct_blocks(info->flags, freed);
+               shmem_inode_unacct_blocks(inode, freed);
        }
 }
 
 bool shmem_charge(struct inode *inode, long pages)
 {
        struct shmem_inode_info *info = SHMEM_I(inode);
-       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        unsigned long flags;
 
-       if (shmem_acct_block(info->flags, pages))
+       if (!shmem_inode_acct_block(inode, pages))
                return false;
+
+       /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
+       inode->i_mapping->nrpages += pages;
+
        spin_lock_irqsave(&info->lock, flags);
        info->alloced += pages;
        inode->i_blocks += pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);
-       inode->i_mapping->nrpages += pages;
 
-       if (!sbinfo->max_blocks)
-               return true;
-       if (percpu_counter_compare(&sbinfo->used_blocks,
-                               sbinfo->max_blocks - pages) > 0) {
-               inode->i_mapping->nrpages -= pages;
-               spin_lock_irqsave(&info->lock, flags);
-               info->alloced -= pages;
-               shmem_recalc_inode(inode);
-               spin_unlock_irqrestore(&info->lock, flags);
-               shmem_unacct_blocks(info->flags, pages);
-               return false;
-       }
-       percpu_counter_add(&sbinfo->used_blocks, pages);
        return true;
 }
 
 void shmem_uncharge(struct inode *inode, long pages)
 {
        struct shmem_inode_info *info = SHMEM_I(inode);
-       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        unsigned long flags;
 
+       /* nrpages adjustment done by __delete_from_page_cache() or caller */
+
        spin_lock_irqsave(&info->lock, flags);
        info->alloced -= pages;
        inode->i_blocks -= pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);
 
-       if (sbinfo->max_blocks)
-               percpu_counter_sub(&sbinfo->used_blocks, pages);
-       shmem_unacct_blocks(info->flags, pages);
+       shmem_inode_unacct_blocks(inode, pages);
 }
 
 /*
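
The ordering comment added to shmem_charge() matters because
shmem_recalc_inode() treats any page that is accounted but neither swapped
out nor still in the page cache as surplus to return to the quota; bumping
nrpages before the recalculation keeps that arithmetic balanced. The
invariant in isolation (made-up numbers):

    #include <stdio.h>

    int main(void)
    {
            long alloced = 100, swapped = 10, nrpages = 85;
            long freed = alloced - swapped - nrpages;

            if (freed > 0)
                    printf("unaccount %ld surplus pages\n", freed);  /* 5 here */
            return 0;
    }
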
@@ -1424,9 +1441,10 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 }
 
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
-               struct shmem_inode_info *info, struct shmem_sb_info *sbinfo,
+               struct inode *inode,
                pgoff_t index, bool huge)
 {
+       struct shmem_inode_info *info = SHMEM_I(inode);
        struct page *page;
        int nr;
        int err = -ENOSPC;
@@ -1435,14 +1453,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
                huge = false;
        nr = huge ? HPAGE_PMD_NR : 1;
 
-       if (shmem_acct_block(info->flags, nr))
+       if (!shmem_inode_acct_block(inode, nr))
                goto failed;
-       if (sbinfo->max_blocks) {
-               if (percpu_counter_compare(&sbinfo->used_blocks,
-                                       sbinfo->max_blocks - nr) > 0)
-                       goto unacct;
-               percpu_counter_add(&sbinfo->used_blocks, nr);
-       }
 
        if (huge)
                page = shmem_alloc_hugepage(gfp, info, index);
@@ -1455,10 +1467,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
        }
 
        err = -ENOMEM;
-       if (sbinfo->max_blocks)
-               percpu_counter_add(&sbinfo->used_blocks, -nr);
-unacct:
-       shmem_unacct_blocks(info->flags, nr);
+       shmem_inode_unacct_blocks(inode, nr);
 failed:
        return ERR_PTR(err);
 }
@@ -1485,11 +1494,13 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 {
        struct page *oldpage, *newpage;
        struct address_space *swap_mapping;
+       swp_entry_t entry;
        pgoff_t swap_index;
        int error;
 
        oldpage = *pagep;
-       swap_index = page_private(oldpage);
+       entry.val = page_private(oldpage);
+       swap_index = swp_offset(entry);
        swap_mapping = page_mapping(oldpage);
 
        /*
@@ -1508,7 +1519,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        __SetPageLocked(newpage);
        __SetPageSwapBacked(newpage);
        SetPageUptodate(newpage);
-       set_page_private(newpage, swap_index);
+       set_page_private(newpage, entry.val);
        SetPageSwapCache(newpage);
 
        /*
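
The underlying bug in shmem_replace_page() was treating page_private() as a
bare swap offset, when for a swap-cache page it holds the whole swp_entry_t
value (swap type plus offset packed into one word). A self-contained model
of the encoding; the bit split below is illustrative, not the kernel's exact
layout:

    #include <assert.h>
    #include <stdio.h>

    #define TYPE_SHIFT 58ULL                    /* illustrative split */
    #define OFFSET_MASK ((1ULL << TYPE_SHIFT) - 1)

    typedef struct { unsigned long long val; } swp_entry_t;

    static swp_entry_t swp_entry(unsigned type, unsigned long long offset)
    {
            swp_entry_t e = { ((unsigned long long)type << TYPE_SHIFT) |
                              (offset & OFFSET_MASK) };
            return e;
    }

    static unsigned long long swp_offset(swp_entry_t e)
    {
            return e.val & OFFSET_MASK;
    }

    int main(void)
    {
            swp_entry_t e = swp_entry(1, 12345);
            assert(swp_offset(e) == 12345);
            assert(e.val != 12345);   /* raw value != offset: the old bug */
            printf("entry 0x%llx, offset %llu\n", e.val, swp_offset(e));
            return 0;
    }

With the fix, newpage inherits the full entry value, so a later lookup
decodes the same type and offset.
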
@@ -1718,10 +1729,9 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
                }
 
 alloc_huge:
-               page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
-                               index, true);
+               page = shmem_alloc_and_acct_page(gfp, inode, index, true);
                if (IS_ERR(page)) {
-alloc_nohuge:          page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
+alloc_nohuge:          page = shmem_alloc_and_acct_page(gfp, inode,
                                        index, false);
                }
                if (IS_ERR(page)) {
@@ -1843,10 +1853,7 @@ alloc_nohuge:            page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
         * Error recovery.
         */
 unacct:
-       if (sbinfo->max_blocks)
-               percpu_counter_sub(&sbinfo->used_blocks,
-                               1 << compound_order(page));
-       shmem_unacct_blocks(info->flags, 1 << compound_order(page));
+       shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
 
        if (PageTransHuge(page)) {
                unlock_page(page);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 68ecb7d71c2b..dca1fed0d7da 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4421,6 +4421,10 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
        nf_reset(skb);
        nf_reset_trace(skb);
 
+#ifdef CONFIG_NET_SWITCHDEV
+       skb->offload_fwd_mark = 0;
+#endif
+
        if (!xnet)
                return;
 
diff --git a/sound/core/control.c b/sound/core/control.c
index 995cde48c1be..511368fe974e 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -346,6 +346,40 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
        return 0;
 }
 
+/* add a new kcontrol object; call with card->controls_rwsem locked */
+static int __snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
+{
+       struct snd_ctl_elem_id id;
+       unsigned int idx;
+       unsigned int count;
+
+       id = kcontrol->id;
+       if (id.index > UINT_MAX - kcontrol->count)
+               return -EINVAL;
+
+       if (snd_ctl_find_id(card, &id)) {
+               dev_err(card->dev,
+                       "control %i:%i:%i:%s:%i is already present\n",
+                       id.iface, id.device, id.subdevice, id.name, id.index);
+               return -EBUSY;
+       }
+
+       if (snd_ctl_find_hole(card, kcontrol->count) < 0)
+               return -ENOMEM;
+
+       list_add_tail(&kcontrol->list, &card->controls);
+       card->controls_count += kcontrol->count;
+       kcontrol->id.numid = card->last_numid + 1;
+       card->last_numid += kcontrol->count;
+
+       id = kcontrol->id;
+       count = kcontrol->count;
+       for (idx = 0; idx < count; idx++, id.index++, id.numid++)
+               snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
+
+       return 0;
+}
+
 /**
  * snd_ctl_add - add the control instance to the card
  * @card: the card instance
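
Besides centralising the insert path, __snd_ctl_add() (which, per its
comment, must be called with card->controls_rwsem write-locked) keeps the
guard against index arithmetic wrapping. The form of the test is the
important part: written as a sum, the unsigned comparison could never fire.
Standalone demonstration:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool would_overflow(unsigned int index, unsigned int count)
    {
            return index > UINT_MAX - count;   /* no wraparound possible */
    }

    int main(void)
    {
            printf("%d\n", would_overflow(UINT_MAX - 1, 4));   /* 1: rejected */
            printf("%d\n", would_overflow(10, 4));             /* 0: fine */
            return 0;
    }
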
@@ -362,45 +396,18 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
  */
 int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
 {
-       struct snd_ctl_elem_id id;
-       unsigned int idx;
-       unsigned int count;
        int err = -EINVAL;
 
        if (! kcontrol)
                return err;
        if (snd_BUG_ON(!card || !kcontrol->info))
                goto error;
-       id = kcontrol->id;
-       if (id.index > UINT_MAX - kcontrol->count)
-               goto error;
 
        down_write(&card->controls_rwsem);
-       if (snd_ctl_find_id(card, &id)) {
-               up_write(&card->controls_rwsem);
-               dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
-                                       id.iface,
-                                       id.device,
-                                       id.subdevice,
-                                       id.name,
-                                       id.index);
-               err = -EBUSY;
-               goto error;
-       }
-       if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
-               up_write(&card->controls_rwsem);
-               err = -ENOMEM;
-               goto error;
-       }
-       list_add_tail(&kcontrol->list, &card->controls);
-       card->controls_count += kcontrol->count;
-       kcontrol->id.numid = card->last_numid + 1;
-       card->last_numid += kcontrol->count;
-       id = kcontrol->id;
-       count = kcontrol->count;
+       err = __snd_ctl_add(card, kcontrol);
        up_write(&card->controls_rwsem);
-       for (idx = 0; idx < count; idx++, id.index++, id.numid++)
-               snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
+       if (err < 0)
+               goto error;
        return 0;
 
  error:
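
The public snd_ctl_add() is now just lock, call the double-underscore
helper, unlock — the usual kernel convention for splitting a locked core
from its locking wrapper. A compilable pthread sketch of the same split
(all names here are invented for illustration):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static int nitems;

    /* assumes the write lock is held, like __snd_ctl_add() */
    static int __add_locked(int item)
    {
            (void)item;     /* lookup + insert would happen here, atomically */
            nitems++;
            return 0;
    }

    static int add(int item)
    {
            int err;

            pthread_rwlock_wrlock(&lock);
            err = __add_locked(item);
            pthread_rwlock_unlock(&lock);
            return err;
    }

    int main(void)
    {
            printf("add: %d, items: %d\n", add(42), nitems);
            return 0;
    }
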
@@ -1354,9 +1361,12 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
                kctl->tlv.c = snd_ctl_elem_user_tlv;
 
        /* This function manage to free the instance on failure. */
-       err = snd_ctl_add(card, kctl);
-       if (err < 0)
-               return err;
+       down_write(&card->controls_rwsem);
+       err = __snd_ctl_add(card, kctl);
+       if (err < 0) {
+               snd_ctl_free_one(kctl);
+               goto unlock;
+       }
        offset = snd_ctl_get_ioff(kctl, &info->id);
        snd_ctl_build_ioff(&info->id, kctl, offset);
        /*
@@ -1367,10 +1377,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
         * which locks the element.
         */
 
-       down_write(&card->controls_rwsem);
        card->user_ctl_count++;
-       up_write(&card->controls_rwsem);
 
+ unlock:
+       up_write(&card->controls_rwsem);
        return 0;
 }
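
The point of calling __snd_ctl_add() directly from snd_ctl_elem_add() is
that the duplicate check, list insertion, ioff bookkeeping and the
user_ctl_count increment now all sit inside one write-locked section, so a
concurrent element removal can no longer interleave with them. The resulting
control flow, reduced to a compilable sketch (stubs replace the ALSA calls,
and the sketch returns err on failure rather than mirroring the function's
labels line for line):

    #include <pthread.h>

    static pthread_rwlock_t controls_rwsem = PTHREAD_RWLOCK_INITIALIZER;
    static int user_ctl_count;

    static int add_locked(void)    { return 0; }  /* stub for __snd_ctl_add() */
    static void free_element(void) { }            /* stub for snd_ctl_free_one() */

    static int elem_add(void)
    {
            int err;

            pthread_rwlock_wrlock(&controls_rwsem);
            err = add_locked();
            if (err < 0) {
                    free_element();
                    goto unlock;
            }
            user_ctl_count++;   /* still inside the same critical section */
    unlock:
            pthread_rwlock_unlock(&controls_rwsem);
            return err;
    }

    int main(void)
    {
            return elem_add();
    }
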
 
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index 913b731d2236..f40330ddb9b2 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -1531,7 +1531,6 @@ static int snd_wss_playback_open(struct snd_pcm_substream *substream)
        if (err < 0) {
                if (chip->release_dma)
                        chip->release_dma(chip, chip->dma_private_data, chip->dma1);
-               snd_free_pages(runtime->dma_area, runtime->dma_bytes);
                return err;
        }
        chip->playback_substream = substream;
@@ -1572,7 +1571,6 @@ static int snd_wss_capture_open(struct snd_pcm_substream *substream)
        if (err < 0) {
                if (chip->release_dma)
                        chip->release_dma(chip, chip->dma_private_data, chip->dma2);
-               snd_free_pages(runtime->dma_area, runtime->dma_bytes);
                return err;
        }
        chip->capture_substream = substream;
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 82259ca61e64..c4840fda44b4 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -824,7 +824,7 @@ static int snd_ac97_put_spsa(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
 {
        struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
        int reg = kcontrol->private_value & 0xff;
-       int shift = (kcontrol->private_value >> 8) & 0xff;
+       int shift = (kcontrol->private_value >> 8) & 0x0f;
        int mask = (kcontrol->private_value >> 16) & 0xff;
        // int invert = (kcontrol->private_value >> 24) & 0xff;
        unsigned short value, old, new;
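
The one-character ac97 change narrows the shift pulled out of private_value
from eight bits to four: AC'97 registers are 16 bits wide, so a valid bit
position is 0..15, and a malformed private_value could previously request a
shift far past the register width. Illustrative decoding (values are made up):

    #include <stdio.h>

    int main(void)
    {
            /* reg | shift << 8 | mask << 16, as the driver packs it */
            unsigned long pv = 0x3A | (10UL << 8) | (0x01UL << 16);

            unsigned int reg   = pv & 0xff;
            unsigned int shift = (pv >> 8) & 0x0f;  /* 0xff allowed shift > 15 */
            unsigned int mask  = (pv >> 16) & 0xff;
            unsigned int value = (1 & mask) << shift;

            printf("reg 0x%02x shift %u mask 0x%02x value 0x%04x\n",
                   reg, shift, mask, value);
            return 0;
    }
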
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index 30bdc971883b..017e241b0ec9 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -1146,10 +1146,8 @@ static int snd_cs4231_playback_open(struct snd_pcm_substream *substream)
        runtime->hw = snd_cs4231_playback;
 
        err = snd_cs4231_open(chip, CS4231_MODE_PLAY);
-       if (err < 0) {
-               snd_free_pages(runtime->dma_area, runtime->dma_bytes);
+       if (err < 0)
                return err;
-       }
        chip->playback_substream = substream;
        chip->p_periods_sent = 0;
        snd_pcm_set_sync(substream);
@@ -1167,10 +1165,8 @@ static int snd_cs4231_capture_open(struct snd_pcm_substream *substream)
        runtime->hw = snd_cs4231_capture;
 
        err = snd_cs4231_open(chip, CS4231_MODE_RECORD);
-       if (err < 0) {
-               snd_free_pages(runtime->dma_area, runtime->dma_bytes);
+       if (err < 0)
                return err;
-       }
        chip->capture_substream = substream;
        chip->c_periods_sent = 0;
        snd_pcm_set_sync(substream);
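
The wss and cs4231 hunks remove the same mistake in four places: on an
open-path failure the drivers freed runtime->dma_area, a buffer that is
managed and later released by the PCM core, so the extra call risked an
invalid or double free. The ownership rule as a tiny userspace analogue
(names invented):

    #include <stdlib.h>

    struct runtime { void *dma_area; };

    /* error paths report failure but never free the core-owned buffer */
    static int open_stream(struct runtime *rt, int fail)
    {
            (void)rt;
            return fail ? -1 : 0;
    }

    /* the single owner releases the buffer exactly once */
    static void close_stream(struct runtime *rt)
    {
            free(rt->dma_area);
            rt->dma_area = NULL;
    }

    int main(void)
    {
            struct runtime rt = { malloc(4096) };
            int err = open_stream(&rt, 1);

            /* even when err < 0, do not free rt.dma_area here:
             * that was the bug the hunks above remove */
            close_stream(&rt);
            return err ? 1 : 0;
    }
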
