diff --git a/Makefile b/Makefile
index 10aec937e9e4..7f54ac081cf3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 53
+SUBLEVEL = 54
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c1ea67db8404..c61ed7890cef 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -74,7 +74,8 @@ extern void execve_tail(void);
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
  */
 
-#define TASK_SIZE_OF(tsk)      ((tsk)->mm->context.asce_limit)
+#define TASK_SIZE_OF(tsk)      ((tsk)->mm ? \
+                                (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
 #define TASK_UNMAPPED_BASE     (test_thread_flag(TIF_31BIT) ? \
                                        (1UL << 30) : (1UL << 41))
 #define TASK_SIZE              TASK_SIZE_OF(current)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 1f581eb61bc2..d097d71685df 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -805,10 +805,10 @@ static void __init setup_randomness(void)
 {
        struct sysinfo_3_2_2 *vmms;
 
-       vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL);
-       if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count)
-               add_device_randomness(&vmms, vmms->count);
-       free_page((unsigned long) vmms);
+       vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+       if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
+               add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
+       memblock_free((unsigned long) vmms, PAGE_SIZE);
 }
 
 /*
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 575dc123bda2..23e3f5d77a24 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -295,6 +295,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;
 
+       if (kvm_is_ucontrol(kvm))
+               return -EINVAL;
+
        mutex_lock(&kvm->slots_lock);
 
        r = -EINVAL;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 64f60a48def1..3a7ae80dc49d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3499,7 +3499,7 @@ static void fix_rmode_seg(int seg, struct kvm_segment *save)
        }
 
        vmcs_write16(sf->selector, var.selector);
-       vmcs_write32(sf->base, var.base);
+       vmcs_writel(sf->base, var.base);
        vmcs_write32(sf->limit, var.limit);
        vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
 }
@@ -7905,7 +7905,7 @@ static void kvm_flush_pml_buffers(struct kvm *kvm)
 static void vmx_dump_sel(char *name, uint32_t sel)
 {
        pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
-              name, vmcs_read32(sel),
+              name, vmcs_read16(sel),
               vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
               vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
               vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 9735691f37f1..49ccbd9022f6 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -133,6 +133,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
 
 __tagtable(BP_TAG_INITRD, parse_tag_initrd);
 
+#endif /* CONFIG_BLK_DEV_INITRD */
+
 #ifdef CONFIG_OF
 
 static int __init parse_tag_fdt(const bp_tag_t *tag)
@@ -145,8 +147,6 @@ __tagtable(BP_TAG_FDT, parse_tag_fdt);
 
 #endif /* CONFIG_OF */
 
-#endif /* CONFIG_BLK_DEV_INITRD */
-
 static int __init parse_tag_cmdline(const bp_tag_t* tag)
 {
        strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 0beaa52df66b..5df8e1234505 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -94,6 +94,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x04CA, 0x300f) },
        { USB_DEVICE(0x04CA, 0x3010) },
        { USB_DEVICE(0x04CA, 0x3014) },
+       { USB_DEVICE(0x04CA, 0x3018) },
        { USB_DEVICE(0x0930, 0x0219) },
        { USB_DEVICE(0x0930, 0x021c) },
        { USB_DEVICE(0x0930, 0x0220) },
@@ -160,6 +161,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c306b483de60..cd6b141b9825 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -208,6 +208,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index c161eeda417b..267749a94c5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3704,9 +3704,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
        default:
                encoder->possible_crtcs = 0x3;
                break;
+       case 3:
+               encoder->possible_crtcs = 0x7;
+               break;
        case 4:
                encoder->possible_crtcs = 0xf;
                break;
+       case 5:
+               encoder->possible_crtcs = 0x1f;
+               break;
        case 6:
                encoder->possible_crtcs = 0x3f;
                break;
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 810c51d92b99..30672a3df8a9 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -58,13 +58,9 @@ bool ast_is_vga_enabled(struct drm_device *dev)
                /* TODO 1180 */
        } else {
                ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
-               if (ch) {
-                       ast_open_key(ast);
-                       ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
-                       return ch & 0x04;
-               }
+               return !!(ch & 0x01);
        }
-       return 0;
+       return false;
 }
 
 static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
@@ -375,8 +371,8 @@ void ast_post_gpu(struct drm_device *dev)
        pci_write_config_dword(ast->dev->pdev, 0x04, reg);
 
        ast_enable_vga(dev);
-       ast_enable_mmio(dev);
        ast_open_key(ast);
+       ast_enable_mmio(dev);
        ast_set_def_ext_reg(dev);
 
        if (ast->chip == AST2300 || ast->chip == AST2400)
@@ -1630,12 +1626,44 @@ static void ast_init_dram_2300(struct drm_device *dev)
                temp |= 0x73;
                ast_write32(ast, 0x12008, temp);
 
+               param.dram_freq = 396;
                param.dram_type = AST_DDR3;
+               temp = ast_mindwm(ast, 0x1e6e2070);
                if (temp & 0x01000000)
                        param.dram_type = AST_DDR2;
-               param.dram_chipid = ast->dram_type;
-               param.dram_freq = ast->mclk;
-               param.vram_size = ast->vram_size;
+                switch (temp & 0x18000000) {
+               case 0:
+                       param.dram_chipid = AST_DRAM_512Mx16;
+                       break;
+               default:
+               case 0x08000000:
+                       param.dram_chipid = AST_DRAM_1Gx16;
+                       break;
+               case 0x10000000:
+                       param.dram_chipid = AST_DRAM_2Gx16;
+                       break;
+               case 0x18000000:
+                       param.dram_chipid = AST_DRAM_4Gx16;
+                       break;
+               }
+                switch (temp & 0x0c) {
+                default:
+               case 0x00:
+                       param.vram_size = AST_VIDMEM_SIZE_8M;
+                       break;
+
+               case 0x04:
+                       param.vram_size = AST_VIDMEM_SIZE_16M;
+                       break;
+
+               case 0x08:
+                       param.vram_size = AST_VIDMEM_SIZE_32M;
+                       break;
+
+               case 0x0c:
+                       param.vram_size = AST_VIDMEM_SIZE_64M;
+                       break;
+               }
 
                if (param.dram_type == AST_DDR3) {
                        get_ddr3_info(ast, &param);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 1ac29d703c12..ea443fafb934 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -265,7 +265,7 @@ mode_fixup(struct drm_atomic_state *state)
        struct drm_connector *connector;
        struct drm_connector_state *conn_state;
        int i;
-       bool ret;
+       int ret;
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                if (!crtc_state->mode_changed &&
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 8c9ac021608f..cc1e16fd7e76 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -144,6 +144,9 @@ static struct edid_quirk {
 
        /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
        { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+
+       /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
+       { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
 };
 
 /*
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3f802163f7d4..e7c18519274a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6803,7 +6803,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 
 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
 {
-       I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+        u32 val;
+
+        /*
+        * On driver load, a pipe may be active and driving a DSI display.
+        * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+        * (and never recovering) in this case. intel_dsi_post_disable() will
+        * clear it when we turn off the display.
+        */
+        val = I915_READ(DSPCLK_GATE_D);
+        val &= DPOUNIT_CLOCK_GATE_DISABLE;
+        val |= VRHUNIT_CLOCK_GATE_DISABLE;
+        I915_WRITE(DSPCLK_GATE_D, val);
 
        /*
         * Disable trickle feed and enable pnd deadline calculation
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4ae8b56b1847..037c38bb5333 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1621,7 +1621,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        int put_count;
-       uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
 
        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo, &glob->swap_lru, swap) {
@@ -1657,7 +1656,8 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
        if (unlikely(ret != 0))
                goto out;
 
-       if ((bo->mem.placement & swap_placement) != swap_placement) {
+       if (bo->mem.mem_type != TTM_PL_SYSTEM ||
+           bo->ttm->caching_state != tt_cached) {
                struct ttm_mem_reg evict_mem;
 
                evict_mem = bo->mem;
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 89fd0113aa5c..57c191798699 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -219,7 +219,7 @@ int hv_init(void)
        /* See if the hypercall page is already set */
        rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
-       virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
+       virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
 
        if (!virtaddr)
                goto cleanup;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 3ba7de5f9379..2018d24344de 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1488,12 +1488,14 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 
        ret = ipoib_set_mode(dev, buf);
 
-       rtnl_unlock();
-
-       if (!ret)
-               return count;
+       /* The assumption is that the function ipoib_set_mode returned
+        * with the rtnl held by it, if not the value -EBUSY returned,
+        * then no need to rtnl_unlock
+        */
+       if (ret != -EBUSY)
+               rtnl_unlock();
 
-       return ret;
+       return (!ret || ret == -EBUSY) ? count : ret;
 }
 
 static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8a4d10452d61..8efcff1beb8f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -464,8 +464,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
                priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
 
                ipoib_flush_paths(dev);
-               rtnl_lock();
-               return 0;
+               return (!rtnl_trylock()) ? -EBUSY : 0;
        }
 
        if (!strcmp(buf, "datagram\n")) {
@@ -474,8 +473,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
                dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
                rtnl_unlock();
                ipoib_flush_paths(dev);
-               rtnl_lock();
-               return 0;
+               return (!rtnl_trylock()) ? -EBUSY : 0;
        }
 
        return -EINVAL;
@@ -628,6 +626,14 @@ void ipoib_mark_paths_invalid(struct net_device *dev)
        spin_unlock_irq(&priv->lock);
 }
 
+static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
+{
+       struct ipoib_pseudo_header *phdr;
+
+       phdr = (struct ipoib_pseudo_header *)skb_push(skb, sizeof(*phdr));
+       memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+}
+
 void ipoib_flush_paths(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -852,8 +858,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
                        }
                        if (skb_queue_len(&neigh->queue) <
                            IPOIB_MAX_PATH_REC_QUEUE) {
-                               /* put pseudoheader back on for next time */
-                               skb_push(skb, IPOIB_PSEUDO_LEN);
+                               push_pseudo_header(skb, neigh->daddr);
                                __skb_queue_tail(&neigh->queue, skb);
                        } else {
                                ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
@@ -871,10 +876,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 
                if (!path->query && path_rec_start(dev, path))
                        goto err_path;
-               if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+               if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+                       push_pseudo_header(skb, neigh->daddr);
                        __skb_queue_tail(&neigh->queue, skb);
-               else
+               } else {
                        goto err_drop;
+               }
        }
 
        spin_unlock_irqrestore(&priv->lock, flags);
@@ -910,8 +917,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                }
                if (path) {
                        if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
-                               /* put pseudoheader back on for next time */
-                               skb_push(skb, IPOIB_PSEUDO_LEN);
+                               push_pseudo_header(skb, phdr->hwaddr);
                                __skb_queue_tail(&path->queue, skb);
                        } else {
                                ++dev->stats.tx_dropped;
@@ -943,8 +949,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                return;
        } else if ((path->query || !path_rec_start(dev, path)) &&
                   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
-               /* put pseudoheader back on for next time */
-               skb_push(skb, IPOIB_PSEUDO_LEN);
+               push_pseudo_header(skb, phdr->hwaddr);
                __skb_queue_tail(&path->queue, skb);
        } else {
                ++dev->stats.tx_dropped;
@@ -1025,8 +1030,7 @@ send_using_neigh:
        }
 
        if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
-               /* put pseudoheader back on for next time */
-               skb_push(skb, sizeof(*phdr));
+               push_pseudo_header(skb, phdr->hwaddr);
                spin_lock_irqsave(&priv->lock, flags);
                __skb_queue_tail(&neigh->queue, skb);
                spin_unlock_irqrestore(&priv->lock, flags);
@@ -1058,7 +1062,6 @@ static int ipoib_hard_header(struct sk_buff *skb,
                             unsigned short type,
                             const void *daddr, const void *saddr, unsigned len)
 {
-       struct ipoib_pseudo_header *phdr;
        struct ipoib_header *header;
 
        header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@ -1071,8 +1074,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
         * destination address into skb hard header so we can figure out where
         * to send the packet later.
         */
-       phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
-       memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+       push_pseudo_header(skb, daddr);
 
        return IPOIB_HARD_LEN;
 }
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 5f0f4fc58f43..e397f1b0af09 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1787,17 +1787,24 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
        if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
                spin_lock_irqsave(&ch->lock, flags);
                ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
+               if (rsp->tag == ch->tsk_mgmt_tag) {
+                       ch->tsk_mgmt_status = -1;
+                       if (be32_to_cpu(rsp->resp_data_len) >= 4)
+                               ch->tsk_mgmt_status = rsp->data[3];
+                       complete(&ch->tsk_mgmt_done);
+               } else {
+                       shost_printk(KERN_ERR, target->scsi_host,
+                                    "Received tsk mgmt response too late for tag %#llx\n",
+                                    rsp->tag);
+               }
                spin_unlock_irqrestore(&ch->lock, flags);
-
-               ch->tsk_mgmt_status = -1;
-               if (be32_to_cpu(rsp->resp_data_len) >= 4)
-                       ch->tsk_mgmt_status = rsp->data[3];
-               complete(&ch->tsk_mgmt_done);
        } else {
                scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
-               if (scmnd) {
+               if (scmnd && scmnd->host_scribble) {
                        req = (void *)scmnd->host_scribble;
                        scmnd = srp_claim_req(ch, req, NULL, scmnd);
+               } else {
+                       scmnd = NULL;
                }
                if (!scmnd) {
                        shost_printk(KERN_ERR, target->scsi_host,
@@ -2469,19 +2476,18 @@ srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
 }
 
 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
-                            u8 func)
+                            u8 func, u8 *status)
 {
        struct srp_target_port *target = ch->target;
        struct srp_rport *rport = target->rport;
        struct ib_device *dev = target->srp_host->srp_dev->dev;
        struct srp_iu *iu;
        struct srp_tsk_mgmt *tsk_mgmt;
+       int res;
 
        if (!ch->connected || target->qp_in_error)
                return -1;
 
-       init_completion(&ch->tsk_mgmt_done);
-
        /*
         * Lock the rport mutex to avoid that srp_create_ch_ib() is
         * invoked while a task management function is being sent.
@@ -2504,10 +2510,16 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
 
        tsk_mgmt->opcode        = SRP_TSK_MGMT;
        int_to_scsilun(lun, &tsk_mgmt->lun);
-       tsk_mgmt->tag           = req_tag | SRP_TAG_TSK_MGMT;
        tsk_mgmt->tsk_mgmt_func = func;
        tsk_mgmt->task_tag      = req_tag;
 
+       spin_lock_irq(&ch->lock);
+       ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
+       tsk_mgmt->tag = ch->tsk_mgmt_tag;
+       spin_unlock_irq(&ch->lock);
+
+       init_completion(&ch->tsk_mgmt_done);
+
        ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
                                      DMA_TO_DEVICE);
        if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
@@ -2516,13 +2528,15 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
 
                return -1;
        }
+       res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
+                                       msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
+       if (res > 0 && status)
+               *status = ch->tsk_mgmt_status;
        mutex_unlock(&rport->mutex);
 
-       if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
-                                        msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
-               return -1;
+       WARN_ON_ONCE(res < 0);
 
-       return 0;
+       return res > 0 ? 0 : -1;
 }
 
 static int srp_abort(struct scsi_cmnd *scmnd)
@@ -2548,7 +2562,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
        shost_printk(KERN_ERR, target->scsi_host,
                     "Sending SRP abort for tag %#x\n", tag);
        if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
-                             SRP_TSK_ABORT_TASK) == 0)
+                             SRP_TSK_ABORT_TASK, NULL) == 0)
                ret = SUCCESS;
        else if (target->rport->state == SRP_RPORT_LOST)
                ret = FAST_IO_FAIL;
@@ -2566,14 +2580,15 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        struct srp_rdma_ch *ch;
        int i;
+       u8 status;
 
        shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
 
        ch = &target->ch[0];
        if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
-                             SRP_TSK_LUN_RESET))
+                             SRP_TSK_LUN_RESET, &status))
                return FAILED;
-       if (ch->tsk_mgmt_status)
+       if (status)
                return FAILED;
 
        for (i = 0; i < target->ch_count; i++) {
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index f6af531f9f32..109eea94d0f9 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -168,6 +168,7 @@ struct srp_rdma_ch {
        int                     max_ti_iu_len;
        int                     comp_vector;
 
+       u64                     tsk_mgmt_tag;
        struct completion       tsk_mgmt_done;
        u8                      tsk_mgmt_status;
        bool                    connected;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 25aba9886990..0e67145bc418 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -993,7 +993,7 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
                txq_pcpu->buffs + txq_pcpu->txq_put_index;
        tx_buf->skb = skb;
        tx_buf->size = tx_desc->data_size;
-       tx_buf->phys = tx_desc->buf_phys_addr;
+       tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
        txq_pcpu->txq_put_index++;
        if (txq_pcpu->txq_put_index == txq_pcpu->size)
                txq_pcpu->txq_put_index = 0;
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 860d4aed8274..43617ded3773 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -30,7 +30,7 @@
 static int numlbs = 2;
 
 static LIST_HEAD(fakelb_phys);
-static DEFINE_SPINLOCK(fakelb_phys_lock);
+static DEFINE_MUTEX(fakelb_phys_lock);
 
 static LIST_HEAD(fakelb_ifup_phys);
 static DEFINE_RWLOCK(fakelb_ifup_phys_lock);
@@ -180,9 +180,9 @@ static int fakelb_add_one(struct device *dev)
        if (err)
                goto err_reg;
 
-       spin_lock(&fakelb_phys_lock);
+       mutex_lock(&fakelb_phys_lock);
        list_add_tail(&phy->list, &fakelb_phys);
-       spin_unlock(&fakelb_phys_lock);
+       mutex_unlock(&fakelb_phys_lock);
 
        return 0;
 
@@ -214,10 +214,10 @@ static int fakelb_probe(struct platform_device *pdev)
        return 0;
 
 err_slave:
-       spin_lock(&fakelb_phys_lock);
+       mutex_lock(&fakelb_phys_lock);
        list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
                fakelb_del(phy);
-       spin_unlock(&fakelb_phys_lock);
+       mutex_unlock(&fakelb_phys_lock);
        return err;
 }
 
@@ -225,10 +225,10 @@ static int fakelb_remove(struct platform_device *pdev)
 {
        struct fakelb_phy *phy, *tmp;
 
-       spin_lock(&fakelb_phys_lock);
+       mutex_lock(&fakelb_phys_lock);
        list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
                fakelb_del(phy);
-       spin_unlock(&fakelb_phys_lock);
+       mutex_unlock(&fakelb_phys_lock);
        return 0;
 }
 
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 117fccf7934a..01a6a83f625d 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -65,7 +65,6 @@
 #define PCA9685_MAXCHAN                0x10
 
 #define LED_FULL               (1 << 4)
-#define MODE1_RESTART          (1 << 7)
 #define MODE1_SLEEP            (1 << 4)
 #define MODE2_INVRT            (1 << 4)
 #define MODE2_OUTDRV           (1 << 2)
@@ -117,16 +116,6 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                        udelay(500);
 
                        pca->period_ns = period_ns;
-
-                       /*
-                        * If the duty cycle did not change, restart PWM with
-                        * the same duty cycle to period ratio and return.
-                        */
-                       if (duty_ns == pca->duty_ns) {
-                               regmap_update_bits(pca->regmap, PCA9685_MODE1,
-                                                  MODE1_RESTART, 0x1);
-                               return 0;
-                       }
                } else {
                        dev_err(chip->dev,
                                "prescaler not set: period out of bounds!\n");
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 94a8f4ab57bc..ae1dc37e4068 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -892,7 +892,7 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
        dev_info = bdev->bd_disk->private_data;
        if (!dev_info)
                return -ENODEV;
-       dev_sz = dev_info->end - dev_info->start;
+       dev_sz = dev_info->end - dev_info->start + 1;
        offset = secnum * 512;
        addr = (void *) (dev_info->start + offset);
        *pfn = virt_to_phys(addr) >> PAGE_SHIFT;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 5d06253c2a7a..30e9fbbff051 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -147,11 +147,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
        struct qdio_q *q;
        int i;
 
-       for_each_input_queue(irq, q, i) {
-               if (!references_shared_dsci(irq) &&
-                   has_multiple_inq_on_dsci(irq))
-                       xchg(q->irq_ptr->dsci, 0);
+       if (!references_shared_dsci(irq) &&
+           has_multiple_inq_on_dsci(irq))
+               xchg(irq->dsci, 0);
 
+       for_each_input_queue(irq, q, i) {
                if (q->u.in.queue_start_poll) {
                        /* skip if polling is enabled or already in work */
                        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 356c80fbb304..bb6a6c35324a 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -77,12 +77,16 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
                                        &deve->read_bytes);
 
                se_lun = rcu_dereference(deve->se_lun);
+
+               if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
+                       se_lun = NULL;
+                       goto out_unlock;
+               }
+
                se_cmd->se_lun = rcu_dereference(deve->se_lun);
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
-
-               percpu_ref_get(&se_lun->lun_ref);
                se_cmd->lun_ref_active = true;
 
                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
@@ -96,6 +100,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
                        goto ref_dev;
                }
        }
+out_unlock:
        rcu_read_unlock();
 
        if (!se_lun) {
@@ -826,6 +831,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
        xcopy_lun = &dev->xcopy_lun;
        rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
        init_completion(&xcopy_lun->lun_ref_comp);
+       init_completion(&xcopy_lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
        INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
        mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 028854cda97b..2794c6ec5c3c 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -539,7 +539,7 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref)
 {
        struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
 
-       complete(&lun->lun_ref_comp);
+       complete(&lun->lun_shutdown_comp);
 }
 
 int core_tpg_register(
@@ -666,6 +666,7 @@ struct se_lun *core_tpg_alloc_lun(
        lun->lun_link_magic = SE_LUN_LINK_MAGIC;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_ref_comp);
+       init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_deve_list);
        INIT_LIST_HEAD(&lun->lun_dev_link);
        atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index befe22744802..df2059984e14 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2680,10 +2680,39 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 }
 EXPORT_SYMBOL(target_wait_for_sess_cmds);
 
+static void target_lun_confirm(struct percpu_ref *ref)
+{
+       struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+
+       complete(&lun->lun_ref_comp);
+}
+
 void transport_clear_lun_ref(struct se_lun *lun)
 {
-       percpu_ref_kill(&lun->lun_ref);
+       /*
+        * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
+        * the initial reference and schedule confirm kill to be
+        * executed after one full RCU grace period has completed.
+        */
+       percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
+       /*
+        * The first completion waits for percpu_ref_switch_to_atomic_rcu()
+        * to call target_lun_confirm after lun->lun_ref has been marked
+        * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
+        * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
+        * fails for all new incoming I/O.
+        */
        wait_for_completion(&lun->lun_ref_comp);
+       /*
+        * The second completion waits for percpu_ref_put_many() to
+        * invoke ->release() after lun->lun_ref has switched to
+        * atomic_t mode, and lun->lun_ref.count has reached zero.
+        *
+        * At this point all target-core lun->lun_ref references have
+        * been dropped via transport_lun_remove_cmd(), and it's safe
+        * to proceed with the remaining LUN shutdown.
+        */
+       wait_for_completion(&lun->lun_shutdown_comp);
 }
 
 static bool
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 644ddb841d9f..6d1e2f746ab4 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -114,7 +114,7 @@
 #define DEFAULT_TX_BUF_COUNT 3
 
 struct n_hdlc_buf {
-       struct n_hdlc_buf *link;
+       struct list_head  list_item;
        int               count;
        char              buf[1];
 };
@@ -122,8 +122,7 @@ struct n_hdlc_buf {
 #define        N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)
 
 struct n_hdlc_buf_list {
-       struct n_hdlc_buf *head;
-       struct n_hdlc_buf *tail;
+       struct list_head  list;
        int               count;
        spinlock_t        spinlock;
 };
@@ -136,7 +135,6 @@ struct n_hdlc_buf_list {
  * @backup_tty - TTY to use if tty gets closed
  * @tbusy - reentrancy flag for tx wakeup code
  * @woke_up - FIXME: describe this field
- * @tbuf - currently transmitting tx buffer
  * @tx_buf_list - list of pending transmit frame buffers
  * @rx_buf_list - list of received frame buffers
  * @tx_free_buf_list - list unused transmit frame buffers
@@ -149,7 +147,6 @@ struct n_hdlc {
        struct tty_struct       *backup_tty;
        int                     tbusy;
        int                     woke_up;
-       struct n_hdlc_buf       *tbuf;
        struct n_hdlc_buf_list  tx_buf_list;
        struct n_hdlc_buf_list  rx_buf_list;
        struct n_hdlc_buf_list  tx_free_buf_list;
@@ -159,7 +156,8 @@ struct n_hdlc {
 /*
  * HDLC buffer list manipulation functions
  */
-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list);
+static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
+                                               struct n_hdlc_buf *buf);
 static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
                           struct n_hdlc_buf *buf);
 static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
@@ -209,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty)
 {
        struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
        struct n_hdlc_buf *buf;
-       unsigned long flags;
 
        while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
                n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
-       spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
-       if (n_hdlc->tbuf) {
-               n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
-               n_hdlc->tbuf = NULL;
-       }
-       spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
 }
 
 static struct tty_ldisc_ops n_hdlc_ldisc = {
@@ -284,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc)
                } else
                        break;
        }
-       kfree(n_hdlc->tbuf);
        kfree(n_hdlc);
        
 }      /* end of n_hdlc_release() */
@@ -403,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
        n_hdlc->woke_up = 0;
        spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
 
-       /* get current transmit buffer or get new transmit */
-       /* buffer from list of pending transmit buffers */
-               
-       tbuf = n_hdlc->tbuf;
-       if (!tbuf)
-               tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
-               
+       tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
        while (tbuf) {
                if (debuglevel >= DEBUG_LEVEL_INFO)     
                        printk("%s(%d)sending frame %p, count=%d\n",
@@ -421,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
 
                /* rollback was possible and has been done */
                if (actual == -ERESTARTSYS) {
-                       n_hdlc->tbuf = tbuf;
+                       n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
                        break;
                }
                /* if transmit error, throw frame away by */
@@ -436,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
                                        
                        /* free current transmit buffer */
                        n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
-                       
-                       /* this tx buffer is done */
-                       n_hdlc->tbuf = NULL;
-                       
+
                        /* wait up sleeping writers */
                        wake_up_interruptible(&tty->write_wait);
        
@@ -449,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
                        if (debuglevel >= DEBUG_LEVEL_INFO)     
                                printk("%s(%d)frame %p pending\n",
                                        __FILE__,__LINE__,tbuf);
-                                       
-                       /* buffer not accepted by driver */
-                       /* set this buffer as pending buffer */
-                       n_hdlc->tbuf = tbuf;
+
+                       /*
+                        * the buffer was not accepted by driver,
+                        * return it back into tx queue
+                        */
+                       n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
                        break;
                }
        }
@@ -750,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
        int error = 0;
        int count;
        unsigned long flags;
-       
+       struct n_hdlc_buf *buf = NULL;
+
        if (debuglevel >= DEBUG_LEVEL_INFO)     
                printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
                        __FILE__,__LINE__,cmd);
@@ -764,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
                /* report count of read data available */
                /* in next available frame (if any) */
                spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
-               if (n_hdlc->rx_buf_list.head)
-                       count = n_hdlc->rx_buf_list.head->count;
+               buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
+                                               struct n_hdlc_buf, list_item);
+               if (buf)
+                       count = buf->count;
                else
                        count = 0;
                spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
@@ -777,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
                count = tty_chars_in_buffer(tty);
                /* add size of next output frame in queue */
                spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
-               if (n_hdlc->tx_buf_list.head)
-                       count += n_hdlc->tx_buf_list.head->count;
+               buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
+                                               struct n_hdlc_buf, list_item);
+               if (buf)
+                       count += buf->count;
                spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
                error = put_user(count, (int __user *)arg);
                break;
@@ -826,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
                poll_wait(filp, &tty->write_wait, wait);
 
                /* set bits for operations that won't block */
-               if (n_hdlc->rx_buf_list.head)
+               if (!list_empty(&n_hdlc->rx_buf_list.list))
                        mask |= POLLIN | POLLRDNORM;    /* readable */
                if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
                        mask |= POLLHUP;
                if (tty_hung_up_p(filp))
                        mask |= POLLHUP;
                if (!tty_is_writelocked(tty) &&
-                               n_hdlc->tx_free_buf_list.head)
+                               !list_empty(&n_hdlc->tx_free_buf_list.list))
                        mask |= POLLOUT | POLLWRNORM;   /* writable */
        }
        return mask;
@@ -853,11 +841,16 @@ static struct n_hdlc *n_hdlc_alloc(void)
        if (!n_hdlc)
                return NULL;
 
-       n_hdlc_buf_list_init(&n_hdlc->rx_free_buf_list);
-       n_hdlc_buf_list_init(&n_hdlc->tx_free_buf_list);
-       n_hdlc_buf_list_init(&n_hdlc->rx_buf_list);
-       n_hdlc_buf_list_init(&n_hdlc->tx_buf_list);
-       
+       spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock);
+       spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
+       spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
+       spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
+
+       INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
+       INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
+       INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
+       INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
+
        /* allocate free rx buffer list */
        for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
                buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
@@ -885,63 +878,65 @@ static struct n_hdlc *n_hdlc_alloc(void)
 }      /* end of n_hdlc_alloc() */
 
 /**
- * n_hdlc_buf_list_init - initialize specified HDLC buffer list
- * @list - pointer to buffer list
+ * n_hdlc_buf_return - put the HDLC buffer after the head of the specified list
+ * @buf_list - pointer to the buffer list
+ * @buf - pointer to the buffer
  */
-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list)
+static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
+                                               struct n_hdlc_buf *buf)
 {
-       memset(list, 0, sizeof(*list));
-       spin_lock_init(&list->spinlock);
-}      /* end of n_hdlc_buf_list_init() */
+       unsigned long flags;
+
+       spin_lock_irqsave(&buf_list->spinlock, flags);
+
+       list_add(&buf->list_item, &buf_list->list);
+       buf_list->count++;
+
+       spin_unlock_irqrestore(&buf_list->spinlock, flags);
+}
 
 /**
  * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
- * @list - pointer to buffer list
+ * @buf_list - pointer to buffer list
  * @buf        - pointer to buffer
  */
-static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
+static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
                           struct n_hdlc_buf *buf)
 {
        unsigned long flags;
-       spin_lock_irqsave(&list->spinlock,flags);
-       
-       buf->link=NULL;
-       if (list->tail)
-               list->tail->link = buf;
-       else
-               list->head = buf;
-       list->tail = buf;
-       (list->count)++;
-       
-       spin_unlock_irqrestore(&list->spinlock,flags);
-       
+
+       spin_lock_irqsave(&buf_list->spinlock, flags);
+
+       list_add_tail(&buf->list_item, &buf_list->list);
+       buf_list->count++;
+
+       spin_unlock_irqrestore(&buf_list->spinlock, flags);
 }      /* end of n_hdlc_buf_put() */
 
 /**
  * n_hdlc_buf_get - remove and return an HDLC buffer from list
- * @list - pointer to HDLC buffer list
+ * @buf_list - pointer to HDLC buffer list
  * 
  * Remove and return an HDLC buffer from the head of the specified HDLC buffer
  * list.
  * Returns a pointer to HDLC buffer if available, otherwise %NULL.
  */
-static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
+static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
 {
        unsigned long flags;
        struct n_hdlc_buf *buf;
-       spin_lock_irqsave(&list->spinlock,flags);
-       
-       buf = list->head;
+
+       spin_lock_irqsave(&buf_list->spinlock, flags);
+
+       buf = list_first_entry_or_null(&buf_list->list,
+                                               struct n_hdlc_buf, list_item);
        if (buf) {
-               list->head = buf->link;
-               (list->count)--;
+               list_del(&buf->list_item);
+               buf_list->count--;
        }
-       if (!list->head)
-               list->tail = NULL;
-       
-       spin_unlock_irqrestore(&list->spinlock,flags);
+
+       spin_unlock_irqrestore(&buf_list->spinlock, flags);
        return buf;
-       
 }      /* end of n_hdlc_buf_get() */
 
 static char hdlc_banner[] __initdata =
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 029de3f99752..5b24ffd93649 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -2880,6 +2880,8 @@ enum pci_board_num_t {
        pbn_b0_4_1152000_200,
        pbn_b0_8_1152000_200,
 
+       pbn_b0_4_1250000,
+
        pbn_b0_2_1843200,
        pbn_b0_4_1843200,
 
@@ -3113,6 +3115,13 @@ static struct pciserial_board pci_boards[] = {
                .uart_offset    = 0x200,
        },
 
+       [pbn_b0_4_1250000] = {
+               .flags          = FL_BASE0,
+               .num_ports      = 4,
+               .base_baud      = 1250000,
+               .uart_offset    = 8,
+       },
+
        [pbn_b0_2_1843200] = {
                .flags          = FL_BASE0,
                .num_ports      = 2,
@@ -5778,6 +5787,10 @@ static struct pci_device_id serial_pci_tbl[] = {
        { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 },
        { PCI_DEVICE(0x1c29, 0x1112), .driver_data = pbn_fintek_12 },
 
+       /* MKS Tenta SCOM-080x serial cards */
+       { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
+       { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
+
        /*
         * These entries match devices with class COMMUNICATION_SERIAL,
         * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 239bc9cba28c..f54f77037d22 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -644,6 +644,9 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
 {
        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
 
+       /* Never leave an unregistered request on an unsafe list! */
+       list_del_init(&req->r_unsafe_item);
+
        if (req->r_tid == mdsc->oldest_tid) {
                struct rb_node *p = rb_next(&req->r_node);
                mdsc->oldest_tid = 0;
@@ -1051,7 +1054,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
        while (!list_empty(&session->s_unsafe)) {
                req = list_first_entry(&session->s_unsafe,
                                       struct ceph_mds_request, r_unsafe_item);
-               list_del_init(&req->r_unsafe_item);
                pr_warn_ratelimited(" dropping unsafe request %llu\n",
                                    req->r_tid);
                __unregister_request(mdsc, req);
@@ -2477,7 +2479,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                         * useful we could do with a revised return value.
                         */
                        dout("got safe reply %llu, mds%d\n", tid, mds);
-                       list_del_init(&req->r_unsafe_item);
 
                        /* last unsafe request during umount? */
                        if (mdsc->stopping && !__get_oldest_req(mdsc))
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 509411dd3698..cf644d52c0cf 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1269,6 +1269,16 @@ out:
        return 0;
 }
 
+static void fat_dummy_inode_init(struct inode *inode)
+{
+       /* Initialize this dummy inode to work as no-op. */
+       MSDOS_I(inode)->mmu_private = 0;
+       MSDOS_I(inode)->i_start = 0;
+       MSDOS_I(inode)->i_logstart = 0;
+       MSDOS_I(inode)->i_attrs = 0;
+       MSDOS_I(inode)->i_pos = 0;
+}
+
 static int fat_read_root(struct inode *inode)
 {
        struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
@@ -1713,12 +1723,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
        fat_inode = new_inode(sb);
        if (!fat_inode)
                goto out_fail;
-       MSDOS_I(fat_inode)->i_pos = 0;
+       fat_dummy_inode_init(fat_inode);
        sbi->fat_inode = fat_inode;
 
        fsinfo_inode = new_inode(sb);
        if (!fsinfo_inode)
                goto out_fail;
+       fat_dummy_inode_init(fsinfo_inode);
        fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
        sbi->fsinfo_inode = fsinfo_inode;
        insert_inode_hash(fsinfo_inode);
diff --git a/fs/mount.h b/fs/mount.h
index 14db05d424f7..3dc7dea5a357 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -86,7 +86,6 @@ static inline int is_mounted(struct vfsmount *mnt)
 }
 
 extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
-extern struct mount *__lookup_mnt_last(struct vfsmount *, struct dentry *);
 
 extern int __legitimize_mnt(struct vfsmount *, unsigned);
 extern bool legitimize_mnt(struct vfsmount *, unsigned);
diff --git a/fs/namespace.c b/fs/namespace.c
index da98a1bbd8b5..7df3d406d3e0 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -638,28 +638,6 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
 }
 
 /*
- * find the last mount at @dentry on vfsmount @mnt.
- * mount_lock must be held.
- */
-struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
-{
-       struct mount *p, *res = NULL;
-       p = __lookup_mnt(mnt, dentry);
-       if (!p)
-               goto out;
-       if (!(p->mnt.mnt_flags & MNT_UMOUNT))
-               res = p;
-       hlist_for_each_entry_continue(p, mnt_hash) {
-               if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
-                       break;
-               if (!(p->mnt.mnt_flags & MNT_UMOUNT))
-                       res = p;
-       }
-out:
-       return res;
-}
-
-/*
  * lookup_mnt - Return the first child mount mounted at path
  *
  * "First" means first mounted chronologically.  If you create the
@@ -879,6 +857,13 @@ void mnt_set_mountpoint(struct mount *mnt,
        hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
 }
 
+static void __attach_mnt(struct mount *mnt, struct mount *parent)
+{
+       hlist_add_head_rcu(&mnt->mnt_hash,
+                          m_hash(&parent->mnt, mnt->mnt_mountpoint));
+       list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+}
+
 /*
  * vfsmount lock must be held for write
  */
@@ -887,28 +872,45 @@ static void attach_mnt(struct mount *mnt,
                        struct mountpoint *mp)
 {
        mnt_set_mountpoint(parent, mp, mnt);
-       hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
-       list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+       __attach_mnt(mnt, parent);
 }
 
-static void attach_shadowed(struct mount *mnt,
-                       struct mount *parent,
-                       struct mount *shadows)
+void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
 {
-       if (shadows) {
-               hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
-               list_add(&mnt->mnt_child, &shadows->mnt_child);
-       } else {
-               hlist_add_head_rcu(&mnt->mnt_hash,
-                               m_hash(&parent->mnt, mnt->mnt_mountpoint));
-               list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
-       }
+       struct mountpoint *old_mp = mnt->mnt_mp;
+       struct dentry *old_mountpoint = mnt->mnt_mountpoint;
+       struct mount *old_parent = mnt->mnt_parent;
+
+       list_del_init(&mnt->mnt_child);
+       hlist_del_init(&mnt->mnt_mp_list);
+       hlist_del_init_rcu(&mnt->mnt_hash);
+
+       attach_mnt(mnt, parent, mp);
+
+       put_mountpoint(old_mp);
+
+       /*
+        * Safely avoid even the suggestion this code might sleep or
+        * lock the mount hash by taking advantage of the knowledge that
+        * mnt_change_mountpoint will not release the final reference
+        * to a mountpoint.
+        *
+        * During mounting, the mount passed in as the parent mount will
+        * continue to use the old mountpoint and during unmounting, the
+        * old mountpoint will continue to exist until namespace_unlock,
+        * which happens well after mnt_change_mountpoint.
+        */
+       spin_lock(&old_mountpoint->d_lock);
+       old_mountpoint->d_lockref.count--;
+       spin_unlock(&old_mountpoint->d_lock);
+
+       mnt_add_count(old_parent, -1);
 }
 
 /*
  * vfsmount lock must be held for write
  */
-static void commit_tree(struct mount *mnt, struct mount *shadows)
+static void commit_tree(struct mount *mnt)
 {
        struct mount *parent = mnt->mnt_parent;
        struct mount *m;
@@ -923,7 +925,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
 
        list_splice(&head, n->list.prev);
 
-       attach_shadowed(mnt, parent, shadows);
+       __attach_mnt(mnt, parent);
        touch_mnt_namespace(n);
 }
 
@@ -1718,7 +1720,6 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
                        continue;
 
                for (s = r; s; s = next_mnt(s, r)) {
-                       struct mount *t = NULL;
                        if (!(flag & CL_COPY_UNBINDABLE) &&
                            IS_MNT_UNBINDABLE(s)) {
                                s = skip_mnt_tree(s);
@@ -1740,14 +1741,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
                                goto out;
                        lock_mount_hash();
                        list_add_tail(&q->mnt_list, &res->mnt_list);
-                       mnt_set_mountpoint(parent, p->mnt_mp, q);
-                       if (!list_empty(&parent->mnt_mounts)) {
-                               t = list_last_entry(&parent->mnt_mounts,
-                                       struct mount, mnt_child);
-                               if (t->mnt_mp != p->mnt_mp)
-                                       t = NULL;
-                       }
-                       attach_shadowed(q, parent, t);
+                       attach_mnt(q, parent, p->mnt_mp);
                        unlock_mount_hash();
                }
        }
@@ -1925,10 +1919,18 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                        struct path *parent_path)
 {
        HLIST_HEAD(tree_list);
+       struct mountpoint *smp;
        struct mount *child, *p;
        struct hlist_node *n;
        int err;
 
+       /* Preallocate a mountpoint in case the new mounts need
+        * to be tucked under other mounts.
+        */
+       smp = get_mountpoint(source_mnt->mnt.mnt_root);
+       if (IS_ERR(smp))
+               return PTR_ERR(smp);
+
        if (IS_MNT_SHARED(dest_mnt)) {
                err = invent_group_ids(source_mnt, true);
                if (err)
@@ -1948,16 +1950,19 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                touch_mnt_namespace(source_mnt->mnt_ns);
        } else {
                mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
-               commit_tree(source_mnt, NULL);
+               commit_tree(source_mnt);
        }
 
        hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
                struct mount *q;
                hlist_del_init(&child->mnt_hash);
-               q = __lookup_mnt_last(&child->mnt_parent->mnt,
-                                     child->mnt_mountpoint);
-               commit_tree(child, q);
+               q = __lookup_mnt(&child->mnt_parent->mnt,
+                                child->mnt_mountpoint);
+               if (q)
+                       mnt_change_mountpoint(child, smp, q);
+               commit_tree(child);
        }
+       put_mountpoint(smp);
        unlock_mount_hash();
 
        return 0;
@@ -1970,6 +1975,10 @@ static int attach_recursive_mnt(struct mount *source_mnt,
        unlock_mount_hash();
        cleanup_group_ids(source_mnt, NULL);
  out:
+       read_seqlock_excl(&mount_lock);
+       put_mountpoint(smp);
+       read_sequnlock_excl(&mount_lock);
+
        return err;
 }
 
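The fs/namespace.c hunks above preallocate a mountpoint for the source mount's root (the new smp), and when a propagated copy lands on a mountpoint that already carries a mount, the pre-existing mount is reparented under the copy with mnt_change_mountpoint() instead of being left behind it as a shadow. The snippet below is only a userspace sketch of the shared-to-slave propagation pair that drives this code path; it is not part of the patch, and the /tmp paths, the tmpfs type and the need for root privileges are assumptions of the example.

/*
 * Userspace sketch only: build the shared -> slave propagation pair that
 * attach_recursive_mnt() has to service.  Assumes root privileges and
 * scratch directories under /tmp; mkdir failures on re-runs are ignored.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>

static void die(const char *msg)
{
        perror(msg);
        exit(1);
}

int main(void)
{
        if (unshare(CLONE_NEWNS) < 0)           /* private mount namespace */
                die("unshare");
        if (mount("none", "/", NULL, MS_REC | MS_PRIVATE, NULL) < 0)
                die("make / private");

        mkdir("/tmp/master", 0755);
        if (mount("none", "/tmp/master", "tmpfs", 0, NULL) < 0)
                die("mount master");
        if (mount("none", "/tmp/master", NULL, MS_SHARED, NULL) < 0)
                die("make master shared");

        mkdir("/tmp/slave", 0755);
        if (mount("/tmp/master", "/tmp/slave", NULL, MS_BIND, NULL) < 0)
                die("bind slave");
        if (mount("none", "/tmp/slave", NULL, MS_SLAVE, NULL) < 0)
                die("make slave of master");

        /* This mount propagates from master to slave; repeated mounts on
         * the same mountpoint are what the tucking logic above reorganises. */
        mkdir("/tmp/master/dir", 0755);
        if (mount("none", "/tmp/master/dir", "tmpfs", 0, NULL) < 0)
                die("mount into master");

        puts("compare the /tmp/master and /tmp/slave entries in /proc/self/mountinfo");
        return 0;
}

With a kernel carrying this series, the covered mount should show up in /proc/self/mountinfo as a child of the newly propagated one rather than as a hidden sibling on the same mountpoint.
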
diff --git a/fs/pnode.c b/fs/pnode.c
index 99899705b105..b9f2af59b9a6 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -324,6 +324,21 @@ out:
        return ret;
 }
 
+static struct mount *find_topper(struct mount *mnt)
+{
+       /* If there is exactly one mount covering mnt completely return it. */
+       struct mount *child;
+
+       if (!list_is_singular(&mnt->mnt_mounts))
+               return NULL;
+
+       child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
+       if (child->mnt_mountpoint != mnt->mnt.mnt_root)
+               return NULL;
+
+       return child;
+}
+
 /*
  * return true if the refcount is greater than count
  */
@@ -344,9 +359,8 @@ static inline int do_refcount_check(struct mount *mnt, int count)
  */
 int propagate_mount_busy(struct mount *mnt, int refcnt)
 {
-       struct mount *m, *child;
+       struct mount *m, *child, *topper;
        struct mount *parent = mnt->mnt_parent;
-       int ret = 0;
 
        if (mnt == parent)
                return do_refcount_check(mnt, refcnt);
@@ -361,12 +375,24 @@ int propagate_mount_busy(struct mount *mnt, int refcnt)
 
        for (m = propagation_next(parent, parent); m;
                        m = propagation_next(m, parent)) {
-               child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
-               if (child && list_empty(&child->mnt_mounts) &&
-                   (ret = do_refcount_check(child, 1)))
-                       break;
+               int count = 1;
+               child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
+               if (!child)
+                       continue;
+
+               /* Is there exactly one mount on the child that covers
+                * it completely whose reference should be ignored?
+                */
+               topper = find_topper(child);
+               if (topper)
+                       count += 1;
+               else if (!list_empty(&child->mnt_mounts))
+                       continue;
+
+               if (do_refcount_check(child, count))
+                       return 1;
        }
-       return ret;
+       return 0;
 }
 
 /*
@@ -383,7 +409,7 @@ void propagate_mount_unlock(struct mount *mnt)
 
        for (m = propagation_next(parent, parent); m;
                        m = propagation_next(m, parent)) {
-               child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
+               child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
                if (child)
                        child->mnt.mnt_flags &= ~MNT_LOCKED;
        }
@@ -401,9 +427,11 @@ static void mark_umount_candidates(struct mount *mnt)
 
        for (m = propagation_next(parent, parent); m;
                        m = propagation_next(m, parent)) {
-               struct mount *child = __lookup_mnt_last(&m->mnt,
+               struct mount *child = __lookup_mnt(&m->mnt,
                                                mnt->mnt_mountpoint);
-               if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
+               if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
+                       continue;
+               if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
                        SET_MNT_MARK(child);
                }
        }
@@ -422,8 +450,8 @@ static void __propagate_umount(struct mount *mnt)
 
        for (m = propagation_next(parent, parent); m;
                        m = propagation_next(m, parent)) {
-
-               struct mount *child = __lookup_mnt_last(&m->mnt,
+               struct mount *topper;
+               struct mount *child = __lookup_mnt(&m->mnt,
                                                mnt->mnt_mountpoint);
                /*
                 * umount the child only if the child has no children
@@ -432,6 +460,15 @@ static void __propagate_umount(struct mount *mnt)
                if (!child || !IS_MNT_MARKED(child))
                        continue;
                CLEAR_MNT_MARK(child);
+
+               /* If there is exactly one mount covering all of child
+                * replace child with that mount.
+                */
+               topper = find_topper(child);
+               if (topper)
+                       mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
+                                             topper);
+
                if (list_empty(&child->mnt_mounts)) {
                        list_del_init(&child->mnt_child);
                        child->mnt.mnt_flags |= MNT_UMOUNT;
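
The new find_topper() above is the heart of these pnode.c changes: a mount only counts as completely covered when it has exactly one child and that child sits on the mount's own root dentry, and propagate_mount_busy() then allows exactly one extra reference for that topper instead of declaring the mount busy. Below is a toy userspace model of the check; the toy_mount type and every name in it are invented for illustration, and only the shape of the test follows the patch.

/*
 * Toy model, not kernel code: "exactly one child, mounted on my root"
 * is the covering test added as find_topper() above.  Pointer equality
 * stands in for the dentry comparison.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_mount {
        const char *root;               /* root dentry of this mount          */
        const char *mountpoint;         /* dentry it is mounted on, in parent */
        struct toy_mount *children[8];
        size_t nr_children;
};

static struct toy_mount *toy_find_topper(struct toy_mount *mnt)
{
        if (mnt->nr_children != 1)      /* list_is_singular() analogue */
                return NULL;
        if (mnt->children[0]->mountpoint != mnt->root)
                return NULL;            /* child does not cover the root */
        return mnt->children[0];
}

int main(void)
{
        static const char root[] = "/";
        struct toy_mount child = { .root = root, .mountpoint = root };
        struct toy_mount parent = { .root = root,
                                    .children = { &child },
                                    .nr_children = 1 };

        /* With one covering child, propagate_mount_busy() bumps the allowed
         * reference count by one instead of reporting the mount as busy. */
        printf("topper found: %s\n", toy_find_topper(&parent) ? "yes" : "no");
        return 0;
}
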
diff --git a/fs/pnode.h b/fs/pnode.h
index 0fcdbe7ca648..623f01772bec 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -49,6 +49,8 @@ int get_dominating_id(struct mount *mnt, const struct path *root);
 unsigned int mnt_get_count(struct mount *mnt);
 void mnt_set_mountpoint(struct mount *, struct mountpoint *,
                        struct mount *);
+void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
+                          struct mount *mnt);
 struct mount *copy_tree(struct mount *, struct dentry *, int);
 bool is_path_reachable(struct mount *, struct dentry *,
                         const struct path *root);
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index e55c08bc3a96..0abc56140c83 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -49,7 +49,7 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
        case CEPH_POOL_TYPE_EC:
                return false;
        default:
-               BUG_ON(1);
+               BUG();
        }
 }
 
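On the include/linux/ceph/osdmap.h change: BUG_ON(1) and BUG() end up in the same place at runtime, but BUG() states directly that the default: arm of a value-returning switch is unreachable. Whether or not that was the motivation here, the userspace analogue is to end such a branch with a call the compiler knows cannot return; the enum and function below are made up for illustration.

/*
 * Illustration only: closing an "impossible" default: case of a
 * value-returning function with a noreturn call (abort() here, BUG()
 * in the kernel) leaves no path that falls off the end.
 */
#include <stdbool.h>
#include <stdlib.h>

enum pool_type { POOL_REPLICATED, POOL_EC };

static bool can_shift_osds(enum pool_type type)
{
        switch (type) {
        case POOL_REPLICATED:
                return true;
        case POOL_EC:
                return false;
        default:
                abort();        /* declared noreturn, like BUG() */
        }
}

int main(void)
{
        return can_shift_osds(POOL_REPLICATED) ? 0 : 1;
}
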
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index c15373894a42..b37dee3acaba 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -355,7 +355,8 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
 static inline int nlm_compare_locks(const struct file_lock *fl1,
                                    const struct file_lock *fl2)
 {
-       return  fl1->fl_pid   == fl2->fl_pid
+       return file_inode(fl1->fl_file) == file_inode(fl2->fl_file)
+            && fl1->fl_pid   == fl2->fl_pid
             && fl1->fl_owner == fl2->fl_owner
             && fl1->fl_start == fl2->fl_start
             && fl1->fl_end   == fl2->fl_end
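
The lockd hunk makes nlm_compare_locks() require that both locks refer to the same inode before comparing pid, owner and range. A userspace rendering of that first test uses fstat() and the (st_dev, st_ino) pair, which is roughly what comparing file_inode() pointers achieves in the kernel; the /etc/hostname path is just an example.

/*
 * Userspace sketch: two descriptors compare as "the same file" when
 * their device and inode numbers match, the analogue of the
 * file_inode(fl1->fl_file) == file_inode(fl2->fl_file) test above.
 */
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static bool same_inode(int fd1, int fd2)
{
        struct stat a, b;

        if (fstat(fd1, &a) < 0 || fstat(fd2, &b) < 0)
                return false;
        return a.st_dev == b.st_dev && a.st_ino == b.st_ino;
}

int main(void)
{
        int fd1 = open("/etc/hostname", O_RDONLY);      /* example path */
        int fd2 = open("/etc/hostname", O_RDONLY);

        printf("same inode: %s\n", same_inode(fd1, fd2) ? "yes" : "no");

        if (fd1 >= 0)
                close(fd1);
        if (fd2 >= 0)
                close(fd2);
        return 0;
}
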
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 800fe16cc36f..ed66414b91f0 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -740,6 +740,7 @@ struct se_lun {
        struct config_group     lun_group;
        struct se_port_stat_grps port_stat_grps;
        struct completion       lun_ref_comp;
+       struct completion       lun_shutdown_comp;
        struct percpu_ref       lun_ref;
        struct list_head        lun_dev_link;
        struct hlist_node       link;
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 00a43a70e1fc..0402fa45b343 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -168,6 +168,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                        break;
                }
 
+               flush_delayed_work(&sdata->dec_tailroom_needed_wk);
                drv_remove_interface(local, sdata);
        }
 
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index d08e214ec6e7..223d88e25e05 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -2629,7 +2629,7 @@ sub do_run_test {
     }
 
     waitpid $child_pid, 0;
-    $child_exit = $?;
+    $child_exit = $? >> 8;
 
     my $end_time = time;
     $test_time = $end_time - $start_time;
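
In Perl, $? after waitpid is the raw wait status, so the test's real exit code lives in bits 8-15; the >> 8 added above extracts it, exactly as WEXITSTATUS() does in C. A minimal C counterpart is below; /bin/false is only an example child.

/*
 * Sketch of the wait-status layout the ktest.pl fix relies on:
 * the child's exit code sits in bits 8..15 of the raw status.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();
        int status;

        if (pid < 0)
                return 1;
        if (pid == 0) {
                execl("/bin/false", "false", (char *)NULL);
                _exit(127);             /* exec failed */
        }

        waitpid(pid, &status, 0);
        printf("raw=0x%x  >>8=%d  WEXITSTATUS=%d\n",
               status, (status >> 8) & 0xff, WEXITSTATUS(status));
        return 0;
}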
