diff --git a/Makefile b/Makefile
index 63a5ee858cc3..944db2356a7c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 14
-SUBLEVEL = 26
+SUBLEVEL = 27
 EXTRAVERSION =
 NAME = Remembering Coco
 
diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S
index 47afd08c90f7..fe7e97a1aad9 100644
--- a/arch/powerpc/kernel/vdso32/getcpu.S
+++ b/arch/powerpc/kernel/vdso32/getcpu.S
@@ -30,8 +30,8 @@
 V_FUNCTION_BEGIN(__kernel_getcpu)
   .cfi_startproc
        mfspr   r5,SPRN_USPRG3
-       cmpdi   cr0,r3,0
-       cmpdi   cr1,r4,0
+       cmpwi   cr0,r3,0
+       cmpwi   cr1,r4,0
        clrlwi  r6,r5,16
        rlwinm  r7,r5,16,31-15,31-0
        beq     cr0,1f
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 14fe7cba21d1..b5bb49866bcc 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -75,7 +75,7 @@ suffix-$(CONFIG_KERNEL_XZ)    := xz
 suffix-$(CONFIG_KERNEL_LZO)    := lzo
 suffix-$(CONFIG_KERNEL_LZ4)    := lz4
 
-RUN_SIZE = $(shell objdump -h vmlinux | \
+RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
             perl $(srctree)/arch/x86/tools/calc_run_size.pl)
 quiet_cmd_mkpiggy = MKPIGGY $@
       cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 79f9f848bee4..fb345c43815f 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
                        continue;
                if (event->attr.config1 & ~er->valid_mask)
                        return -EINVAL;
+               /* Check if the extra MSRs can be safely accessed */
+               if (!er->extra_msr_access)
+                       return -ENXIO;
 
                reg->idx = er->idx;
                reg->config = event->attr.config1;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 4972c244d0bc..7876c346ed1a 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -293,14 +293,16 @@ struct extra_reg {
        u64                     config_mask;
        u64                     valid_mask;
        int                     idx;  /* per_xxx->regs[] reg index */
+       bool                    extra_msr_access;
 };
 
 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {     \
-       .event = (e),           \
-       .msr = (ms),            \
-       .config_mask = (m),     \
-       .valid_mask = (vm),     \
-       .idx = EXTRA_REG_##i,   \
+       .event = (e),                   \
+       .msr = (ms),                    \
+       .config_mask = (m),             \
+       .valid_mask = (vm),             \
+       .idx = EXTRA_REG_##i,           \
+       .extra_msr_access = true,       \
        }
 
 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)     \
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 5ee8064bd1d2..d4c0a0e46040 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2183,6 +2183,41 @@ static void intel_snb_check_microcode(void)
        }
 }
 
+/*
+ * Under certain circumstances, accessing certain MSRs may cause a #GP.
+ * This function tests whether the input MSR can be safely accessed.
+ */
+static bool check_msr(unsigned long msr, u64 mask)
+{
+       u64 val_old, val_new, val_tmp;
+
+       /*
+        * Read the current value, change it and read it back to see if it
+        * matches, this is needed to detect certain hardware emulators
+        * (qemu/kvm) that don't trap on the MSR access and always return 0s.
+        */
+       if (rdmsrl_safe(msr, &val_old))
+               return false;
+
+       /*
+        * Only change the bits which can be updated by wrmsrl.
+        */
+       val_tmp = val_old ^ mask;
+       if (wrmsrl_safe(msr, val_tmp) ||
+           rdmsrl_safe(msr, &val_new))
+               return false;
+
+       if (val_new != val_tmp)
+               return false;
+
+       /* Here it's sure that the MSR can be safely accessed.
+        * Restore the old value and return.
+        */
+       wrmsrl(msr, val_old);
+
+       return true;
+}
+
 static __init void intel_sandybridge_quirk(void)
 {
        x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2272,7 +2307,8 @@ __init int intel_pmu_init(void)
        union cpuid10_ebx ebx;
        struct event_constraint *c;
        unsigned int unused;
-       int version;
+       struct extra_reg *er;
+       int version, i;
 
        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                switch (boot_cpu_data.x86) {
@@ -2578,6 +2614,34 @@ __init int intel_pmu_init(void)
                }
        }
 
+       /*
+        * Accessing LBR MSRs may cause a #GP under certain circumstances.
+        * E.g. KVM doesn't support LBR MSRs.
+        * Check all LBR MSRs here.
+        * Disable LBR access if any LBR MSR cannot be accessed.
+        */
+       if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+               x86_pmu.lbr_nr = 0;
+       for (i = 0; i < x86_pmu.lbr_nr; i++) {
+               if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+                     check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
+                       x86_pmu.lbr_nr = 0;
+       }
+
+       /*
+        * Accessing extra MSRs may cause a #GP under certain circumstances.
+        * E.g. KVM doesn't support offcore events.
+        * Check all extra_regs here.
+        */
+       if (x86_pmu.extra_regs) {
+               for (er = x86_pmu.extra_regs; er->msr; er++) {
+                       er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
+                       /* Disable LBR select mapping */
+                       if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
+                               x86_pmu.lbr_sel_map = NULL;
+               }
+       }
+
        /* Support full width counters using alternative MSR range */
        if (x86_pmu.intel_cap.full_width_write) {
                x86_pmu.max_period = x86_pmu.cntval_mask;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index e662f147d436..cc5f102bebf3 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -322,6 +322,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
        { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
        { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
+       { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
        { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
        { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
        { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
@@ -493,6 +496,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
         * enabled.  https://bugzilla.kernel.org/show_bug.cgi?id=60731
         */
        { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
+       { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
 
        /* Enmotus */
        { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index fb0b40a191c2..ee2780dd90a8 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1503,7 +1503,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
        host_priv->csr_base = csr_base;
 
        irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-       if (irq < 0) {
+       if (!irq) {
                dev_err(&ofdev->dev, "invalid irq from platform\n");
                goto error_exit_with_cleanup;
        }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b6fb3ebe553a..c51469051e41 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3817,7 +3817,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
                ironlake_fdi_disable(crtc);
 
                ironlake_disable_pch_transcoder(dev_priv, pipe);
-               intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
                if (HAS_PCH_CPT(dev)) {
                        /* disable TRANS_DP_CTL */
@@ -3883,7 +3882,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
        if (intel_crtc->config.has_pch_encoder) {
                lpt_disable_pch_transcoder(dev_priv);
-               intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
                intel_ddi_fdi_disable(crtc);
        }
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 67c9ff389989..af49b24d14cb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -905,6 +905,17 @@ void intel_lvds_init(struct drm_device *dev)
        int pipe;
        u8 pin;
 
+       /*
+        * Unlock registers and just leave them unlocked. Do this before
+        * checking quirk lists to avoid bogus WARNINGs.
+        */
+       if (HAS_PCH_SPLIT(dev)) {
+               I915_WRITE(PCH_PP_CONTROL,
+                          I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+       } else {
+               I915_WRITE(PP_CONTROL,
+                          I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+       }
        if (!intel_lvds_supported(dev))
                return;
 
@@ -1099,17 +1110,6 @@ out:
        DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
                      lvds_encoder->is_dual_link ? "dual" : "single");
 
-       /*
-        * Unlock registers and just
-        * leave them unlocked
-        */
-       if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(PCH_PP_CONTROL,
-                          I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
-       } else {
-               I915_WRITE(PP_CONTROL,
-                          I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
-       }
        lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
        if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
                DRM_DEBUG_KMS("lid notifier registration failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0bc9106ef435..6bffe82f241c 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -740,6 +740,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
 
        /* Get associated drm_crtc: */
        drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+       if (!drmcrtc)
+               return -EINVAL;
 
        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index af0b5830303d..e3c6a96717fc 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -411,11 +411,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
        if (dev->cmd_err & DAVINCI_I2C_STR_NACK) {
                if (msg->flags & I2C_M_IGNORE_NAK)
                        return msg->len;
-               if (stop) {
-                       w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
-                       w |= DAVINCI_I2C_MDR_STP;
-                       davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
-               }
+               w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+               w |= DAVINCI_I2C_MDR_STP;
+               davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
                return -EREMOTEIO;
        }
        return -EIO;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 90dcc2eaac5f..a6860570391f 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -926,14 +926,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                if (stat & OMAP_I2C_STAT_NACK) {
                        err |= OMAP_I2C_STAT_NACK;
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
-                       break;
                }
 
                if (stat & OMAP_I2C_STAT_AL) {
                        dev_err(dev->dev, "Arbitration lost\n");
                        err |= OMAP_I2C_STAT_AL;
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
-                       break;
                }
 
                /*
@@ -958,11 +956,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                        if (dev->fifo_size)
                                num_bytes = dev->buf_len;
 
-                       omap_i2c_receive_data(dev, num_bytes, true);
-
-                       if (dev->errata & I2C_OMAP_ERRATA_I207)
+                       if (dev->errata & I2C_OMAP_ERRATA_I207) {
                                i2c_omap_errata_i207(dev, stat);
+                               num_bytes = (omap_i2c_read_reg(dev,
+                                       OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
+                       }
 
+                       omap_i2c_receive_data(dev, num_bytes, true);
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
                        continue;
                }
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 8741cae9c9f2..7026ab08ec91 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2138,7 +2138,7 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
                ret = smiapp_set_compose(subdev, fh, sel);
                break;
        default:
-               BUG();
+               ret = -EINVAL;
        }
 
        mutex_unlock(&sensor->mutex);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 9373f1f59605..086eac5af5c2 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8548,7 +8548,8 @@ static int tg3_init_rings(struct tg3 *tp)
                if (tnapi->rx_rcb)
                        memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 
-               if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+               if (tnapi->prodring.rx_std &&
+                   tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
                        tg3_free_rings(tp);
                        return -ENOMEM;
                }
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5ca8c479666e..206e79d29c79 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1613,6 +1613,8 @@ void igb_power_up_link(struct igb_adapter *adapter)
                igb_power_up_phy_copper(&adapter->hw);
        else
                igb_power_up_serdes_link_82575(&adapter->hw);
+
+       igb_setup_link(&adapter->hw);
 }
 
 /**
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index c4c00d9f2c04..96fc7fe8519f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -213,7 +213,7 @@
 /* Various constants */
 
 /* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS                16
+#define MVNETA_TXDONE_COAL_PKTS                1
 #define MVNETA_RX_COAL_PKTS            32
 #define MVNETA_RX_COAL_USEC            100
 
@@ -1612,6 +1612,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
        u16 txq_id = skb_get_queue_mapping(skb);
        struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
        struct mvneta_tx_desc *tx_desc;
+       int len = skb->len;
        struct netdev_queue *nq;
        int frags = 0;
        u32 tx_cmd;
@@ -1675,7 +1676,7 @@ out:
 
                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
-               stats->tx_bytes  += skb->len;
+               stats->tx_bytes  += len;
                u64_stats_update_end(&stats->syncp);
        } else {
                dev->stats.tx_dropped++;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 57428a0cb9dd..1e8a4b411dc6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1456,7 +1456,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 
        switch (op) {
        case RES_OP_RESERVE:
-               count = get_param_l(&in_param);
+               count = get_param_l(&in_param) & 0xffffff;
                align = get_param_h(&in_param);
                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
                if (err)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5441b49ef89d..5988910db23e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2106,9 +2106,8 @@ static int vxlan_init(struct net_device *dev)
        spin_lock(&vn->sock_lock);
        vs = vxlan_find_sock(dev_net(dev), ipv6 ? AF_INET6 : AF_INET,
                             vxlan->dst_port);
-       if (vs) {
+       if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) {
                /* If we have a socket with same port already, reuse it */
-               atomic_inc(&vs->refcnt);
                vxlan_vs_add_dev(vs, vxlan);
        } else {
                /* otherwise make new socket outside of RTNL */
@@ -2574,12 +2573,9 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
 
        spin_lock(&vn->sock_lock);
        vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
-       if (vs) {
-               if (vs->rcv == rcv)
-                       atomic_inc(&vs->refcnt);
-               else
+       if (vs && ((vs->rcv != rcv) ||
+                  !atomic_add_unless(&vs->refcnt, 1, 0)))
                        vs = ERR_PTR(-EBUSY);
-       }
        spin_unlock(&vn->sock_lock);
 
        if (!vs)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e30d80033cbc..19db057658c5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -469,9 +469,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
                len = skb_frag_size(frag);
                offset = frag->page_offset;
 
-               /* Data must not cross a page boundary. */
-               BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
-
                /* Skip unused frames from start of page */
                page += offset >> PAGE_SHIFT;
                offset &= ~PAGE_MASK;
@@ -479,8 +476,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
                while (len > 0) {
                        unsigned long bytes;
 
-                       BUG_ON(offset >= PAGE_SIZE);
-
                        bytes = PAGE_SIZE - offset;
                        if (bytes > len)
                                bytes = len;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1710a8678bcb..faa8b98954d9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1998,22 +1998,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
                ep->stopped_td = td;
                return 0;
        } else {
-               if (trb_comp_code == COMP_STALL) {
-                       /* The transfer is completed from the driver's
-                        * perspective, but we need to issue a set dequeue
-                        * command for this stalled endpoint to move the dequeue
-                        * pointer past the TD.  We can't do that here because
-                        * the halt condition must be cleared first.  Let the
-                        * USB class driver clear the stall later.
-                        */
-                       ep->stopped_td = td;
-                       ep->stopped_stream = ep_ring->stream_id;
-               } else if (xhci_requires_manual_halt_cleanup(xhci,
-                                       ep_ctx, trb_comp_code)) {
-                       /* Other types of errors halt the endpoint, but the
-                        * class driver doesn't call usb_reset_endpoint() unless
-                        * the error is -EPIPE.  Clear the halted status in the
-                        * xHCI hardware manually.
+               if (trb_comp_code == COMP_STALL ||
+                   xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+                                                     trb_comp_code)) {
+                       /* Issue a reset endpoint command to clear the host side
+                        * halt, followed by a set dequeue command to move the
+                        * dequeue pointer past the TD.
+                        * The class driver clears the device side halt later.
                         */
                        xhci_cleanup_halted_endpoint(xhci,
                                        slot_id, ep_index, ep_ring->stream_id,
@@ -2133,9 +2124,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                else
                        td->urb->actual_length = 0;
 
-               xhci_cleanup_halted_endpoint(xhci,
-                       slot_id, ep_index, 0, td, event_trb);
-               return finish_td(xhci, td, event_trb, event, ep, status, true);
+               return finish_td(xhci, td, event_trb, event, ep, status, false);
        }
        /*
         * Did we transfer any data, despite the errors that might have
@@ -2689,17 +2678,8 @@ cleanup:
                if (ret) {
                        urb = td->urb;
                        urb_priv = urb->hcpriv;
-                       /* Leave the TD around for the reset endpoint function
-                        * to use(but only if it's not a control endpoint,
-                        * since we already queued the Set TR dequeue pointer
-                        * command for stalled control endpoints).
-                        */
-                       if (usb_endpoint_xfer_control(&urb->ep->desc) ||
-                               (trb_comp_code != COMP_STALL &&
-                                       trb_comp_code != COMP_BABBLE))
-                               xhci_urb_free_priv(xhci, urb_priv);
-                       else
-                               kfree(urb_priv);
+
+                       xhci_urb_free_priv(xhci, urb_priv);
 
                        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
                        if ((urb->actual_length != urb->transfer_buffer_length &&
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 17e398748a2d..16f4f8dc1ae9 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2925,63 +2925,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
        }
 }
 
-/* Deal with stalled endpoints.  The core should have sent the control message
- * to clear the halt condition.  However, we need to make the xHCI hardware
- * reset its sequence number, since a device will expect a sequence number of
- * zero after the halt condition is cleared.
+/* Called when clearing halted device. The core should have sent the control
+ * message to clear the device halt condition. The host side of the halt should
+ * already be cleared with a reset endpoint command issued when the STALL tx
+ * event was received.
+ *
  * Context: in_interrupt
  */
+
 void xhci_endpoint_reset(struct usb_hcd *hcd,
                struct usb_host_endpoint *ep)
 {
        struct xhci_hcd *xhci;
-       struct usb_device *udev;
-       unsigned int ep_index;
-       unsigned long flags;
-       int ret;
-       struct xhci_virt_ep *virt_ep;
 
        xhci = hcd_to_xhci(hcd);
-       udev = (struct usb_device *) ep->hcpriv;
-       /* Called with a root hub endpoint (or an endpoint that wasn't added
-        * with xhci_add_endpoint()
-        */
-       if (!ep->hcpriv)
-               return;
-       ep_index = xhci_get_endpoint_index(&ep->desc);
-       virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
-       if (!virt_ep->stopped_td) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                       "Endpoint 0x%x not halted, refusing to reset.",
-                       ep->desc.bEndpointAddress);
-               return;
-       }
-       if (usb_endpoint_xfer_control(&ep->desc)) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                               "Control endpoint stall already handled.");
-               return;
-       }
 
-       xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                       "Queueing reset endpoint command");
-       spin_lock_irqsave(&xhci->lock, flags);
-       ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
        /*
-        * Can't change the ring dequeue pointer until it's transitioned to the
-        * stopped state, which is only upon a successful reset endpoint
-        * command.  Better hope that last command worked!
+        * We might need to implement the config ep cmd in xhci 4.8.1 note:
+        * The Reset Endpoint Command may only be issued to endpoints in the
+        * Halted state. If software wishes to reset the Data Toggle or Sequence
+        * Number of an endpoint that isn't in the Halted state, then software
+        * may issue a Configure Endpoint Command with the Drop and Add bits set
+        * for the target endpoint that is in the Stopped state.
         */
-       if (!ret) {
-               xhci_cleanup_stalled_ring(xhci, udev, ep_index);
-               kfree(virt_ep->stopped_td);
-               xhci_ring_cmd_db(xhci);
-       }
-       virt_ep->stopped_td = NULL;
-       virt_ep->stopped_stream = 0;
-       spin_unlock_irqrestore(&xhci->lock, flags);
 
-       if (ret)
-               xhci_warn(xhci, "FIXME allocate a new ring segment\n");
+       /* For now just print debug to follow the situation */
+       xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
+                ep->desc.bEndpointAddress);
 }
 
 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
diff --git a/mm/frontswap.c b/mm/frontswap.c
index c30eec536f03..f2a3571c6e22 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page)
                  the (older) page from frontswap
                 */
                inc_frontswap_failed_stores();
-               if (dup)
+               if (dup) {
                        __frontswap_clear(sis, offset);
+                       frontswap_ops->invalidate_page(type, offset);
+               }
        }
        if (frontswap_writethrough_enabled)
                /* report failure so swap also writes to swap device */
diff --git a/mm/memory.c b/mm/memory.c
index 492e36f27f43..48d7365ba4e4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -808,20 +808,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                if (!pte_file(pte)) {
                        swp_entry_t entry = pte_to_swp_entry(pte);
 
-                       if (swap_duplicate(entry) < 0)
-                               return entry.val;
-
-                       /* make sure dst_mm is on swapoff's mmlist. */
-                       if (unlikely(list_empty(&dst_mm->mmlist))) {
-                               spin_lock(&mmlist_lock);
-                               if (list_empty(&dst_mm->mmlist))
-                                       list_add(&dst_mm->mmlist,
-                                                &src_mm->mmlist);
-                               spin_unlock(&mmlist_lock);
-                       }
-                       if (likely(!non_swap_entry(entry)))
+                       if (likely(!non_swap_entry(entry))) {
+                               if (swap_duplicate(entry) < 0)
+                                       return entry.val;
+
+                               /* make sure dst_mm is on swapoff's mmlist. */
+                               if (unlikely(list_empty(&dst_mm->mmlist))) {
+                                       spin_lock(&mmlist_lock);
+                                       if (list_empty(&dst_mm->mmlist))
+                                               list_add(&dst_mm->mmlist,
+                                                        &src_mm->mmlist);
+                                       spin_unlock(&mmlist_lock);
+                               }
                                rss[MM_SWAPENTS]++;
-                       else if (is_migration_entry(entry)) {
+                       } else if (is_migration_entry(entry)) {
                                page = migration_entry_to_page(entry);
 
                                if (PageAnon(page))
diff --git a/mm/mmap.c b/mm/mmap.c
index dfe90657a6db..b91ac800d7b7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -745,8 +745,11 @@ again:                     remove_next = 1 + (end > next->vm_end);
                 * shrinking vma had, to cover any anon pages imported.
                 */
                if (exporter && exporter->anon_vma && !importer->anon_vma) {
-                       if (anon_vma_clone(importer, exporter))
-                               return -ENOMEM;
+                       int error;
+
+                       error = anon_vma_clone(importer, exporter);
+                       if (error)
+                               return error;
                        importer->anon_vma = exporter->anon_vma;
                }
        }
@@ -2428,7 +2431,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        if (err)
                goto out_free_vma;
 
-       if (anon_vma_clone(new, vma))
+       err = anon_vma_clone(new, vma);
+       if (err)
                goto out_free_mpol;
 
        if (new->vm_file)
diff --git a/mm/rmap.c b/mm/rmap.c
index cdbd31285cf6..cab982084e2b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 {
        struct anon_vma_chain *avc;
        struct anon_vma *anon_vma;
+       int error;
 
        /* Don't bother if the parent process has no anon_vma here. */
        if (!pvma->anon_vma)
@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
         * First, attach the new VMA to the parent VMA's anon_vmas,
         * so rmap can find non-COWed pages in child processes.
         */
-       if (anon_vma_clone(vma, pvma))
-               return -ENOMEM;
+       error = anon_vma_clone(vma, pvma);
+       if (error)
+               return error;
 
        /* Then add our own anon_vma. */
        anon_vma = anon_vma_alloc();
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index d4042e75f7c7..c5afd573d7da 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
        unsigned long scanned;
        unsigned long reclaimed;
 
+       spin_lock(&vmpr->sr_lock);
        /*
         * Several contexts might be calling vmpressure(), so it is
         * possible that the work was rescheduled again before the old
@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
         * here. No need for any locks here since we don't care if
         * vmpr->reclaimed is in sync.
         */
-       if (!vmpr->scanned)
+       scanned = vmpr->scanned;
+       if (!scanned) {
+               spin_unlock(&vmpr->sr_lock);
                return;
+       }
 
-       spin_lock(&vmpr->sr_lock);
-       scanned = vmpr->scanned;
        reclaimed = vmpr->reclaimed;
        vmpr->scanned = 0;
        vmpr->reclaimed = 0;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b0db904f083d..46175866851e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1453,6 +1453,7 @@ static int do_setlink(const struct sk_buff *skb,
                        goto errout;
                }
                if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+                       put_net(net);
                        err = -EPERM;
                        goto errout;
                }
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 8c8493ea6b1c..278836f1a5ad 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -271,6 +271,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
                err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
 
        rcu_read_unlock();
+
+       skb_set_inner_mac_header(skb, nhoff + grehlen);
+
        return err;
 }
 
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index e4a8f76c8995..b0a9cb4bbfdb 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -369,6 +369,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
        .validate       = vti_tunnel_validate,
        .newlink        = vti_newlink,
        .changelink     = vti_changelink,
+       .dellink        = ip_tunnel_dellink,
        .get_size       = vti_get_size,
        .fill_info      = vti_fill_info,
 };
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index b27f6d34762b..4a230b18dfe3 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -508,11 +508,11 @@ static int ip6gre_rcv(struct sk_buff *skb)
 
                skb->protocol = gre_proto;
                /* WCCP version 1 and 2 protocol decoding.
-                * - Change protocol to IP
+                * - Change protocol to IPv6
                 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
                 */
                if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
-                       skb->protocol = htons(ETH_P_IP);
+                       skb->protocol = htons(ETH_P_IPV6);
                        if ((*(h + offset) & 0xF0) != 0x40)
                                offset += 4;
                }
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 9a5339fcb450..28456c9a1847 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -825,6 +825,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev,
        return vti6_tnl_create2(dev);
 }
 
+static void vti6_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       if (dev != ip6n->fb_tnl_dev)
+               unregister_netdevice_queue(dev, head);
+}
+
 static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
                           struct nlattr *data[])
 {
@@ -900,6 +909,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
        .setup          = vti6_dev_setup,
        .validate       = vti6_validate,
        .newlink        = vti6_newlink,
+       .dellink        = vti6_dellink,
        .changelink     = vti6_changelink,
        .get_size       = vti6_get_size,
        .fill_info      = vti6_fill_info,
@@ -945,6 +955,7 @@ static int __net_init vti6_init_net(struct net *net)
        if (!ip6n->fb_tnl_dev)
                goto err_alloc_dev;
        dev_net_set(ip6n->fb_tnl_dev, net);
+       ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops;
 
        err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
        if (err < 0)
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 7c7df475a401..f056f9ed97fb 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -54,6 +54,9 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
 
        memset(&aead_req, 0, sizeof(aead_req));
 
+       if (data_len == 0)
+               return -EINVAL;
+
        sg_init_one(&pt, data, data_len);
        sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
        sg_init_table(ct, 2);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 8267b06c3646..740ca5f7add0 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -401,12 +401,12 @@ int sctp_packet_transmit(struct sctp_packet *packet)
        sk = chunk->skb->sk;
 
        /* Allocate the new skb.  */
-       nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
+       nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC);
        if (!nskb)
                goto nomem;
 
        /* Make sure the outbound skb has enough header room reserved. */
-       skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);
+       skb_reserve(nskb, packet->overhead + MAX_HEADER);
 
        /* Set the owning socket so that we know where to get the
         * destination IP address.
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index eaf64ea2e1c1..1a05efa08d96 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -333,6 +333,7 @@ static const struct hda_fixup ad1986a_fixups[] = {
 
 static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
+       SND_PCI_QUIRK(0x1043, 0x1443, "ASUS Z99He", AD1986A_FIXUP_EAPD),
        SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8JN", AD1986A_FIXUP_EAPD),
        SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
        SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4c826a40705c..910f2dbe1b24 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4554,6 +4554,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
        SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index b901f468b67a..c7aa71ee775b 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -364,6 +364,8 @@ static void snd_usbmidi_error_timer(unsigned long data)
                if (in && in->error_resubmit) {
                        in->error_resubmit = 0;
                        for (j = 0; j < INPUT_URBS; ++j) {
+                               if (atomic_read(&in->urbs[j]->use_count))
+                                       continue;
                                in->urbs[j]->dev = umidi->dev;
                                snd_usbmidi_submit_urb(in->urbs[j], GFP_ATOMIC);
                        }