The transmit slots were manipulated without proper locking. Fix this bug by
making the variable that tracks the available transmit slots atomic.

This patch should be backported to stable kernels 2.6.32 and later.


Signed-off-by: K. Y. Srinivasan <k...@microsoft.com>
Signed-off-by: Haiyang Zhang <haiya...@microsoft.com>
Signed-off-by: Hank Janssen <hjans...@microsoft.com>
Cc: stable <sta...@kernel.org>
---
 drivers/staging/hv/netvsc_drv.c |   16 +++++++++-------
 1 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 33cab9c..38ca2c2 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -21,6 +21,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/init.h>
+#include <linux/atomic.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
 #include <linux/device.h>
@@ -45,7 +46,7 @@
 struct net_device_context {
        /* point back to our device context */
        struct hv_device *device_ctx;
-       unsigned long avail;
+       atomic_t avail;
        struct delayed_work dwork;
 };
 
@@ -118,8 +119,9 @@ static void netvsc_xmit_completion(void *context)
 
                dev_kfree_skb_any(skb);
 
-               net_device_ctx->avail += num_pages;
-               if (net_device_ctx->avail >= PACKET_PAGES_HIWATER)
+               atomic_add(num_pages, &net_device_ctx->avail);
+               if (atomic_read(&net_device_ctx->avail) >=
+                               PACKET_PAGES_HIWATER)
                        netif_wake_queue(net);
        }
 }
@@ -133,7 +135,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 
        /* Add 1 for skb->data and additional one for RNDIS */
        num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
-       if (num_pages > net_device_ctx->avail)
+       if (num_pages > atomic_read(&net_device_ctx->avail))
                return NETDEV_TX_BUSY;
 
        /* Allocate a netvsc packet based on # of frags. */
@@ -185,8 +187,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
                net->stats.tx_bytes += skb->len;
                net->stats.tx_packets++;
 
-               net_device_ctx->avail -= num_pages;
-               if (net_device_ctx->avail < PACKET_PAGES_LOWATER)
+               atomic_sub(num_pages, &net_device_ctx->avail);
+               if (atomic_read(&net_device_ctx->avail) < PACKET_PAGES_LOWATER)
                        netif_stop_queue(net);
        } else {
                /* we are shutting down or bus overloaded, just drop packet */
@@ -345,7 +347,7 @@ static int netvsc_probe(struct hv_device *dev)
 
        net_device_ctx = netdev_priv(net);
        net_device_ctx->device_ctx = dev;
-       net_device_ctx->avail = ring_size;
+       atomic_set(&net_device_ctx->avail, ring_size);
        dev_set_drvdata(&dev->device, net);
        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
 
-- 
1.7.4.1

_______________________________________________
stable mailing list
stable@linux.kernel.org
http://linux.kernel.org/mailman/listinfo/stable

Reply via email to