ChangeSet 1.2199.8.22, 2005/03/22 18:30:38-08:00, [EMAIL PROTECTED]
[NETPOLL]: Fix ->poll() locking
Introduce a per-client poll lock and flag. The lock ensures we never
have more than one caller in dev->poll() at a time. The flag provides
recursion avoidance on UP, where the lock disappears.
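
For illustration only (not part of this change), here is a minimal
user-space analogue of the trylock-plus-owner scheme. The pthread-based
names are invented for the sketch: send_skb() stands in for
netpoll_send_skb() and poll_card() for the ->poll() path.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t poll_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t poll_owner;
static int poll_owner_valid;		/* plays the role of poll_owner != -1 */

/* Transmit path: can be reached from inside poll_card() itself */
static void send_skb(void)
{
	if (poll_owner_valid && pthread_equal(poll_owner, pthread_self())) {
		/* the kernel drops the skb here instead of recursing */
		printf("inside ->poll(), dropping packet\n");
		return;
	}
	printf("packet sent\n");
}

/* The ->poll() analogue: only one caller at a time, owner recorded */
static void poll_card(void)
{
	if (pthread_mutex_trylock(&poll_lock) != 0)
		return;			/* ->poll() already running elsewhere */

	poll_owner = pthread_self();
	poll_owner_valid = 1;

	send_skb();			/* backs off thanks to the owner check */

	poll_owner_valid = 0;		/* clear ownership, then drop the lock */
	pthread_mutex_unlock(&poll_lock);
}

int main(void)
{
	poll_card();
	return 0;
}
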
Signed-off-by: Matt Mackall <[EMAIL PROTECTED]>
Signed-off-by: David S. Miller <[EMAIL PROTECTED]>
include/linux/netpoll.h | 21 +++++++++++++++++++++
net/core/dev.c | 3 +++
net/core/netpoll.c | 30 +++++++++++++++++++++---------
3 files changed, 45 insertions(+), 9 deletions(-)
diff -Nru a/include/linux/netpoll.h b/include/linux/netpoll.h
--- a/include/linux/netpoll.h 2005-03-26 17:14:55 -08:00
+++ b/include/linux/netpoll.h 2005-03-26 17:14:55 -08:00
@@ -21,6 +21,8 @@
u32 local_ip, remote_ip;
u16 local_port, remote_port;
unsigned char local_mac[6], remote_mac[6];
+ spinlock_t poll_lock;
+ int poll_owner;
};
void netpoll_poll(struct netpoll *np);
@@ -37,8 +39,27 @@
{
return skb->dev->np && skb->dev->np->rx_flags && __netpoll_rx(skb);
}
+
+static inline void netpoll_poll_lock(struct net_device *dev)
+{
+ if (dev->np) {
+ spin_lock(&dev->np->poll_lock);
+ dev->np->poll_owner = __smp_processor_id();
+ }
+}
+
+static inline void netpoll_poll_unlock(struct net_device *dev)
+{
+ if (dev->np) {
+ dev->np->poll_owner = -1;
+ spin_unlock(&dev->np->poll_lock);
+ }
+}
+
#else
#define netpoll_rx(a) 0
+#define netpoll_poll_lock(a)
+#define netpoll_poll_unlock(a)
#endif
#endif
diff -Nru a/net/core/dev.c b/net/core/dev.c
--- a/net/core/dev.c 2005-03-26 17:14:55 -08:00
+++ b/net/core/dev.c 2005-03-26 17:14:55 -08:00
@@ -1775,8 +1775,10 @@
dev = list_entry(queue->poll_list.next,
struct net_device, poll_list);
+ netpoll_poll_lock(dev);
if (dev->quota <= 0 || dev->poll(dev, &budget)) {
+ netpoll_poll_unlock(dev);
local_irq_disable();
list_del(&dev->poll_list);
list_add_tail(&dev->poll_list, &queue->poll_list);
@@ -1785,6 +1787,7 @@
else
dev->quota = dev->weight;
} else {
+ netpoll_poll_unlock(dev);
dev_put(dev);
local_irq_disable();
}
diff -Nru a/net/core/netpoll.c b/net/core/netpoll.c
--- a/net/core/netpoll.c 2005-03-26 17:14:55 -08:00
+++ b/net/core/netpoll.c 2005-03-26 17:14:55 -08:00
@@ -36,7 +36,6 @@
static struct sk_buff *skbs;
static atomic_t trapped;
-static DEFINE_SPINLOCK(netpoll_poll_lock);
#define NETPOLL_RX_ENABLED 1
#define NETPOLL_RX_DROP 2
@@ -63,8 +62,15 @@
}
/*
- * Check whether delayed processing was scheduled for our current CPU,
- * and then manually invoke NAPI polling to pump data off the card.
+ * Check whether delayed processing was scheduled for our NIC. If so,
+ * we attempt to grab the poll lock and use ->poll() to pump the card.
+ * If this fails, either we've recursed in ->poll() or it's already
+ * running on another CPU.
+ *
+ * Note: we don't mask interrupts with this lock because we're using
+ * trylock here and interrupts are already disabled in the softirq
+ * case. Further, we test the poll_owner to avoid recursion on UP
+ * systems where the lock doesn't exist.
*
* In cases where there is bi-directional communication, reading only
* one message at a time can lead to packets being dropped by the
@@ -74,13 +80,10 @@
static void poll_napi(struct netpoll *np)
{
int budget = 16;
- unsigned long flags;
- struct softnet_data *queue;
- spin_lock_irqsave(&netpoll_poll_lock, flags);
- queue = &__get_cpu_var(softnet_data);
if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
- !list_empty(&queue->poll_list)) {
+ np->poll_owner != __smp_processor_id() &&
+ spin_trylock(&np->poll_lock)) {
np->rx_flags |= NETPOLL_RX_DROP;
atomic_inc(&trapped);
@@ -88,8 +91,8 @@
atomic_dec(&trapped);
np->rx_flags &= ~NETPOLL_RX_DROP;
+ spin_unlock(&np->poll_lock);
}
- spin_unlock_irqrestore(&netpoll_poll_lock, flags);
}
void netpoll_poll(struct netpoll *np)
@@ -194,6 +197,12 @@
return;
}
+ /* avoid ->poll recursion */
+ if (np->poll_owner == __smp_processor_id()) {
+ __kfree_skb(skb);
+ return;
+ }
+
spin_lock(&np->dev->xmit_lock);
np->dev->xmit_lock_owner = smp_processor_id();
@@ -541,6 +550,9 @@
{
struct net_device *ndev = NULL;
struct in_device *in_dev;
+
+ np->poll_lock = SPIN_LOCK_UNLOCKED;
+ np->poll_owner = -1;
if (np->dev_name)
ndev = dev_get_by_name(np->dev_name);
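
A side note on the UP case mentioned in the comment above (a rough
sketch of intent, not the real preprocessor output): with CONFIG_SMP
unset, and leaving preemption and spinlock debugging aside,
spin_lock()/spin_unlock() provide no exclusion, so the recursion guard
in netpoll_send_skb() rests entirely on poll_owner.

/* What the new helpers roughly reduce to on a UP build */
static inline void netpoll_poll_lock(struct net_device *dev)
{
	if (dev->np)				/* spin_lock() contributes nothing */
		dev->np->poll_owner = 0;	/* __smp_processor_id() is always 0 */
}

static inline void netpoll_poll_unlock(struct net_device *dev)
{
	if (dev->np)				/* spin_unlock() contributes nothing */
		dev->np->poll_owner = -1;
}

/*
 * So the check added to netpoll_send_skb(),
 *	np->poll_owner == __smp_processor_id()
 * reduces to "is ->poll() already on this (the only) CPU's stack?", and
 * the skb is dropped rather than re-entering the driver, e.g. when a
 * printk routed through netconsole fires while the driver is mid-poll.
 */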