ChangeSet 1.2199.8.23, 2005/03/22 18:31:14-08:00, [EMAIL PROTECTED]
[NETPOLL]: Add optional dropping and queueing support
This adds a callback for packets we can't deliver immediately and a
helper function for clients to queue such packets to the device
post-interrupt.
Netconsole is modified to use the queueing function for best-effort
delivery.
Signed-off-by: Matt Mackall <[EMAIL PROTECTED]>
Signed-off-by: David S. Miller <[EMAIL PROTECTED]>
drivers/net/netconsole.c | 1
include/linux/netpoll.h | 2 +
net/core/netpoll.c | 59 +++++++++++++++++++++++++++++++++++++++++++++--
3 files changed, 60 insertions(+), 2 deletions(-)
diff -Nru a/drivers/net/netconsole.c b/drivers/net/netconsole.c
--- a/drivers/net/netconsole.c 2005-03-26 17:15:08 -08:00
+++ b/drivers/net/netconsole.c 2005-03-26 17:15:08 -08:00
@@ -60,6 +60,7 @@
.local_port = 6665,
.remote_port = 6666,
.remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ .drop = netpoll_queue,
};
static int configured = 0;
diff -Nru a/include/linux/netpoll.h b/include/linux/netpoll.h
--- a/include/linux/netpoll.h 2005-03-26 17:15:08 -08:00
+++ b/include/linux/netpoll.h 2005-03-26 17:15:08 -08:00
@@ -18,6 +18,7 @@
char dev_name[16], *name;
int rx_flags;
void (*rx_hook)(struct netpoll *, int, char *, int);
+ void (*drop)(struct sk_buff *skb);
u32 local_ip, remote_ip;
u16 local_port, remote_port;
unsigned char local_mac[6], remote_mac[6];
@@ -33,6 +34,7 @@
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
+void netpoll_queue(struct sk_buff *skb);
#ifdef CONFIG_NETPOLL
static inline int netpoll_rx(struct sk_buff *skb)
diff -Nru a/net/core/netpoll.c b/net/core/netpoll.c
--- a/net/core/netpoll.c 2005-03-26 17:15:08 -08:00
+++ b/net/core/netpoll.c 2005-03-26 17:15:08 -08:00
@@ -19,6 +19,7 @@
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
+#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
@@ -28,13 +29,18 @@
* message gets out even in extreme OOM situations.
*/
-#define MAX_SKBS 32
#define MAX_UDP_CHUNK 1460
+#define MAX_SKBS 32
+#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
static DEFINE_SPINLOCK(skb_list_lock);
static int nr_skbs;
static struct sk_buff *skbs;
+static DEFINE_SPINLOCK(queue_lock);
+static int queue_depth;
+static struct sk_buff *queue_head, *queue_tail;
+
static atomic_t trapped;
#define NETPOLL_RX_ENABLED 1
@@ -46,6 +52,50 @@
static void zap_completion_queue(void);
+/* Work-queue handler: drain the deferred-transmit queue built up by
+ * netpoll_queue(). Runs in process context via schedule_work(), so
+ * dev_queue_xmit() can be called normally here (unlike in the netpoll
+ * hot path, where ->poll recursion forced the packet to be deferred).
+ * The 'p' argument (DECLARE_WORK data, NULL below) is unused. */
+static void queue_process(void *p)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ /* NOTE(review): queue_head is tested without queue_lock held. This
+  * looks safe only because this single work item is the sole consumer
+  * and producers never change a non-NULL head — confirm against the
+  * workqueue non-reentrancy guarantees of this kernel version. */
+ while (queue_head) {
+ spin_lock_irqsave(&queue_lock, flags);
+
+ /* Pop one skb from the singly-linked list under the lock. The
+  * tail node's ->next is never initialized by the producer, so the
+  * stale value read here is overwritten by the tail check below. */
+ skb = queue_head;
+ queue_head = skb->next;
+ if (skb == queue_tail)
+ queue_head = NULL;
+
+ queue_depth--;
+
+ spin_unlock_irqrestore(&queue_lock, flags);
+
+ /* Transmit outside the lock; queue_lock is IRQ-safe and must not
+  * be held across the (potentially long) device xmit path. */
+ dev_queue_xmit(skb);
+ }
+}
+
+/* Single shared work item; netpoll_queue() kicks it after each enqueue. */
+static DECLARE_WORK(send_queue, queue_process, NULL);
+
+/* Best-effort deferred transmit for netpoll clients (used as the ->drop
+ * callback, e.g. by netconsole): append skb to a bounded FIFO and let
+ * queue_process() send it from keventd after the interrupt. The skb is
+ * freed — i.e. silently dropped — if the queue is already full. */
+void netpoll_queue(struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ /* NOTE(review): this depth check runs before queue_lock is taken,
+  * so two concurrent callers can both pass it and push queue_depth
+  * past MAX_QUEUE_DEPTH (check-then-act race). Harmless for a
+  * best-effort bound, but moving the check under the lock (freeing
+  * the skb after unlock) would make the cap exact. */
+ if (queue_depth == MAX_QUEUE_DEPTH) {
+ __kfree_skb(skb);
+ return;
+ }
+
+ /* Append to the tail of the singly-linked FIFO under the IRQ-safe
+  * lock; queue_tail->next is left uninitialized for the new tail
+  * (queue_process() compensates via its skb == queue_tail test). */
+ spin_lock_irqsave(&queue_lock, flags);
+ if (!queue_head)
+ queue_head = skb;
+ else
+ queue_tail->next = skb;
+ queue_tail = skb;
+ queue_depth++;
+ spin_unlock_irqrestore(&queue_lock, flags);
+
+ /* Kick the drain; schedule_work() is a no-op if already pending. */
+ schedule_work(&send_queue);
+}
+
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
unsigned short ulen, u32 saddr, u32 daddr)
{
@@ -199,7 +249,10 @@
/* avoid ->poll recursion */
if(np->poll_owner == __smp_processor_id()) {
- __kfree_skb(skb);
+ if (np->drop)
+ np->drop(skb);
+ else
+ __kfree_skb(skb);
return;
}
@@ -274,6 +327,8 @@
eth->h_proto = htons(ETH_P_IP);
memcpy(eth->h_source, np->local_mac, 6);
memcpy(eth->h_dest, np->remote_mac, 6);
+
+ skb->dev = np->dev;
netpoll_send_skb(np, skb);
}
-
To unsubscribe from this list: send the line "unsubscribe bk-commits-head" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at http://vger.kernel.org/majordomo-info.html