author		Eric Dumazet <eric.dumazet@gmail.com>	2010-05-19 23:16:03 +0000
committer	David S. Miller <davem@davemloft.net>	2010-05-31 00:24:01 -0700
commit		15e83ed78864d0625e87a85f09b297c0919a4797 (patch)
tree		a138efc4400c0857a9728f7ebbfe4f5c4fb71ad1 /net/core/netpoll.c
parent		27f39c73e63833b4c081a0d681d88b4184a0491d (diff)
net: remove zap_completion_queue

netpoll does interesting work in zap_completion_queue(), but that was before
we started orphaning skbs before delivering packets to the device. It now
makes sense to add a test in dev_kfree_skb_irq() so that an already-orphaned
skb is not queued, and to remove netpoll's zap_completion_queue() as a bonus.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
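The net/core/dev.c side of the change is not part of this netpoll.c-limited
view. As a rough illustration of the idea described above (not the actual
upstream hunk), a check for an already-orphaned skb in dev_kfree_skb_irq()
could look like the sketch below; the !skb->destructor test and the early
__kfree_skb() are assumptions based on the commit message, mirroring what
zap_completion_queue() already did for destructor-less skbs.

/* Illustrative sketch only: the exact upstream test may differ. */
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		/*
		 * Assumed "already orphaned" test: skb_orphan() has run
		 * the destructor and cleared it, so nothing left needs
		 * BH/process context and the skb can be freed right away
		 * instead of being deferred to the completion queue.
		 */
		if (!skb->destructor) {
			__kfree_skb(skb);
			return;
		}

		/* Otherwise defer the free to the NET_TX softirq as before. */
		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}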
Diffstat (limited to 'net/core/netpoll.c')
-rw-r--r--	net/core/netpoll.c	| 31 -------------------------------
1 file changed, 0 insertions(+), 31 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 94825b1..e034342 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -49,7 +49,6 @@ static atomic_t trapped;
 		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
 				sizeof(struct iphdr) + sizeof(struct ethhdr))
 
-static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
 static unsigned int carrier_timeout = 4;
@@ -197,7 +196,6 @@ void netpoll_poll_dev(struct net_device *dev)
 
 	service_arp_queue(dev->npinfo);
 
-	zap_completion_queue();
 }
 
 void netpoll_poll(struct netpoll *np)
@@ -221,40 +219,11 @@ static void refill_skbs(void)
 	spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
-static void zap_completion_queue(void)
-{
-	unsigned long flags;
-	struct softnet_data *sd = &get_cpu_var(softnet_data);
-
-	if (sd->completion_queue) {
-		struct sk_buff *clist;
-
-		local_irq_save(flags);
-		clist = sd->completion_queue;
-		sd->completion_queue = NULL;
-		local_irq_restore(flags);
-
-		while (clist != NULL) {
-			struct sk_buff *skb = clist;
-			clist = clist->next;
-			if (skb->destructor) {
-				atomic_inc(&skb->users);
-				dev_kfree_skb_any(skb); /* put this one back */
-			} else {
-				__kfree_skb(skb);
-			}
-		}
-	}
-
-	put_cpu_var(softnet_data);
-}
-
 static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
 	int count = 0;
 	struct sk_buff *skb;
 
-	zap_completion_queue();
 	refill_skbs();
 
 repeat: