| author | Stephen Hemminger <shemminger@vyatta.com> | 2008-08-03 21:29:57 -0700 |
| --- | --- | --- |
| committer | David S. Miller <davem@davemloft.net> | 2008-08-03 21:29:57 -0700 |
| commit | 6e583ce5242f32e925dcb198f7123256d0798370 (patch) | |
| tree | 9bf826ddc1c2826015a6d59141f7c53e094b0204 /net | |
| parent | 283d07ac201ee9f8aa6dc6f7519436b48760baff (diff) | |
net: eliminate refcounting in backlog queue
Avoid the overhead of an atomic increment/decrement of the device refcount on every received packet. This helps the performance of non-NAPI devices (like loopback).

The per-packet dev_hold()/dev_put() pair only kept skb->dev valid while the packet sat in a per-CPU backlog queue. Provide that guarantee instead with a cleanup function that runs at unregistration time, walks the backlog queue on each CPU, and frees any leftover packets destined for the departing device.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
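
To make the removed cost concrete, here is a minimal userspace sketch (plain C11; `struct fake_dev` and the queue stubs are invented stand-ins, not kernel APIs) of the per-packet refcounting pattern this patch eliminates: two atomic read-modify-write operations on the receive hot path.

```c
#include <stdatomic.h>
#include <stdio.h>

/* Invented stand-in for struct net_device and its reference count. */
struct fake_dev {
	atomic_int refcnt;
};

/* Old scheme: every packet queued to the backlog pins its device. */
static void backlog_enqueue(struct fake_dev *dev)
{
	atomic_fetch_add(&dev->refcnt, 1);   /* dev_hold() per packet */
	/* ... __skb_queue_tail() would append the packet here ... */
}

static void backlog_dequeue(struct fake_dev *dev)
{
	/* ... netif_receive_skb() would process the packet here ... */
	atomic_fetch_sub(&dev->refcnt, 1);   /* dev_put() per packet */
}

int main(void)
{
	struct fake_dev dev;
	atomic_init(&dev.refcnt, 1);

	/* Two atomic read-modify-write operations per packet, paid on
	 * every packet -- the hot-path cost the patch removes. */
	backlog_enqueue(&dev);
	backlog_dequeue(&dev);
	printf("refcnt = %d\n", atomic_load(&dev.refcnt));
	return 0;
}
```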
Diffstat (limited to 'net')
-rw-r--r-- | net/core/dev.c | 23 |
1 file changed, 16 insertions(+), 7 deletions(-)
```diff
diff --git a/net/core/dev.c b/net/core/dev.c
index cbf8009..fc6c988 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1909,7 +1909,6 @@ int netif_rx(struct sk_buff *skb)
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
 enqueue:
-			dev_hold(skb->dev);
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
@@ -2270,6 +2269,20 @@ out:
 	return ret;
 }
 
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+	struct net_device *dev = arg;
+	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+		if (skb->dev == dev) {
+			__skb_unlink(skb, &queue->input_pkt_queue);
+			kfree_skb(skb);
+		}
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
@@ -2279,7 +2292,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
-		struct net_device *dev;
 
 		local_irq_disable();
 		skb = __skb_dequeue(&queue->input_pkt_queue);
@@ -2288,14 +2300,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			local_irq_enable();
 			break;
 		}
-
 		local_irq_enable();
 
-		dev = skb->dev;
-
 		netif_receive_skb(skb);
-
-		dev_put(dev);
 	} while (++work < quota && jiffies == start_time);
 
 	return work;
@@ -4169,6 +4176,8 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
+		on_each_cpu(flush_backlog, dev, 1);
+
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
```
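
The new flush_backlog() relies on skb_queue_walk_safe(), which caches the next element before each loop iteration so the current skb can be unlinked and freed mid-walk. Below is a self-contained userspace sketch of that idiom (the `struct pkt` queue and `dev_id` field are invented stand-ins for `struct sk_buff_head` and `skb->dev`): it deletes every entry belonging to a departing device while traversing, the same shape as the kernel loop.

```c
#include <stdio.h>
#include <stdlib.h>

/* A circular doubly linked packet queue, loosely modelling sk_buff_head. */
struct pkt {
	struct pkt *next, *prev;
	int dev_id;                          /* invented stand-in for skb->dev */
};

struct queue {
	struct pkt head;                     /* sentinel node */
};

static void queue_init(struct queue *q)
{
	q->head.next = q->head.prev = &q->head;
}

static void enqueue(struct queue *q, struct pkt *p)  /* __skb_queue_tail() analogue */
{
	p->prev = q->head.prev;
	p->next = &q->head;
	p->prev->next = p;
	q->head.prev = p;
}

static void unlink_pkt(struct pkt *p)                /* __skb_unlink() analogue */
{
	p->prev->next = p->next;
	p->next->prev = p->prev;
}

/* flush_backlog() analogue: 'tmp' caches the next node before the body
 * runs, which is the whole point of skb_queue_walk_safe() -- the current
 * node may be unlinked and freed without breaking the traversal. */
static void flush(struct queue *q, int dev_id)
{
	struct pkt *p, *tmp;

	for (p = q->head.next; p != &q->head; p = tmp) {
		tmp = p->next;
		if (p->dev_id == dev_id) {
			unlink_pkt(p);
			free(p);             /* kfree_skb() analogue */
		}
	}
}

int main(void)
{
	struct queue q;
	queue_init(&q);

	for (int i = 0; i < 4; i++) {        /* queue packets for devices 0 and 1 */
		struct pkt *p = malloc(sizeof(*p));
		p->dev_id = i % 2;
		enqueue(&q, p);
	}

	flush(&q, 1);                        /* device 1 is going away */

	for (struct pkt *p = q.head.next; p != &q.head; p = p->next)
		printf("remaining packet for dev %d\n", p->dev_id);
	return 0;
}
```

Note also where the patch places the on_each_cpu(flush_backlog, dev, 1) call: it runs before netdev_wait_allrefs(), and each per-CPU invocation executes with interrupts disabled, so the walk cannot race the IRQ-protected enqueue in netif_rx() on the same CPU.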