| author    | Amerigo Wang <amwang@redhat.com>                 | 2012-09-17 20:16:31 +0000 |
|-----------|--------------------------------------------------|---------------------------|
| committer | David S. Miller <davem@davemloft.net>            | 2012-09-19 17:19:09 -0400 |
| commit    | 8c4c49df5cfeb8d56e5b85a430c8cbcb86c2ac37 (patch) |                           |
| tree      | f57cd83822deb6f675dce888f86d007f2b9e3998 /net    |                           |
| parent    | 6b6e27255f29a6191ef8ad96bfcc392ab2ef6c71 (diff)  |                           |
| download  | op-kernel-dev-8c4c49df5cfeb8d56e5b85a430c8cbcb86c2ac37.zip, op-kernel-dev-8c4c49df5cfeb8d56e5b85a430c8cbcb86c2ac37.tar.gz | |
netpoll: call ->ndo_select_queue() in tx path
In the netpoll tx path, we miss the chance to call ->ndo_select_queue(),
which can cause problems when bonding is involved.
This patch makes dev_pick_tx() extern (and renames it to netdev_pick_tx())
so that netpoll can call it in netpoll_send_skb_on_dev().
Reported-by: Sylvain Munaut <s.munaut@whatever-company.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Cong Wang <amwang@redhat.com>
Tested-by: Sylvain Munaut <s.munaut@whatever-company.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
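
To make the queue-selection change easier to follow, here is a small standalone C sketch of the decision that the patch routes netpoll through: when a driver (for example bonding) provides ->ndo_select_queue(), that callback picks the tx queue, and only otherwise does the stack fall back to a generic hash. The struct layouts and names below (fake_netdev, fake_skb, pick_tx_queue, bond_select_queue) are illustrative stand-ins, not the kernel's real types or the exact body of netdev_pick_tx().

```c
#include <stdio.h>

/* Simplified stand-ins for struct net_device / struct sk_buff (illustrative only). */
struct fake_skb {
	unsigned int hash;              /* pretend flow hash */
};

struct fake_netdev {
	unsigned int num_tx_queues;
	/* Optional driver hook, analogous to ->ndo_select_queue(). */
	unsigned int (*select_queue)(struct fake_netdev *dev, struct fake_skb *skb);
};

/* A driver-specific policy, roughly what a bonding-like driver might do. */
static unsigned int bond_select_queue(struct fake_netdev *dev, struct fake_skb *skb)
{
	(void)dev;
	/* Route odd-hash packets to queue 1, the rest to queue 0. */
	return skb->hash & 1;
}

/* Models the selection logic: prefer the driver hook, else fall back to a hash. */
static unsigned int pick_tx_queue(struct fake_netdev *dev, struct fake_skb *skb)
{
	if (dev->num_tx_queues == 1)
		return 0;
	if (dev->select_queue)
		return dev->select_queue(dev, skb) % dev->num_tx_queues;
	return skb->hash % dev->num_tx_queues;   /* generic fallback */
}

int main(void)
{
	struct fake_netdev bond = { .num_tx_queues = 4, .select_queue = bond_select_queue };
	struct fake_skb skb = { .hash = 7 };

	/* The driver hook decides here; without it, only the fallback hash would run. */
	printf("queue picked: %u\n", pick_tx_queue(&bond, &skb));
	return 0;
}
```

The asymmetry this models is exactly what the patch removes: before it, netpoll_send_skb_on_dev() simply reused whatever queue mapping the skb already carried, never giving the driver's ->ndo_select_queue() a chance; after it, netpoll and dev_queue_xmit() share the same selection via netdev_pick_tx().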
Diffstat (limited to 'net')
-rw-r--r-- | net/core/dev.c     | 6 |
-rw-r--r-- | net/core/netpoll.c | 2 |

2 files changed, 4 insertions, 4 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index bbda819..707b124 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2396,8 +2396,8 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 #endif
 }
 
-static struct netdev_queue *dev_pick_tx(struct net_device *dev,
-                                        struct sk_buff *skb)
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+                                    struct sk_buff *skb)
 {
         int queue_index;
         const struct net_device_ops *ops = dev->netdev_ops;
@@ -2571,7 +2571,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
         skb_update_prio(skb);
 
-        txq = dev_pick_tx(dev, skb);
+        txq = netdev_pick_tx(dev, skb);
         q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index dd67818..77a0388 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -328,7 +328,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
         if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                 struct netdev_queue *txq;
 
-                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+                txq = netdev_pick_tx(dev, skb);
 
                 /* try until next clock tick */
                 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;