author    Michael Chan <mchan@broadcom.com>    2006-06-17 21:58:45 -0700
committer David S. Miller <davem@davemloft.net>    2006-06-17 21:58:45 -0700
commit    00b7050426da8e7e58c889c5c80a19920d2d41b3 (patch)
tree      55a07464fdf81b23fced966feca2b041af77c6f6 /drivers/net/tg3.c
parent    c71302d61f844f766a44e1b04258086cc41f624e (diff)
download  op-kernel-dev-00b7050426da8e7e58c889c5c80a19920d2d41b3.zip
          op-kernel-dev-00b7050426da8e7e58c889c5c80a19920d2d41b3.tar.gz
[TG3]: Convert to non-LLTX
Herbert Xu pointed out that it is unsafe to call netif_tx_disable() from LLTX drivers because it uses dev->xmit_lock to synchronize, whereas LLTX drivers use private locks. Convert tg3 to non-LLTX to fix this issue.

tg3 is a lockless driver where hard_start_xmit and tx completion handling can run concurrently under normal conditions. A tx_lock is only needed to prevent netif_stop_queue and netif_wake_queue race conditions when the queue is full. So whether we use LLTX or non-LLTX, it makes practically no difference.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
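To illustrate the pattern the patch converts to, here is a minimal sketch of a non-LLTX transmit path in which the core holds netif_tx_lock around hard_start_xmit and a private tx_lock guards only the stop/wake decision. The names my_priv, my_start_xmit, my_tx_avail() and MY_TX_WAKEUP_THRESH are placeholders for illustration, not actual tg3 symbols.

    /* Minimal sketch of the non-LLTX stop/wake pattern; placeholder
     * names, not the real tg3 code. */
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    #define MY_TX_WAKEUP_THRESH	32	/* arbitrary illustrative value */

    struct my_priv {
    	spinlock_t tx_lock;		/* guards only stop/wake decisions */
    	struct net_device *dev;
    	/* ... ring pointers, producer/consumer indices ... */
    };

    static u32 my_tx_avail(struct my_priv *tp);	/* free descriptors left */

    /* Called by the core with netif_tx_lock held and BHs disabled, so no
     * spin_trylock()/NETDEV_TX_LOCKED dance is needed in the driver. */
    static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
    	struct my_priv *tp = netdev_priv(dev);

    	/* ... map the skb and post descriptors to the hardware ring ... */

    	if (unlikely(my_tx_avail(tp) <= MAX_SKB_FRAGS + 1)) {
    		spin_lock(&tp->tx_lock);
    		netif_stop_queue(dev);
    		/* Re-check: TX reclaim may have freed space meanwhile. */
    		if (my_tx_avail(tp) > MY_TX_WAKEUP_THRESH)
    			netif_wake_queue(dev);
    		spin_unlock(&tp->tx_lock);
    	}

    	dev->trans_start = jiffies;
    	return NETDEV_TX_OK;
    }

Because the core already serializes transmits with netif_tx_lock, the driver's private lock shrinks to the rarely taken full-ring branch, which is why the conversion removes more code than it adds.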
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--  drivers/net/tg3.c | 27
1 file changed, 10 insertions(+), 17 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 542d4c3..b2ddd45 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3759,14 +3759,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb);
- /* No BH disabling for tx_lock here. We are running in BH disabled
- * context and TX reclaim runs via tp->poll inside of a software
+ /* We are running in BH disabled context with netif_tx_lock
+ * and TX reclaim runs via tp->poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (!spin_trylock(&tp->tx_lock))
- return NETDEV_TX_LOCKED;
-
if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
@@ -3775,7 +3772,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
"queue awake!\n", dev->name);
}
- spin_unlock(&tp->tx_lock);
return NETDEV_TX_BUSY;
}
@@ -3858,15 +3854,16 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
tp->tx_prod = entry;
- if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+ if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
+ spin_lock(&tp->tx_lock);
netif_stop_queue(dev);
if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
netif_wake_queue(tp->dev);
+ spin_unlock(&tp->tx_lock);
}
out_unlock:
mmiowb();
- spin_unlock(&tp->tx_lock);
dev->trans_start = jiffies;
@@ -3885,14 +3882,11 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb);
- /* No BH disabling for tx_lock here. We are running in BH disabled
- * context and TX reclaim runs via tp->poll inside of a software
+ /* We are running in BH disabled context with netif_tx_lock
+ * and TX reclaim runs via tp->poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (!spin_trylock(&tp->tx_lock))
- return NETDEV_TX_LOCKED;
-
if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
@@ -3901,7 +3895,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
"queue awake!\n", dev->name);
}
- spin_unlock(&tp->tx_lock);
return NETDEV_TX_BUSY;
}
@@ -4039,15 +4032,16 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
tp->tx_prod = entry;
- if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+ if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
+ spin_lock(&tp->tx_lock);
netif_stop_queue(dev);
if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
netif_wake_queue(tp->dev);
+ spin_unlock(&tp->tx_lock);
}
out_unlock:
mmiowb();
- spin_unlock(&tp->tx_lock);
dev->trans_start = jiffies;
@@ -11284,7 +11278,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->vlan_rx_register = tg3_vlan_rx_register;
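The comment kept in both hunks notes that TX reclaim runs via tp->poll in softirq context. A rough sketch of how such a reclaim path typically pairs with the stop/wake logic above, reusing the hypothetical struct my_priv from the earlier sketch (again illustrative, not the actual tg3_tx() code):

    /* TX reclaim, run from the NAPI poll handler in softirq context. */
    static void my_tx_reclaim(struct my_priv *tp)
    {
    	/* ... unmap and free completed skbs, advance the consumer index ... */

    	if (unlikely(netif_queue_stopped(tp->dev)) &&
    	    my_tx_avail(tp) > MY_TX_WAKEUP_THRESH) {
    		spin_lock(&tp->tx_lock);
    		/* Re-check under tx_lock so a concurrent xmit that is about
    		 * to stop the queue cannot leave it stopped forever. */
    		if (netif_queue_stopped(tp->dev) &&
    		    my_tx_avail(tp) > MY_TX_WAKEUP_THRESH)
    			netif_wake_queue(tp->dev);
    		spin_unlock(&tp->tx_lock);
    	}
    }

Taking tx_lock on both sides of the stop/wake handshake is what keeps the queue from being left stopped when the ring fills and drains at the same moment, which is the only race the private lock still has to cover after the LLTX removal.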