author    Rajesh K Borundia <rajesh.borundia@qlogic.com>    2010-06-17 02:56:41 +0000
committer David S. Miller <davem@davemloft.net>             2010-06-17 08:57:56 -0700
commit    ef71ff833acfd3795c3af1bb800ac186561508ef (patch)
tree      fa285765677574d4aafb42c9fbbc32443a8e4597 /drivers/net/qlcnic
parent    8f891387aa73b85d2ea8d953e04dac224f687e52 (diff)
qlcnic: fix race in tx stop queue
There is a race between netif_stop_queue and the netif_queue_stopped check. So check once again whether buffers are available, to avoid the race. With the above logic we can also get rid of the tx lock in process_cmd_ring.

Signed-off-by: Rajesh K Borundia <rajesh.borundia@qlogic.com>
Signed-off-by: Amit Kumar Salecha <amit.salecha@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
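The fix follows the usual lock-free stop/wake handshake between the transmit and completion paths: stop the queue when descriptors run low, issue a full memory barrier, then re-check availability before giving up, and on the completion side re-check after a barrier before waking the queue. The following is a minimal sketch of that pattern, not the driver code itself; struct sketch_tx_ring, sketch_tx_avail() and SKETCH_TX_STOP_THRESH are hypothetical stand-ins for qlcnic_host_tx_ring, qlcnic_tx_avail() and TX_STOP_THRESH.

#include <linux/netdevice.h>

/* Hypothetical stand-ins for the qlcnic ring structure and helpers. */
struct sketch_tx_ring {
	u32 producer;
	u32 sw_consumer;
	u32 num_desc;
};

#define SKETCH_TX_STOP_THRESH	10

static inline u32 sketch_tx_avail(struct sketch_tx_ring *ring)
{
	/* Free descriptors between producer and consumer (simplified). */
	return ring->num_desc - (ring->producer - ring->sw_consumer);
}

/* Transmit path: stop, barrier, re-check before reporting BUSY. */
static netdev_tx_t sketch_xmit(struct net_device *netdev,
			       struct sketch_tx_ring *ring)
{
	if (unlikely(sketch_tx_avail(ring) <= SKETCH_TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		/* Pairs with the barrier in the completion path: either the
		 * completion sees the stopped queue, or we see the
		 * descriptors it just freed. */
		smp_mb();
		if (sketch_tx_avail(ring) > SKETCH_TX_STOP_THRESH)
			netif_start_queue(netdev);	/* lost the race, resume */
		else
			return NETDEV_TX_BUSY;
	}
	/* ... build and post descriptors, update ring->producer ... */
	return NETDEV_TX_OK;
}

/* Completion path: reclaim, barrier, then wake without the tx lock. */
static void sketch_tx_complete(struct net_device *netdev,
			       struct sketch_tx_ring *ring)
{
	/* ... free completed buffers, advance ring->sw_consumer ... */
	smp_mb();
	if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
	    sketch_tx_avail(ring) > SKETCH_TX_STOP_THRESH)
		netif_wake_queue(netdev);
}

Because each side re-checks availability after its barrier, at least one of the two paths always observes the other's update, so a stopped queue cannot stay stopped while room is available; this is what lets qlcnic_process_cmd_ring drop the __netif_tx_lock in the patch below.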
Diffstat (limited to 'drivers/net/qlcnic')
-rw-r--r--  drivers/net/qlcnic/qlcnic.h       |  8
-rw-r--r--  drivers/net/qlcnic/qlcnic_hw.c    | 12
-rw-r--r--  drivers/net/qlcnic/qlcnic_init.c  |  2
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c  | 23
4 files changed, 26 insertions(+), 19 deletions(-)
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 9970cff..99ccdd8 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -113,8 +113,10 @@
#define TX_UDPV6_PKT 0x0c
/* Tx defines */
-#define MAX_BUFFERS_PER_CMD 32
-#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
+#define MAX_TSO_HEADER_DESC 2
+#define MGMT_CMD_DESC_RESV 4
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ + MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
/*
@@ -369,7 +371,7 @@ struct qlcnic_recv_crb {
*/
struct qlcnic_cmd_buffer {
struct sk_buff *skb;
- struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+ struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
u32 frag_count;
};
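For context on the new threshold: each command descriptor carries up to four fragments (see the "4 fragments per cmd des" comment in qlcnic_xmit_frame below), a TSO packet may need up to MAX_TSO_HEADER_DESC extra descriptors for the copied header, and MGMT_CMD_DESC_RESV presumably reserves room for management commands such as those posted by qlcnic_send_cmd_descs. The standalone arithmetic below works the worst case through, assuming 4 KiB pages so that MAX_SKB_FRAGS is 18; only the constants taken from the hunk above are from the patch itself.

#include <stdio.h>

/* Assumption: MAX_SKB_FRAGS is 18 on a 4 KiB-page kernel
 * ((65536 / PAGE_SIZE) + 2); the other constants mirror the hunk above. */
#define MAX_SKB_FRAGS		18
#define MAX_TSO_HEADER_DESC	2
#define MGMT_CMD_DESC_RESV	4
#define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
				 + MGMT_CMD_DESC_RESV)

int main(void)
{
	/* Worst case for one skb: the linear part plus MAX_SKB_FRAGS pages,
	 * packed four fragments per command descriptor, plus the TSO
	 * header descriptors. */
	int frag_count = MAX_SKB_FRAGS + 1;
	int no_of_desc = (frag_count + 3) >> 2;
	int worst_case = no_of_desc + MAX_TSO_HEADER_DESC;

	printf("TX_STOP_THRESH = %d, worst-case packet = %d descriptors\n",
	       TX_STOP_THRESH, worst_case);	/* prints 10 and 7 */
	return 0;
}

With these assumed numbers the threshold (10) exceeds what a single worst-case packet needs (7), so a queue woken only when availability is above TX_STOP_THRESH always has room for at least one more packet.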
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index f776956..d9becb9 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -338,9 +338,15 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
netif_tx_stop_queue(tx_ring->txq);
- __netif_tx_unlock_bh(tx_ring->txq);
- adapter->stats.xmit_off++;
- return -EBUSY;
+ smp_mb();
+ if (qlcnic_tx_avail(tx_ring) > nr_desc) {
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_tx_wake_queue(tx_ring->txq);
+ } else {
+ adapter->stats.xmit_off++;
+ __netif_tx_unlock_bh(tx_ring->txq);
+ return -EBUSY;
+ }
}
do {
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 2bd00d5..058ce61 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -181,7 +181,9 @@ skip_rds:
tx_ring = adapter->tx_ring;
vfree(tx_ring->cmd_buf_arr);
+ tx_ring->cmd_buf_arr = NULL;
kfree(adapter->tx_ring);
+ adapter->tx_ring = NULL;
}
int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 06d2dfd..655bccd 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -132,12 +132,6 @@ qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring)
{
writel(tx_ring->producer, tx_ring->crb_cmd_producer);
-
- if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
- netif_stop_queue(adapter->netdev);
- smp_mb();
- adapter->stats.xmit_off++;
- }
}
static const u32 msi_tgt_status[8] = {
@@ -1137,7 +1131,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
adapter->max_mc_count = 38;
netdev->netdev_ops = &qlcnic_netdev_ops;
- netdev->watchdog_timeo = 2*HZ;
+ netdev->watchdog_timeo = 5*HZ;
qlcnic_change_mtu(netdev, netdev->mtu);
@@ -1709,10 +1703,15 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
- if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
+ if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
netif_stop_queue(netdev);
- adapter->stats.xmit_off++;
- return NETDEV_TX_BUSY;
+ smp_mb();
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_start_queue(netdev);
+ else {
+ adapter->stats.xmit_off++;
+ return NETDEV_TX_BUSY;
+ }
}
producer = tx_ring->producer;
@@ -2018,14 +2017,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
smp_mb();
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
- __netif_tx_lock(tx_ring->txq, smp_processor_id());
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_wake_queue(netdev);
- adapter->tx_timeo_cnt = 0;
adapter->stats.xmit_on++;
}
- __netif_tx_unlock(tx_ring->txq);
}
+ adapter->tx_timeo_cnt = 0;
}
/*
* If everything is freed up to consumer then check if the ring is full