author     jhb <jhb@FreeBSD.org>  2012-03-30 19:54:48 +0000
committer  jhb <jhb@FreeBSD.org>  2012-03-30 19:54:48 +0000
commit     0f7a6b4eb78584a88dfb32549c62e351d32b33fe (patch)
tree       04b0eb50781f96b1e7e2e14293fb989d98971e1b /sys/dev/e1000
parent     1dd88fec2f6597efdfc693ade6e17b4a47b033c3 (diff)
Fix a few issues with transmit handling in em(4) and igb(4):
- Do not define the foo_start() methods or set if_start in the ifnet if
  multiq transmit is enabled. Also, set if_transmit and if_qflush before
  ether_ifattach() rather than after when multiq transmit is enabled. This
  helps to ensure that the drivers never try to mix different transmit
  methods.
- Properly restart transmit during resume. igb(4) was not restarting it at
  all, and em(4) was restarting it even if the link was down and was calling
  the wrong method if multiq transmit was enabled.
- Remove all the 'more' handling for transmit completions. Transmit
  completion processing does not have a processing limit, so it always runs
  to completion and never has more work to do when it returns. Instead, the
  previous code was returning 'true' anytime there were packets in the queue
  that weren't still in the process of being transmitted. The effect was
  that the driver would continuously reschedule a task to process TX
  completions, in effect running at 100% CPU polling the hardware until it
  finished transmitting all of the packets in the ring. Now it will just
  wait for the next TX completion interrupt.
- Restart packet transmission when the link becomes active.
- Fix the MSI-X queue interrupt handlers to restart packet transmission if
  there are pending packets in the relevant software queue (IFQ or buf_ring)
  after processing TX completions. This is the root cause of the OACTIVE
  hangs: if the MSI-X queue handler drained all the pending packets from the
  TX ring, nothing would ever restart it. As such, remove some
  previously-added workarounds that rescheduled a task to poll the TX ring
  anytime OACTIVE was set.

Tested by:	sbruno
Reviewed by:	jfv
MFC after:	1 week
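For reference, a condensed sketch of the new MSI-X TX handler flow (it mirrors
the em_msix_tx() hunk in the patch below and reuses the driver's own locking
macros and helpers; it is an illustration of the pattern, not a standalone
compilable excerpt): after reaping completed descriptors, the handler pulls
any pending packets from the software queue itself and then re-enables the
queue interrupt, instead of deciding whether to reschedule a cleanup task.

static void
em_msix_tx(void *arg)
{
	struct tx_ring *txr = arg;
	struct adapter *adapter = txr->adapter;
	struct ifnet *ifp = adapter->ifp;

	++txr->tx_irq;
	EM_TX_LOCK(txr);
	em_txeof(txr);			/* reap completions; no 'more' result anymore */
#ifdef EM_MULTIQUEUE
	if (!drbr_empty(ifp, txr->br))		/* packets waiting in the buf_ring? */
		em_mq_start_locked(ifp, txr, NULL);
#else
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))	/* packets waiting in the IFQ? */
		em_start_locked(ifp, txr);
#endif
	/* Re-enable this queue's interrupt rather than rescheduling a task. */
	E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
	EM_TX_UNLOCK(txr);
}

With this shape, a TX completion never re-queues a task: the only follow-up
work is restarting transmission while the TX lock is still held, which is
what removes the OACTIVE hang described above.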
Diffstat (limited to 'sys/dev/e1000')
-rw-r--r--  sys/dev/e1000/if_em.c  | 96
-rw-r--r--  sys/dev/e1000/if_igb.c | 81
2 files changed, 130 insertions(+), 47 deletions(-)
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index 8ab511d..9c31dad 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -193,13 +193,14 @@ static int em_detach(device_t);
static int em_shutdown(device_t);
static int em_suspend(device_t);
static int em_resume(device_t);
-static void em_start(struct ifnet *);
-static void em_start_locked(struct ifnet *, struct tx_ring *);
#ifdef EM_MULTIQUEUE
static int em_mq_start(struct ifnet *, struct mbuf *);
static int em_mq_start_locked(struct ifnet *,
struct tx_ring *, struct mbuf *);
static void em_qflush(struct ifnet *);
+#else
+static void em_start(struct ifnet *);
+static void em_start_locked(struct ifnet *, struct tx_ring *);
#endif
static int em_ioctl(struct ifnet *, u_long, caddr_t);
static void em_init(void *);
@@ -234,7 +235,7 @@ static void em_enable_intr(struct adapter *);
static void em_disable_intr(struct adapter *);
static void em_update_stats_counters(struct adapter *);
static void em_add_hw_stats(struct adapter *adapter);
-static bool em_txeof(struct tx_ring *);
+static void em_txeof(struct tx_ring *);
static bool em_rxeof(struct rx_ring *, int, int *);
#ifndef __NO_STRICT_ALIGNMENT
static int em_fixup_rx(struct rx_ring *);
@@ -847,6 +848,7 @@ static int
em_resume(device_t dev)
{
struct adapter *adapter = device_get_softc(dev);
+ struct tx_ring *txr = adapter->tx_rings;
struct ifnet *ifp = adapter->ifp;
EM_CORE_LOCK(adapter);
@@ -854,8 +856,22 @@ em_resume(device_t dev)
e1000_resume_workarounds_pchlan(&adapter->hw);
em_init_locked(adapter);
em_init_manageability(adapter);
+
+ if ((ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ EM_TX_LOCK(txr);
+#ifdef EM_MULTIQUEUE
+ if (!drbr_empty(ifp, txr->br))
+ em_mq_start_locked(ifp, txr, NULL);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ em_start_locked(ifp, txr);
+#endif
+ EM_TX_UNLOCK(txr);
+ }
+ }
EM_CORE_UNLOCK(adapter);
- em_start(ifp);
return bus_generic_resume(dev);
}
@@ -959,7 +975,7 @@ em_qflush(struct ifnet *ifp)
}
if_qflush(ifp);
}
-#endif /* EM_MULTIQUEUE */
+#else /* !EM_MULTIQUEUE */
static void
em_start_locked(struct ifnet *ifp, struct tx_ring *txr)
@@ -1020,14 +1036,9 @@ em_start(struct ifnet *ifp)
em_start_locked(ifp, txr);
EM_TX_UNLOCK(txr);
}
- /*
- ** If we went inactive schedule
- ** a task to clean up.
- */
- if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
- taskqueue_enqueue(txr->tq, &txr->tx_task);
return;
}
+#endif /* EM_MULTIQUEUE */
/*********************************************************************
* Ioctl entry point
@@ -1424,7 +1435,8 @@ em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
#else
- em_start_locked(ifp, txr);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ em_start_locked(ifp, txr);
#endif
EM_TX_UNLOCK(txr);
@@ -1497,10 +1509,11 @@ em_handle_que(void *context, int pending)
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
#else
- em_start_locked(ifp, txr);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ em_start_locked(ifp, txr);
#endif
EM_TX_UNLOCK(txr);
- if (more || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
+ if (more) {
taskqueue_enqueue(adapter->tq, &adapter->que_task);
return;
}
@@ -1521,17 +1534,21 @@ em_msix_tx(void *arg)
{
struct tx_ring *txr = arg;
struct adapter *adapter = txr->adapter;
- bool more;
+ struct ifnet *ifp = adapter->ifp;
++txr->tx_irq;
EM_TX_LOCK(txr);
- more = em_txeof(txr);
+ em_txeof(txr);
+#ifdef EM_MULTIQUEUE
+ if (!drbr_empty(ifp, txr->br))
+ em_mq_start_locked(ifp, txr, NULL);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ em_start_locked(ifp, txr);
+#endif
+ /* Reenable this interrupt */
+ E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
EM_TX_UNLOCK(txr);
- if (more)
- taskqueue_enqueue(txr->tq, &txr->tx_task);
- else
- /* Reenable this interrupt */
- E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
return;
}
@@ -1609,7 +1626,8 @@ em_handle_tx(void *context, int pending)
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
#else
- em_start_locked(ifp, txr);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ em_start_locked(ifp, txr);
#endif
E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
EM_TX_UNLOCK(txr);
@@ -1619,6 +1637,7 @@ static void
em_handle_link(void *context, int pending)
{
struct adapter *adapter = context;
+ struct tx_ring *txr = adapter->tx_rings;
struct ifnet *ifp = adapter->ifp;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
@@ -1630,6 +1649,19 @@ em_handle_link(void *context, int pending)
callout_reset(&adapter->timer, hz, em_local_timer, adapter);
E1000_WRITE_REG(&adapter->hw, E1000_IMS,
EM_MSIX_LINK | E1000_IMS_LSC);
+ if (adapter->link_active) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ EM_TX_LOCK(txr);
+#ifdef EM_MULTIQUEUE
+ if (!drbr_empty(ifp, txr->br))
+ em_mq_start_locked(ifp, txr, NULL);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ em_start_locked(ifp, txr);
+#endif
+ EM_TX_UNLOCK(txr);
+ }
+ }
EM_CORE_UNLOCK(adapter);
}
@@ -2902,20 +2934,21 @@ em_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_softc = adapter;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = em_ioctl;
+#ifdef EM_MULTIQUEUE
+ /* Multiqueue stack interface */
+ ifp->if_transmit = em_mq_start;
+ ifp->if_qflush = em_qflush;
+#else
ifp->if_start = em_start;
IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
IFQ_SET_READY(&ifp->if_snd);
+#endif
ether_ifattach(ifp, adapter->hw.mac.addr);
ifp->if_capabilities = ifp->if_capenable = 0;
-#ifdef EM_MULTIQUEUE
- /* Multiqueue stack interface */
- ifp->if_transmit = em_mq_start;
- ifp->if_qflush = em_qflush;
-#endif
ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
ifp->if_capabilities |= IFCAP_TSO4;
@@ -3742,7 +3775,7 @@ em_tso_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
* tx_buffer is put back on the free queue.
*
**********************************************************************/
-static bool
+static void
em_txeof(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
@@ -3762,14 +3795,14 @@ em_txeof(struct tx_ring *txr)
selwakeuppri(&na->tx_si, PI_NET);
EM_CORE_UNLOCK(adapter);
EM_TX_LOCK(txr);
- return (FALSE);
+ return;
}
#endif /* DEV_NETMAP */
/* No work, make sure watchdog is off */
if (txr->tx_avail == adapter->num_tx_desc) {
txr->queue_status = EM_QUEUE_IDLE;
- return (FALSE);
+ return;
}
processed = 0;
@@ -3858,10 +3891,7 @@ em_txeof(struct tx_ring *txr)
/* Disable watchdog if all clean */
if (txr->tx_avail == adapter->num_tx_desc) {
txr->queue_status = EM_QUEUE_IDLE;
- return (FALSE);
}
-
- return (TRUE);
}
diff --git a/sys/dev/e1000/if_igb.c b/sys/dev/e1000/if_igb.c
index b409800..5a5ec81 100644
--- a/sys/dev/e1000/if_igb.c
+++ b/sys/dev/e1000/if_igb.c
@@ -171,13 +171,14 @@ static int igb_detach(device_t);
static int igb_shutdown(device_t);
static int igb_suspend(device_t);
static int igb_resume(device_t);
-static void igb_start(struct ifnet *);
-static void igb_start_locked(struct tx_ring *, struct ifnet *ifp);
#if __FreeBSD_version >= 800000
static int igb_mq_start(struct ifnet *, struct mbuf *);
static int igb_mq_start_locked(struct ifnet *,
struct tx_ring *, struct mbuf *);
static void igb_qflush(struct ifnet *);
+#else
+static void igb_start(struct ifnet *);
+static void igb_start_locked(struct tx_ring *, struct ifnet *ifp);
#endif
static int igb_ioctl(struct ifnet *, u_long, caddr_t);
static void igb_init(void *);
@@ -261,6 +262,7 @@ static void igb_msix_que(void *);
static void igb_msix_link(void *);
static void igb_handle_que(void *context, int pending);
static void igb_handle_link(void *context, int pending);
+static void igb_handle_link_locked(struct adapter *);
static void igb_set_sysctl_value(struct adapter *, const char *,
const char *, int *, int);
@@ -807,6 +809,7 @@ static int
igb_resume(device_t dev)
{
struct adapter *adapter = device_get_softc(dev);
+ struct tx_ring *txr = adapter->tx_rings;
struct ifnet *ifp = adapter->ifp;
IGB_CORE_LOCK(adapter);
@@ -814,9 +817,21 @@ igb_resume(device_t dev)
igb_init_manageability(adapter);
if ((ifp->if_flags & IFF_UP) &&
- (ifp->if_drv_flags & IFF_DRV_RUNNING))
- igb_start(ifp);
-
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ IGB_TX_LOCK(txr);
+#if __FreeBSD_version >= 800000
+ /* Process the stack queue only if not depleted */
+ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) &&
+ !drbr_empty(ifp, txr->br))
+ igb_mq_start_locked(ifp, txr, NULL);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ igb_start_locked(txr, ifp);
+#endif
+ IGB_TX_UNLOCK(txr);
+ }
+ }
IGB_CORE_UNLOCK(adapter);
return bus_generic_resume(dev);
@@ -1330,19 +1345,19 @@ igb_handle_que(void *context, int pending)
more = igb_rxeof(que, adapter->rx_process_limit, NULL);
IGB_TX_LOCK(txr);
- if (igb_txeof(txr))
- more = TRUE;
+ igb_txeof(txr);
#if __FreeBSD_version >= 800000
/* Process the stack queue only if not depleted */
if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) &&
!drbr_empty(ifp, txr->br))
igb_mq_start_locked(ifp, txr, NULL);
#else
- igb_start_locked(txr, ifp);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ igb_start_locked(txr, ifp);
#endif
IGB_TX_UNLOCK(txr);
/* Do we need another? */
- if (more || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
+ if (more) {
taskqueue_enqueue(que->tq, &que->que_task);
return;
}
@@ -1365,8 +1380,35 @@ igb_handle_link(void *context, int pending)
{
struct adapter *adapter = context;
+ IGB_CORE_LOCK(adapter);
+ igb_handle_link_locked(adapter);
+ IGB_CORE_UNLOCK(adapter);
+}
+
+static void
+igb_handle_link_locked(struct adapter *adapter)
+{
+ struct tx_ring *txr = adapter->tx_rings;
+ struct ifnet *ifp = adapter->ifp;
+
+ IGB_CORE_LOCK_ASSERT(adapter);
adapter->hw.mac.get_link_status = 1;
igb_update_link_status(adapter);
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ IGB_TX_LOCK(txr);
+#if __FreeBSD_version >= 800000
+ /* Process the stack queue only if not depleted */
+ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) &&
+ !drbr_empty(ifp, txr->br))
+ igb_mq_start_locked(ifp, txr, NULL);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ igb_start_locked(txr, ifp);
+#endif
+ IGB_TX_UNLOCK(txr);
+ }
+ }
}
/*********************************************************************
@@ -1446,7 +1488,7 @@ igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* Link status change */
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
- igb_handle_link(adapter, 0);
+ igb_handle_link_locked(adapter);
if (reg_icr & E1000_ICR_RXO)
adapter->rx_overruns++;
@@ -1463,7 +1505,8 @@ igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
if (!drbr_empty(ifp, txr->br))
igb_mq_start_locked(ifp, txr, NULL);
#else
- igb_start_locked(txr, ifp);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ igb_start_locked(txr, ifp);
#endif
IGB_TX_UNLOCK(txr);
return POLL_RETURN_COUNT(rx_done);
@@ -1480,16 +1523,26 @@ igb_msix_que(void *arg)
{
struct igb_queue *que = arg;
struct adapter *adapter = que->adapter;
+ struct ifnet *ifp = adapter->ifp;
struct tx_ring *txr = que->txr;
struct rx_ring *rxr = que->rxr;
u32 newitr = 0;
- bool more_tx, more_rx;
+ bool more_rx;
E1000_WRITE_REG(&adapter->hw, E1000_EIMC, que->eims);
++que->irqs;
IGB_TX_LOCK(txr);
- more_tx = igb_txeof(txr);
+ igb_txeof(txr);
+#if __FreeBSD_version >= 800000
+ /* Process the stack queue only if not depleted */
+ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) &&
+ !drbr_empty(ifp, txr->br))
+ igb_mq_start_locked(ifp, txr, NULL);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ igb_start_locked(txr, ifp);
+#endif
IGB_TX_UNLOCK(txr);
more_rx = igb_rxeof(que, adapter->rx_process_limit, NULL);
@@ -1547,7 +1600,7 @@ igb_msix_que(void *arg)
no_calc:
/* Schedule a clean task if needed*/
- if (more_tx || more_rx)
+ if (more_rx)
taskqueue_enqueue(que->tq, &que->que_task);
else
/* Reenable this interrupt */