author		jhb <jhb@FreeBSD.org>	2012-03-30 19:54:48 +0000
committer	jhb <jhb@FreeBSD.org>	2012-03-30 19:54:48 +0000
commit		0f7a6b4eb78584a88dfb32549c62e351d32b33fe (patch)
tree		04b0eb50781f96b1e7e2e14293fb989d98971e1b /sys/dev/e1000/if_em.c
parent		1dd88fec2f6597efdfc693ade6e17b4a47b033c3 (diff)
Fix a few issues with transmit handling in em(4) and igb(4):
- Do not define the foo_start() methods or set if_start in the ifnet if
  multiq transmit is enabled.  Also, set if_transmit and if_qflush before
  ether_ifattach rather than after when multiq transmit is enabled.  This
  helps to ensure that the drivers never try to mix different transmit
  methods.
- Properly restart transmit during resume.  igb(4) was not restarting it
  at all, and em(4) was restarting it even if the link was down and was
  calling the wrong method if multiq transmit was enabled.
- Remove all the 'more' handling for transmit completions.  Transmit
  completion processing does not have a processing limit, so it always
  runs to completion and never has more work to do when it returns.
  Instead, the previous code was returning 'true' any time there were
  packets in the queue that weren't still in the process of being
  transmitted.  The effect was that the driver would continuously
  reschedule a task to process TX completions, in effect running at 100%
  CPU polling the hardware until it finished transmitting all of the
  packets in the ring.  Now it will just wait for the next TX completion
  interrupt.
- Restart packet transmission when the link becomes active.
- Fix the MSI-X queue interrupt handlers to restart packet transmission
  if there are pending packets in the relevant software queue (IFQ or
  buf_ring) after processing TX completions.  This is the root cause of
  the OACTIVE hangs: if the MSI-X queue handler drained all the pending
  packets from the TX ring, nothing would ever restart it.  As such,
  remove some previously-added workarounds that rescheduled a task to
  poll the TX ring any time OACTIVE was set.

Tested by:	sbruno
Reviewed by:	jfv
MFC after:	1 week
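The pattern every handler converges on after this change is worth spelling out: reclaim completed descriptors unconditionally (em_txeof() no longer reports 'more' work), restart transmission only if the software queue (IFQ or buf_ring) still holds packets, then re-arm the queue interrupt. The standalone C sketch below models that flow outside the kernel; tx_ring, swq and the model_* functions are simplified stand-ins, not the driver's real structures or APIs.

/*
 * Standalone model of the reworked TX handler flow (illustrative only,
 * not the em(4) code): reclaim completions, restart transmit if the
 * software queue still has packets, then re-enable the interrupt.
 */
#include <stdio.h>

struct tx_ring {
	int	tx_avail;	/* free descriptors in the hardware ring */
	int	num_desc;	/* total descriptors in the ring */
};

struct swq {
	int	pending;	/* packets waiting in software (IFQ/buf_ring stand-in) */
};

/* Reclaim completed descriptors; returns nothing, like the new em_txeof(). */
static void
model_txeof(struct tx_ring *txr)
{
	/* Assume the hardware finished everything it was handed. */
	txr->tx_avail = txr->num_desc;
}

/* Move packets from the software queue onto the ring while space remains. */
static void
model_start(struct tx_ring *txr, struct swq *q)
{
	while (q->pending > 0 && txr->tx_avail > 0) {
		q->pending--;
		txr->tx_avail--;
	}
}

/* The handler: no 'more' flag, no task rescheduling, just restart + re-arm. */
static void
model_msix_tx(struct tx_ring *txr, struct swq *q)
{
	model_txeof(txr);
	if (q->pending > 0)	/* pending packets? restart transmission */
		model_start(txr, q);
	/* The real handler re-enables its interrupt here (E1000_IMS write). */
}

int
main(void)
{
	struct tx_ring txr = { .tx_avail = 0, .num_desc = 4 };
	struct swq q = { .pending = 6 };

	/* Two completion interrupts drain six queued packets with a 4-deep ring. */
	model_msix_tx(&txr, &q);
	model_msix_tx(&txr, &q);
	printf("pending=%d avail=%d\n", q.pending, txr.tx_avail);
	return (0);
}

Because completion processing always runs to the end of the ring, there is no 'more' state left to poll; the only remaining decision is whether the software queue justifies another start call, which is the check the MSI-X handlers gain in the diff below.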
Diffstat (limited to 'sys/dev/e1000/if_em.c')
-rw-r--r--	sys/dev/e1000/if_em.c	96
1 file changed, 63 insertions, 33 deletions
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index 8ab511d..9c31dad 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -193,13 +193,14 @@ static int em_detach(device_t);
static int em_shutdown(device_t);
static int em_suspend(device_t);
static int em_resume(device_t);
-static void em_start(struct ifnet *);
-static void em_start_locked(struct ifnet *, struct tx_ring *);
#ifdef EM_MULTIQUEUE
static int em_mq_start(struct ifnet *, struct mbuf *);
static int em_mq_start_locked(struct ifnet *,
		    struct tx_ring *, struct mbuf *);
static void em_qflush(struct ifnet *);
+#else
+static void em_start(struct ifnet *);
+static void em_start_locked(struct ifnet *, struct tx_ring *);
#endif
static int em_ioctl(struct ifnet *, u_long, caddr_t);
static void em_init(void *);
@@ -234,7 +235,7 @@ static void em_enable_intr(struct adapter *);
static void em_disable_intr(struct adapter *);
static void em_update_stats_counters(struct adapter *);
static void em_add_hw_stats(struct adapter *adapter);
-static bool em_txeof(struct tx_ring *);
+static void em_txeof(struct tx_ring *);
static bool em_rxeof(struct rx_ring *, int, int *);
#ifndef __NO_STRICT_ALIGNMENT
static int em_fixup_rx(struct rx_ring *);
@@ -847,6 +848,7 @@ static int
em_resume(device_t dev)
{
struct adapter *adapter = device_get_softc(dev);
+ struct tx_ring *txr = adapter->tx_rings;
struct ifnet *ifp = adapter->ifp;
EM_CORE_LOCK(adapter);
@@ -854,8 +856,22 @@ em_resume(device_t dev)
e1000_resume_workarounds_pchlan(&adapter->hw);
em_init_locked(adapter);
em_init_manageability(adapter);
+
+	if ((ifp->if_flags & IFF_UP) &&
+	    (ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) {
+		for (int i = 0; i < adapter->num_queues; i++, txr++) {
+			EM_TX_LOCK(txr);
+#ifdef EM_MULTIQUEUE
+			if (!drbr_empty(ifp, txr->br))
+				em_mq_start_locked(ifp, txr, NULL);
+#else
+			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+				em_start_locked(ifp, txr);
+#endif
+			EM_TX_UNLOCK(txr);
+		}
+	}
EM_CORE_UNLOCK(adapter);
- em_start(ifp);
return bus_generic_resume(dev);
}
@@ -959,7 +975,7 @@ em_qflush(struct ifnet *ifp)
}
if_qflush(ifp);
}
-#endif /* EM_MULTIQUEUE */
+#else /* !EM_MULTIQUEUE */
static void
em_start_locked(struct ifnet *ifp, struct tx_ring *txr)
@@ -1020,14 +1036,9 @@ em_start(struct ifnet *ifp)
em_start_locked(ifp, txr);
EM_TX_UNLOCK(txr);
}
-	/*
-	** If we went inactive schedule
-	** a task to clean up.
-	*/
-	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
-		taskqueue_enqueue(txr->tq, &txr->tx_task);
return;
}
+#endif /* EM_MULTIQUEUE */
/*********************************************************************
* Ioctl entry point
@@ -1424,7 +1435,8 @@ em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
#else
-	em_start_locked(ifp, txr);
+	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		em_start_locked(ifp, txr);
#endif
EM_TX_UNLOCK(txr);
@@ -1497,10 +1509,11 @@ em_handle_que(void *context, int pending)
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
#else
-	em_start_locked(ifp, txr);
+	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		em_start_locked(ifp, txr);
#endif
EM_TX_UNLOCK(txr);
- if (more || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
+ if (more) {
taskqueue_enqueue(adapter->tq, &adapter->que_task);
return;
}
@@ -1521,17 +1534,21 @@ em_msix_tx(void *arg)
{
struct tx_ring *txr = arg;
struct adapter *adapter = txr->adapter;
- bool more;
+ struct ifnet *ifp = adapter->ifp;
++txr->tx_irq;
EM_TX_LOCK(txr);
-	more = em_txeof(txr);
+	em_txeof(txr);
+#ifdef EM_MULTIQUEUE
+	if (!drbr_empty(ifp, txr->br))
+		em_mq_start_locked(ifp, txr, NULL);
+#else
+	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		em_start_locked(ifp, txr);
+#endif
+	/* Reenable this interrupt */
+	E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
EM_TX_UNLOCK(txr);
-	if (more)
-		taskqueue_enqueue(txr->tq, &txr->tx_task);
-	else
-		/* Reenable this interrupt */
-		E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
return;
}
@@ -1609,7 +1626,8 @@ em_handle_tx(void *context, int pending)
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
#else
-	em_start_locked(ifp, txr);
+	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		em_start_locked(ifp, txr);
#endif
E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
EM_TX_UNLOCK(txr);
@@ -1619,6 +1637,7 @@ static void
em_handle_link(void *context, int pending)
{
struct adapter *adapter = context;
+ struct tx_ring *txr = adapter->tx_rings;
struct ifnet *ifp = adapter->ifp;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
@@ -1630,6 +1649,19 @@ em_handle_link(void *context, int pending)
callout_reset(&adapter->timer, hz, em_local_timer, adapter);
E1000_WRITE_REG(&adapter->hw, E1000_IMS,
EM_MSIX_LINK | E1000_IMS_LSC);
+	if (adapter->link_active) {
+		for (int i = 0; i < adapter->num_queues; i++, txr++) {
+			EM_TX_LOCK(txr);
+#ifdef EM_MULTIQUEUE
+			if (!drbr_empty(ifp, txr->br))
+				em_mq_start_locked(ifp, txr, NULL);
+#else
+			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+				em_start_locked(ifp, txr);
+#endif
+			EM_TX_UNLOCK(txr);
+		}
+	}
EM_CORE_UNLOCK(adapter);
}
@@ -2902,20 +2934,21 @@ em_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_softc = adapter;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = em_ioctl;
+#ifdef EM_MULTIQUEUE
+ /* Multiqueue stack interface */
+ ifp->if_transmit = em_mq_start;
+ ifp->if_qflush = em_qflush;
+#else
ifp->if_start = em_start;
IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
IFQ_SET_READY(&ifp->if_snd);
+#endif
ether_ifattach(ifp, adapter->hw.mac.addr);
ifp->if_capabilities = ifp->if_capenable = 0;
-#ifdef EM_MULTIQUEUE
- /* Multiqueue stack interface */
- ifp->if_transmit = em_mq_start;
- ifp->if_qflush = em_qflush;
-#endif
ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
ifp->if_capabilities |= IFCAP_TSO4;
@@ -3742,7 +3775,7 @@ em_tso_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
* tx_buffer is put back on the free queue.
*
**********************************************************************/
-static bool
+static void
em_txeof(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
@@ -3762,14 +3795,14 @@ em_txeof(struct tx_ring *txr)
selwakeuppri(&na->tx_si, PI_NET);
EM_CORE_UNLOCK(adapter);
EM_TX_LOCK(txr);
- return (FALSE);
+ return;
}
#endif /* DEV_NETMAP */
/* No work, make sure watchdog is off */
if (txr->tx_avail == adapter->num_tx_desc) {
txr->queue_status = EM_QUEUE_IDLE;
- return (FALSE);
+ return;
}
processed = 0;
@@ -3858,10 +3891,7 @@ em_txeof(struct tx_ring *txr)
/* Disable watchdog if all clean */
if (txr->tx_avail == adapter->num_tx_desc) {
txr->queue_status = EM_QUEUE_IDLE;
- return (FALSE);
}
-
- return (TRUE);
}