author     David Dillow <dave@thedillows.org>      2009-05-22 15:29:34 +0000
committer  David S. Miller <davem@davemloft.net>   2009-05-25 22:55:26 -0700
commit     f11a377b3f4e897d11f0e8d1fc688667e2f19708 (patch)
tree       80a6a29154a1af059e6b8d917b7858d6c897b906 /drivers/net/r8169.c
parent     c80a5cdfc5ca6533cb893154f546370da1fdb8f0 (diff)
r8169: avoid losing MSI interrupts
The 8169 chip only generates MSI interrupts when all enabled event sources are quiescent and one or more sources transition to active. If not all of the active events are acknowledged, or a new event becomes active while the existing ones are being cleared in the handler, we will not see a new interrupt.

The current interrupt handler masks off the Rx and Tx events once the NAPI handler has been scheduled, which opens a race window in which we can get another Rx or Tx event that is never ACKed, stopping all activity until the link is reset (ifconfig down/up).

Fix this by always ACKing all event sources, and looping in the handler until all sources are quiescent.

Signed-off-by: David Dillow <dave@thedillows.org>
Tested-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
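Before reading the diff, the shape of the fix may be easier to see in a minimal, self-contained sketch of the ack-and-loop pattern the commit message describes. This is not the driver's code: struct my_dev, read_intr_status(), ack_intr_status() and handle_events() are hypothetical placeholders standing in for the r8169 register accessors and event dispatch.

#include <linux/types.h>
#include <linux/interrupt.h>

/* Hypothetical device type and helpers; placeholders only. */
struct my_dev;
static u16 read_intr_status(struct my_dev *dev);
static void ack_intr_status(struct my_dev *dev, u16 status);
static void handle_events(struct my_dev *dev, u16 status);

static irqreturn_t msi_loop_handler(int irq, void *dev_instance)
{
	struct my_dev *dev = dev_instance;
	int handled = 0;
	u16 status = read_intr_status(dev);

	while (status && status != 0xffff) {	/* 0xffff: device gone/hotplug */
		handled = 1;

		handle_events(dev, status);	/* link change, NAPI scheduling, ... */

		/*
		 * Ack every source we saw, then re-read: a source that went
		 * active in the meantime would otherwise never produce a new
		 * MSI edge, stalling the device until the link is reset.
		 */
		ack_intr_status(dev, status);
		status = read_intr_status(dev);
	}

	return IRQ_RETVAL(handled);
}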
Diffstat (limited to 'drivers/net/r8169.c')
-rw-r--r--  drivers/net/r8169.c | 102
1 file changed, 57 insertions(+), 45 deletions(-)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0b6e8c8..8247a94 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3554,54 +3554,64 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
int handled = 0;
int status;
+ /* loop handling interrupts until we have no new ones or
+ * we hit an invalid/hotplug case.
+ */
status = RTL_R16(IntrStatus);
+ while (status && status != 0xffff) {
+ handled = 1;
- /* hotplug/major error/no more work/shared irq */
- if ((status == 0xffff) || !status)
- goto out;
-
- handled = 1;
+ /* Handle all of the error cases first. These will reset
+ * the chip, so just exit the loop.
+ */
+ if (unlikely(!netif_running(dev))) {
+ rtl8169_asic_down(ioaddr);
+ break;
+ }
- if (unlikely(!netif_running(dev))) {
- rtl8169_asic_down(ioaddr);
- goto out;
- }
+ /* Work around for rx fifo overflow */
+ if (unlikely(status & RxFIFOOver) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+ netif_stop_queue(dev);
+ rtl8169_tx_timeout(dev);
+ break;
+ }
- status &= tp->intr_mask;
- RTL_W16(IntrStatus,
- (status & RxFIFOOver) ? (status | RxOverflow) : status);
+ if (unlikely(status & SYSErr)) {
+ rtl8169_pcierr_interrupt(dev);
+ break;
+ }
- if (!(status & tp->intr_event))
- goto out;
+ if (status & LinkChg)
+ rtl8169_check_link_status(dev, tp, ioaddr);
- /* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
- netif_stop_queue(dev);
- rtl8169_tx_timeout(dev);
- goto out;
- }
+ /* We need to see the latest version of tp->intr_mask to
+ * avoid ignoring an MSI interrupt and having to wait for
+ * another event which may never come.
+ */
+ smp_rmb();
+ if (status & tp->intr_mask & tp->napi_event) {
+ RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
+ tp->intr_mask = ~tp->napi_event;
+
+ if (likely(napi_schedule_prep(&tp->napi)))
+ __napi_schedule(&tp->napi);
+ else if (netif_msg_intr(tp)) {
+ printk(KERN_INFO "%s: interrupt %04x in poll\n",
+ dev->name, status);
+ }
+ }
- if (unlikely(status & SYSErr)) {
- rtl8169_pcierr_interrupt(dev);
- goto out;
+ /* We only get a new MSI interrupt when all active irq
+ * sources on the chip have been acknowledged. So, ack
+ * everything we've seen and check if new sources have become
+ * active to avoid blocking all interrupts from the chip.
+ */
+ RTL_W16(IntrStatus,
+ (status & RxFIFOOver) ? (status | RxOverflow) : status);
+ status = RTL_R16(IntrStatus);
}
- if (status & LinkChg)
- rtl8169_check_link_status(dev, tp, ioaddr);
-
- if (status & tp->napi_event) {
- RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
- tp->intr_mask = ~tp->napi_event;
-
- if (likely(napi_schedule_prep(&tp->napi)))
- __napi_schedule(&tp->napi);
- else if (netif_msg_intr(tp)) {
- printk(KERN_INFO "%s: interrupt %04x in poll\n",
- dev->name, status);
- }
- }
-out:
return IRQ_RETVAL(handled);
}
@@ -3617,13 +3627,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(napi);
- tp->intr_mask = 0xffff;
- /*
- * 20040426: the barrier is not strictly required but the
- * behavior of the irq handler could be less predictable
- * without it. Btw, the lack of flush for the posted pci
- * write is safe - FR
+
+ /* We need to force the visibility of tp->intr_mask
+ * for other CPUs, as we can lose an MSI interrupt
+ * and potentially wait for a retransmit timeout if we don't.
+ * The posted write to IntrMask is safe, as it will
+ * eventually make it to the chip and we won't lose anything
+ * until it does.
*/
+ tp->intr_mask = 0xffff;
smp_wmb();
RTL_W16(IntrMask, tp->intr_event);
}