 sys/dev/pccbb/pccbb.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 47 insertions(+), 26 deletions(-)
diff --git a/sys/dev/pccbb/pccbb.c b/sys/dev/pccbb/pccbb.c
index 78f8704..778614e 100644
--- a/sys/dev/pccbb/pccbb.c
+++ b/sys/dev/pccbb/pccbb.c
@@ -369,12 +369,6 @@ cbb_setup_intr(device_t dev, device_t child, struct resource *irq,
         if (filt == NULL && intr == NULL)
                 return (EINVAL);
-        /*
-         * Well, this is no longer strictly true.  You can have multiple
-         * FAST ISRs, but can't mix fast and slow, so we have to assume
-         * least common denominator until the base system supports mixing
-         * and matching better.
-         */
         ih = malloc(sizeof(struct cbb_intrhand), M_DEVBUF, M_NOWAIT);
         if (ih == NULL)
                 return (ENOMEM);
@@ -595,25 +589,24 @@ cbb_removal(struct cbb_softc *sc)
 /************************************************************************/
 
 /*
- * Since we touch hardware in the worst case, we don't need to use atomic
- * ops on the CARD_OK tests.  They would save us a trip to the hardware
- * if CARD_OK was recently cleared and the caches haven't updated yet.
- * However, an atomic op costs between 100-200 CPU cycles.  On a 3GHz
- * machine, this is about 33-66ns, whereas a trip the the hardware
- * is about that.  On slower machines, the cost is even higher, so the
- * trip to the hardware is cheaper and achieves the same ends that
- * a fully locked operation would give us.
+ * Since we touch hardware in the worst case, we don't need to use atomic ops
+ * on the CARD_OK tests.  They would save us a trip to the hardware if CARD_OK
+ * was recently cleared and the caches haven't updated yet.  However, an
+ * atomic op costs between 100-200 CPU cycles.  On a 3GHz machine, this is
+ * about 33-66ns, whereas a trip the the hardware is about that.  On slower
+ * machines, the cost is even higher, so the trip to the hardware is cheaper
+ * and achieves the same ends that a fully locked operation would give us.
  *
- * This is a separate routine because we'd have to use locking and/or
- * other synchronization in cbb_intr to do this there.  That would be
- * even more expensive.
+ * This is a separate routine because we'd have to use locking and/or other
+ * synchronization in cbb_intr to do this there.  That would be even more
+ * expensive.
  *
- * I need to investigate what this means for a SMP machine with multiple
- * CPUs servicing the ISR when an eject happens.  In the case of a dirty
- * eject, CD glitches and we might read 'card present' from the hardware
- * due to this jitter.  If we assumed that cbb_intr() ran before
- * cbb_func_intr(), we could just check the SOCKET_MASK register and if
- * CD changes were clear there, then we'd know the card was gone.
+ * I need to investigate what this means for a SMP machine with multiple CPUs
+ * servicing the ISR when an eject happens.  In the case of a dirty eject, CD
+ * glitches and we might read 'card present' from the hardware due to this
+ * jitter.  If we assumed that cbb_intr() ran before cbb_func_intr(), we could
+ * just check the SOCKET_MASK register and if CD changes were clear there,
+ * then we'd know the card was gone.
  */
 static int
 cbb_func_filt(void *arg)
@@ -628,12 +621,12 @@ cbb_func_filt(void *arg)
                 return (FILTER_STRAY);
         if (!CBB_CARD_PRESENT(cbb_get(sc, CBB_SOCKET_STATE))) {
                 sc->flags &= ~CBB_CARD_OK;
-                return (FILTER_STRAY);
+                return (FILTER_HANDLED);
         }
         /*
-         * nb: don't have to check for giant or not, since that's done
-         * in the ISR dispatch
+         * nb: don't have to check for giant or not, since that's done in the
+         * ISR dispatch and one can't hold Giant in a filter anyway...
          */
         if (ih->filt != NULL)
                 return ((*ih->filt)(ih->arg));
@@ -644,7 +637,35 @@ static void
 cbb_func_intr(void *arg)
 {
         struct cbb_intrhand *ih = (struct cbb_intrhand *)arg;
+        struct cbb_softc *sc = ih->sc;
 
+        /*
+         * While this check may seem redundant, it helps close a race
+         * condition.  If the card is ejected after the filter runs, but
+         * before this ISR can be scheduled, then we need to do the same
+         * filtering to prevent the card's ISR from being called.  One could
+         * argue that the card's ISR should be able to cope, but experience
+         * has shown they can't always.  This mitigates the problem by making
+         * the race quite a bit smaller.  Properly written client ISRs should
+         * cope with the card going away in the middle of the ISR.  We assume
+         * that drivers that are sophisticated enough to use filters don't
+         * need our protection.  This also allows us to ensure they *ARE*
+         * called if their filter said they needed to be called.
+         */
+        if (ih->filt == NULL) {
+                if ((sc->flags & CBB_CARD_OK) == 0)
+                        return;
+                if (!CBB_CARD_PRESENT(cbb_get(sc, CBB_SOCKET_STATE))) {
+                        sc->flags &= ~CBB_CARD_OK;
+                        return;
+                }
+        }
+
+        /*
+         * Call the registered ithread interrupt handler.  This entire routine
+         * will be called with Giant if this isn't an MP safe driver, or not
+         * if it is.  Either way, we don't have to worry.
+         */
         ih->intr(ih->arg);
 }
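
The new check in cbb_func_intr() only protects drivers that register a plain ithread handler; the commit's comment still expects client ISRs to cope with the card vanishing mid-ISR, and assumes filter-using drivers can fend for themselves. Below is a rough sketch of what that looks like in a hypothetical CardBus child driver. The mydev_* names, the MYDEV_STATUS offset, and the register semantics are made up for illustration only; bus_setup_intr(), bus_read_4(), the FILTER_* return values, and the INTR_* flags are the real FreeBSD interfaces involved.

/*
 * Hypothetical CardBus child driver interrupt code.  The mydev_* names and
 * the MYDEV_STATUS register are illustrative, not part of pccbb.c.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/resource.h>

struct mydev_softc {
        device_t         dev;
        struct resource *mem_res;       /* card register window */
        struct resource *irq_res;       /* shared CardBus interrupt */
        void            *intrhand;
};

#define MYDEV_STATUS    0x00            /* illustrative status register */

/* Filter: runs in primary interrupt context; may not sleep or take Giant. */
static int
mydev_filt(void *arg)
{
        struct mydev_softc *sc = arg;
        uint32_t status;

        status = bus_read_4(sc->mem_res, MYDEV_STATUS);
        if (status == 0xffffffff)       /* card gone: reads float high */
                return (FILTER_STRAY);
        if (status == 0)                /* shared line, not our interrupt */
                return (FILTER_STRAY);
        return (FILTER_SCHEDULE_THREAD);
}

/*
 * Ithread handler: re-read the hardware, since the card may have been
 * ejected between the filter running and this handler being scheduled.
 */
static void
mydev_intr(void *arg)
{
        struct mydev_softc *sc = arg;
        uint32_t status;

        status = bus_read_4(sc->mem_res, MYDEV_STATUS);
        if (status == 0xffffffff || status == 0)
                return;
        /* ... service and acknowledge the interrupt here ... */
}

/* Registration, typically from the driver's attach routine. */
static int
mydev_setup_interrupt(struct mydev_softc *sc)
{

        return (bus_setup_intr(sc->dev, sc->irq_res,
            INTR_TYPE_MISC | INTR_MPSAFE, mydev_filt, mydev_intr,
            sc, &sc->intrhand));
}

With a registration like this, cbb_func_filt() forwards the interrupt to mydev_filt(), and because ih->filt is non-NULL, cbb_func_intr() skips its CBB_CARD_OK recheck and always calls mydev_intr() when the filter asked for the thread to run. A driver that passes NULL for the filter instead relies on the recheck this commit adds.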