summaryrefslogtreecommitdiffstats
path: root/sys/dev/fxp/if_fxp.c
diff options
context:
space:
mode:
authormux <mux@FreeBSD.org>2004-06-02 22:59:57 +0000
committermux <mux@FreeBSD.org>2004-06-02 22:59:57 +0000
commit4bd9617d60a6bf1614ec12fecc67a704393b41fa (patch)
treee6606951cd0163ac105b4782b2bdc3001bbe4ac7 /sys/dev/fxp/if_fxp.c
parentaaf8d85c22e2152aca2bf163140195ce51d71c48 (diff)
downloadFreeBSD-src-4bd9617d60a6bf1614ec12fecc67a704393b41fa.zip
FreeBSD-src-4bd9617d60a6bf1614ec12fecc67a704393b41fa.tar.gz
Abstract the locking in fxp(4) a bit more by using macros for
mtx_assert() and mtx_owned(), as is done in other places — for instance, proc locking.
Diffstat (limited to 'sys/dev/fxp/if_fxp.c')
-rw-r--r--sys/dev/fxp/if_fxp.c13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/sys/dev/fxp/if_fxp.c b/sys/dev/fxp/if_fxp.c
index fd87125..9d6c8ea 100644
--- a/sys/dev/fxp/if_fxp.c
+++ b/sys/dev/fxp/if_fxp.c
@@ -863,7 +863,7 @@ fxp_release(struct fxp_softc *sc)
struct fxp_tx *txp;
int i;
- mtx_assert(&sc->sc_mtx, MA_NOTOWNED);
+ FXP_LOCK_ASSERT(sc, MA_NOTOWNED);
KASSERT(sc->ih == NULL,
("fxp_release() called with intr handle still active"));
if (sc->miibus)
@@ -1295,7 +1295,7 @@ fxp_start_body(struct ifnet *ifp)
struct mbuf *mb_head;
int error;
- mtx_assert(&sc->sc_mtx, MA_OWNED);
+ FXP_LOCK_ASSERT(sc, MA_OWNED);
/*
* See if we need to suspend xmit until the multicast filter
* has been reprogrammed (which can only be done at the head
@@ -1634,7 +1634,7 @@ fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, u_int8_t statack,
struct fxp_rfa *rfa;
int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
- mtx_assert(&sc->sc_mtx, MA_OWNED);
+ FXP_LOCK_ASSERT(sc, MA_OWNED);
if (rnr)
sc->rnr++;
#ifdef DEVICE_POLLING
@@ -1993,7 +1993,7 @@ fxp_init_body(struct fxp_softc *sc)
struct fxp_cb_mcs *mcsp;
int i, prm, s;
- mtx_assert(&sc->sc_mtx, MA_OWNED);
+ FXP_LOCK_ASSERT(sc, MA_OWNED);
s = splimp();
/*
* Cancel any pending I/O
@@ -2434,7 +2434,7 @@ fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
* Detaching causes us to call ioctl with the mutex owned. Preclude
* that by saying we're busy if the lock is already held.
*/
- if (mtx_owned(&sc->sc_mtx))
+ if (FXP_LOCKED(sc))
return (EBUSY);
FXP_LOCK(sc);
@@ -2517,7 +2517,7 @@ fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
FXP_UNLOCK(sc);
error = ether_ioctl(ifp, command, data);
}
- if (mtx_owned(&sc->sc_mtx))
+ if (FXP_LOCKED(sc))
FXP_UNLOCK(sc);
splx(s);
return (error);
@@ -2579,6 +2579,7 @@ fxp_mc_setup(struct fxp_softc *sc)
struct fxp_tx *txp;
int count;
+ FXP_LOCK_ASSERT(sc, MA_OWNED);
/*
* If there are queued commands, we must wait until they are all
* completed. If we are already waiting, then add a NOP command
OpenPOWER on IntegriCloud