author    attilio <attilio@FreeBSD.org>  2008-01-08 23:48:31 +0000
committer attilio <attilio@FreeBSD.org>  2008-01-08 23:48:31 +0000
commit    9e182da1916d63733010443cc5d45688fe47c3e7 (patch)
tree      8b4bc5a73ee6f8eec360e2bca8b5a48ec5a6bea6 /sys
parent    1975c095433557ab04b0b28101fb49a9320af637 (diff)
Remove explicit calls to lockmgr() with a NULL thread argument.

lockmgr() can now only be called passing curthread, and the KASSERT()
is strengthened accordingly.

In order to support on-the-fly owner switching, the new function
lockmgr_disown() has been introduced; it is now used in BUF_KERNPROC().
The KPI has therefore changed, and the FreeBSD version will be bumped
soon.  Unlike the previous code, we assume an idle thread cannot try
to acquire the lockmgr lock, as it cannot sleep, so the corresponding
check[1] is dropped from BUF_KERNPROC().

Tested by: kris

[1] kib asked for a KASSERT about this condition in lockmgr_disown(),
but after thinking about it, since this is a well-known general rule,
I found it not really necessary.
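For callers the conversion is mechanical: every NULL thread argument
becomes curthread, and handing a lock over to the kernel is now done
through lockmgr_disown().  A minimal sketch under those assumptions
follows; the lock variable example_lk and the surrounding function are
illustrative, not part of this diff.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/proc.h>			/* curthread */

static struct lock example_lk;		/* hypothetical, initialized elsewhere */

static void
example_locking(void)
{
	/* Old style, now rejected by the upgraded KASSERT(): */
	/* lockmgr(&example_lk, LK_EXCLUSIVE, NULL, NULL); */

	/* New style; the owner thread must be curthread: */
	lockmgr(&example_lk, LK_EXCLUSIVE, NULL, curthread);
	lockmgr(&example_lk, LK_RELEASE, NULL, curthread);
}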
Diffstat (limited to 'sys')
 sys/dev/usb/ehci.c      |  4
 sys/dev/usb/if_udav.c   |  4
 sys/fs/hpfs/hpfs_subr.h |  6
 sys/fs/ntfs/ntfs_subr.c | 16
 sys/kern/kern_lock.c    | 65
 sys/sys/buf.h           |  5
 sys/sys/lockmgr.h       |  1
 7 files changed, 60 insertions(+), 41 deletions(-)
diff --git a/sys/dev/usb/ehci.c b/sys/dev/usb/ehci.c
index 6b99d54..127d0f7 100644
--- a/sys/dev/usb/ehci.c
+++ b/sys/dev/usb/ehci.c
@@ -1560,7 +1560,7 @@ ehci_sync_hc(ehci_softc_t *sc)
}
DPRINTFN(2,("ehci_sync_hc: enter\n"));
/* get doorbell */
- lockmgr(&sc->sc_doorbell_lock, LK_EXCLUSIVE, NULL, NULL);
+ lockmgr(&sc->sc_doorbell_lock, LK_EXCLUSIVE, NULL, curthread);
s = splhardusb();
/* ask for doorbell */
EOWRITE4(sc, EHCI_USBCMD, EOREAD4(sc, EHCI_USBCMD) | EHCI_CMD_IAAD);
@@ -1571,7 +1571,7 @@ ehci_sync_hc(ehci_softc_t *sc)
EOREAD4(sc, EHCI_USBCMD), EOREAD4(sc, EHCI_USBSTS)));
splx(s);
/* release doorbell */
- lockmgr(&sc->sc_doorbell_lock, LK_RELEASE, NULL, NULL);
+ lockmgr(&sc->sc_doorbell_lock, LK_RELEASE, NULL, curthread);
#ifdef DIAGNOSTIC
if (error)
printf("ehci_sync_hc: tsleep() = %d\n", error);
diff --git a/sys/dev/usb/if_udav.c b/sys/dev/usb/if_udav.c
index cc42477..869132c 100644
--- a/sys/dev/usb/if_udav.c
+++ b/sys/dev/usb/if_udav.c
@@ -1780,7 +1780,7 @@ udav_lock_mii(struct udav_softc *sc)
#if defined(__NetBSD__)
lockmgr(&sc->sc_mii_lock, LK_EXCLUSIVE, NULL);
#elif defined(__FreeBSD__)
- lockmgr(&sc->sc_mii_lock, LK_EXCLUSIVE, NULL, NULL);
+ lockmgr(&sc->sc_mii_lock, LK_EXCLUSIVE, NULL, curthread);
#endif
}
@@ -1793,7 +1793,7 @@ udav_unlock_mii(struct udav_softc *sc)
#if defined(__NetBSD__)
lockmgr(&sc->sc_mii_lock, LK_RELEASE, NULL);
#elif defined(__FreeBSD__)
- lockmgr(&sc->sc_mii_lock, LK_RELEASE, NULL, NULL);
+ lockmgr(&sc->sc_mii_lock, LK_RELEASE, NULL, curthread);
#endif
if (--sc->sc_refcnt < 0)
usb_detach_wakeup(sc->sc_dev);
diff --git a/sys/fs/hpfs/hpfs_subr.h b/sys/fs/hpfs/hpfs_subr.h
index 4cddcf7..dd00432 100644
--- a/sys/fs/hpfs/hpfs_subr.h
+++ b/sys/fs/hpfs/hpfs_subr.h
@@ -78,8 +78,10 @@ int hpfs_breadstruct (struct hpfsmount *, lsn_t, u_int, u_int32_t,
hpfs_breadstruct(hpmp, lsn, D_BSIZE, D_MAGIC, bpp)
#if 0
-#define hpfs_hplock(hp, p) lockmgr(&(hp)->h_intlock, LK_EXCLUSIVE, (p), NULL)
-#define hpfs_hpunlock(hp, p) lockmgr(&(hp)->h_intlock, LK_RELEASE, (p), NULL)
+#define hpfs_hplock(hp, p) \
+ lockmgr(&(hp)->h_intlock, LK_EXCLUSIVE, (p), curthread)
+#define hpfs_hpunlock(hp, p) \
+ lockmgr(&(hp)->h_intlock, LK_RELEASE, (p), curthread)
#endif
int hpfs_hpbmap (struct hpfsnode *, daddr_t, daddr_t *, int *);
diff --git a/sys/fs/ntfs/ntfs_subr.c b/sys/fs/ntfs/ntfs_subr.c
index 972cfb0..330252f 100644
--- a/sys/fs/ntfs/ntfs_subr.c
+++ b/sys/fs/ntfs/ntfs_subr.c
@@ -359,7 +359,7 @@ ntfs_ntget(ip)
mtx_lock(&ip->i_interlock);
ip->i_usecount++;
lockmgr(&ip->i_lock, LK_EXCLUSIVE | LK_INTERLOCK, &ip->i_interlock,
- NULL);
+ curthread);
return 0;
}
@@ -391,7 +391,7 @@ ntfs_ntlookup(
return (0);
}
} while (lockmgr(&ntfs_hashlock, LK_EXCLUSIVE | LK_SLEEPFAIL, NULL,
- NULL));
+ curthread));
MALLOC(ip, struct ntnode *, sizeof(struct ntnode), M_NTFSNTNODE,
M_WAITOK | M_ZERO);
@@ -413,7 +413,7 @@ ntfs_ntlookup(
ntfs_nthashins(ip);
- lockmgr(&ntfs_hashlock, LK_RELEASE, NULL, NULL);
+ lockmgr(&ntfs_hashlock, LK_RELEASE, NULL, curthread);
*ipp = ip;
@@ -450,7 +450,7 @@ ntfs_ntput(ip)
if (ip->i_usecount > 0) {
lockmgr(&ip->i_lock, LK_RELEASE|LK_INTERLOCK, &ip->i_interlock,
- NULL);
+ curthread);
return;
}
@@ -1982,7 +1982,7 @@ ntfs_toupper_use(mp, ntmp)
struct vnode *vp;
/* get exclusive access */
- lockmgr(&ntfs_toupper_lock, LK_EXCLUSIVE, NULL, NULL);
+ lockmgr(&ntfs_toupper_lock, LK_EXCLUSIVE, NULL, curthread);
/* only read the translation data from a file if it hasn't been
* read already */
@@ -2005,7 +2005,7 @@ ntfs_toupper_use(mp, ntmp)
out:
ntfs_toupper_usecount++;
- lockmgr(&ntfs_toupper_lock, LK_RELEASE, NULL, NULL);
+ lockmgr(&ntfs_toupper_lock, LK_RELEASE, NULL, curthread);
return (error);
}
@@ -2017,7 +2017,7 @@ void
ntfs_toupper_unuse()
{
/* get exclusive access */
- lockmgr(&ntfs_toupper_lock, LK_EXCLUSIVE, NULL, NULL);
+ lockmgr(&ntfs_toupper_lock, LK_EXCLUSIVE, NULL, curthread);
ntfs_toupper_usecount--;
if (ntfs_toupper_usecount == 0) {
@@ -2032,7 +2032,7 @@ ntfs_toupper_unuse()
#endif
/* release the lock */
- lockmgr(&ntfs_toupper_lock, LK_RELEASE, NULL, NULL);
+ lockmgr(&ntfs_toupper_lock, LK_RELEASE, NULL, curthread);
}
int
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 8d1a219..6e5a7b3 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -105,7 +105,7 @@ unlock_lockmgr(struct lock_object *lock)
panic("lockmgr locks do not support sleep interlocking");
}
-#define COUNT(td, x) if ((td)) (td)->td_locks += (x)
+#define COUNT(td, x) ((td)->td_locks += (x))
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
LK_SHARE_NONZERO | LK_WAIT_NONZERO)
@@ -194,24 +194,18 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
{
int error;
- struct thread *thr;
int extflags, lockflags;
int contested = 0;
uint64_t waitstart = 0;
/*
- * Lock owner can only be curthread or, at least, NULL in order to
- * have a deadlock free implementation of the primitive.
+ * Lock owner can only be curthread in order to have a deadlock
+ * free implementation of the primitive.
*/
- KASSERT(td == NULL || td == curthread,
- ("lockmgr: owner thread (%p) cannot differ from curthread or NULL",
- td));
+ KASSERT(td == curthread,
+ ("lockmgr: owner thread (%p) cannot differ from curthread", td));
error = 0;
- if (td == NULL)
- thr = LK_KERNPROC;
- else
- thr = td;
if ((flags & LK_INTERNAL) == 0)
mtx_lock(lkp->lk_interlock);
@@ -260,7 +254,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
* lock requests or upgrade requests ( but not the exclusive
* lock itself ).
*/
- if (lkp->lk_lockholder != thr) {
+ if (lkp->lk_lockholder != td) {
lockflags = LK_HAVE_EXCL;
if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
@@ -286,10 +280,10 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
/* FALLTHROUGH downgrade */
case LK_DOWNGRADE:
- KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
+ KASSERT(lkp->lk_lockholder == td && lkp->lk_exclusivecount != 0,
("lockmgr: not holding exclusive lock "
"(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
- lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
+ lkp->lk_lockholder, td, lkp->lk_exclusivecount));
sharelock(td, lkp, lkp->lk_exclusivecount);
COUNT(td, -lkp->lk_exclusivecount);
lkp->lk_exclusivecount = 0;
@@ -308,7 +302,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
* after the upgrade). If we return an error, the file
* will always be unlocked.
*/
- if (lkp->lk_lockholder == thr)
+ if (lkp->lk_lockholder == td)
panic("lockmgr: upgrade exclusive lock");
if (lkp->lk_sharecount <= 0)
panic("lockmgr: upgrade without shared");
@@ -342,7 +336,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_flags |= LK_HAVE_EXCL;
- lkp->lk_lockholder = thr;
+ lkp->lk_lockholder = td;
lkp->lk_exclusivecount = 1;
COUNT(td, 1);
lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
@@ -362,7 +356,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
/* FALLTHROUGH exclusive request */
case LK_EXCLUSIVE:
- if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
+ if (lkp->lk_lockholder == td) {
/*
* Recursive lock.
*/
@@ -400,7 +394,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
break;
}
lkp->lk_flags |= LK_HAVE_EXCL;
- lkp->lk_lockholder = thr;
+ lkp->lk_lockholder = td;
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
@@ -413,10 +407,10 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
case LK_RELEASE:
if (lkp->lk_exclusivecount != 0) {
- if (lkp->lk_lockholder != thr &&
+ if (lkp->lk_lockholder != td &&
lkp->lk_lockholder != LK_KERNPROC) {
panic("lockmgr: thread %p, not %s %p unlocking",
- thr, "exclusive lock holder",
+ td, "exclusive lock holder",
lkp->lk_lockholder);
}
if (lkp->lk_lockholder != LK_KERNPROC)
@@ -433,7 +427,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
shareunlock(td, lkp, 1);
else {
printf("lockmgr: thread %p unlocking unheld lock\n",
- thr);
+ td);
kdb_backtrace();
}
@@ -448,14 +442,14 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
* check for holding a shared lock, but at least we can
* check for an exclusive one.
*/
- if (lkp->lk_lockholder == thr)
+ if (lkp->lk_lockholder == td)
panic("lockmgr: draining against myself");
error = acquiredrain(lkp, extflags);
if (error)
break;
lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
- lkp->lk_lockholder = thr;
+ lkp->lk_lockholder = td;
lkp->lk_exclusivecount = 1;
COUNT(td, 1);
#if defined(DEBUG_LOCKS)
@@ -544,6 +538,31 @@ lockdestroy(lkp)
}
/*
+ * Disown the lockmgr.
+ */
+void
+lockmgr_disown(struct lock *lkp)
+{
+ struct thread *td;
+
+ td = curthread;
+ KASSERT(lkp->lk_exclusivecount || lkp->lk_lockholder == LK_KERNPROC,
+ ("%s: %p lockmgr must be exclusively locked", __func__, lkp));
+ KASSERT(lkp->lk_lockholder == td,
+ ("%s: %p lockmgr must be locked by curthread (%p)", __func__, lkp,
+ td));
+
+ /*
+ * Drop the lock reference and switch the owner. This will result
+ * in an atomic operation because td_locks is only accessed by
+ * curthread and lk_lockholder only needs one write.
+ */
+ if (lkp->lk_lockholder == td)
+ td->td_locks--;
+ lkp->lk_lockholder = LK_KERNPROC;
+}
+
+/*
* Determine the status of a lock.
*/
int
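The practical effect of lockmgr_disown() is visible in the LK_RELEASE
case above, which tolerates lk_lockholder == LK_KERNPROC: a lock
acquired by one thread can be released by another once it has been
disowned.  A hedged sketch of that handoff pattern (handoff_lk and
both functions are illustrative):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/proc.h>

static struct lock handoff_lk;		/* hypothetical, initialized elsewhere */

/* Thread A: acquire exclusively, then hand ownership to the kernel. */
static void
handoff_start(void)
{
	lockmgr(&handoff_lk, LK_EXCLUSIVE, NULL, curthread);
	lockmgr_disown(&handoff_lk);	/* lk_lockholder becomes LK_KERNPROC */
}

/* Thread B: a different thread may now perform the release, because
 * the LK_RELEASE case accepts lk_lockholder == LK_KERNPROC. */
static void
handoff_finish(void)
{
	lockmgr(&handoff_lk, LK_RELEASE, NULL, curthread);
}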
diff --git a/sys/sys/buf.h b/sys/sys/buf.h
index 810d7ba..a684f93 100644
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -340,11 +340,8 @@ static __inline void BUF_KERNPROC(struct buf *);
static __inline void
BUF_KERNPROC(struct buf *bp)
{
- struct thread *td = curthread;
- if (!TD_IS_IDLETHREAD(td) && bp->b_lock.lk_lockholder == td)
- td->td_locks--;
- bp->b_lock.lk_lockholder = LK_KERNPROC;
+ lockmgr_disown(&bp->b_lock);
}
#endif
/*
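BUF_KERNPROC() is the canonical consumer of this handoff: a thread
locks a buffer, starts asynchronous I/O on it, and the interrupt-side
completion path unlocks it.  A hedged sketch of that pattern, where
start_io() is a hypothetical stand-in for whatever queues the buffer
to the hardware:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>

static void start_io(struct buf *bp);	/* hypothetical driver routine */

static void
example_async_write(struct buf *bp)
{
	/*
	 * This thread holds bp->b_lock, but a different (interrupt)
	 * thread will complete the I/O, so disown the lock first.
	 */
	BUF_KERNPROC(bp);	/* now simply calls lockmgr_disown(&bp->b_lock) */
	start_io(bp);		/* completion eventually reaches bufdone(bp) */
}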
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index 6fe9e63..a305a73f8 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -197,6 +197,7 @@ void lockdestroy(struct lock *);
int _lockmgr(struct lock *, u_int flags,
struct mtx *, struct thread *p, char *file, int line);
+void lockmgr_disown(struct lock *);
void lockmgr_printinfo(struct lock *);
int lockstatus(struct lock *, struct thread *);
int lockcount(struct lock *);