author     truckman <truckman@FreeBSD.org>  2003-07-16 01:00:39 +0000
committer  truckman <truckman@FreeBSD.org>  2003-07-16 01:00:39 +0000
commit     68ed1d12ac6cbb18345fb3bcdb41d6807907da13 (patch)
tree       38927c46605f096de1a9a940591f96914a47bd82 /sys/kern
parent     e611c416998a4d947f82577fc6d0507a5dbd56f0 (diff)
Rearrange the SYSINIT order to call lockmgr_init() earlier so that
the runtime lockmgr initialization code in lockinit() can be eliminated.

Reviewed by:	jhb
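For context, SYSINIT entries run at boot sorted first by their SI_SUB_*
subsystem constant and then by their order within that subsystem, so moving
lockmgr_init() from SI_SUB_LOCK to the earlier SI_SUB_LOCKMGR slot guarantees
lock_mtx exists before any lockinit() caller can reach it. A minimal sketch of
such a registration (the example function below is illustrative and not part
of this commit; the real ordering constants live in <sys/kernel.h>):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

/* Hypothetical example: runs once at boot in the SI_SUB_LOCKMGR slot. */
static void
example_init(void *dummy __unused)
{
	printf("example_init: lockmgr's slot, before SI_SUB_LOCK consumers\n");
}
SYSINIT(exampleinit, SI_SUB_LOCKMGR, SI_ORDER_ANY, example_init, NULL)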
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_lock.c     | 30
-rw-r--r--  sys/kern/kern_mtxpool.c  |  2
2 files changed, 4 insertions, 28 deletions
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index d765fb6..b088747 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -74,7 +74,6 @@ __FBSDID("$FreeBSD$");
* share a fixed (at boot time) number of mutexes across all lockmgr locks in
* order to keep sizeof(struct lock) down.
*/
-int lock_mtx_valid;
static struct mtx lock_mtx;

static int acquire(struct lock **lkpp, int extflags, int wanted);
@@ -84,18 +83,9 @@ static int acquiredrain(struct lock *lkp, int extflags) ;
static void
lockmgr_init(void *dummy __unused)
{
-	/*
-	 * Initialize the lockmgr protection mutex if it hasn't already been
-	 * done. Unless something changes about kernel startup order, VM
-	 * initialization will always cause this mutex to already be
-	 * initialized in a call to lockinit().
-	 */
-	if (lock_mtx_valid == 0) {
-		mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
-		lock_mtx_valid = 1;
-	}
+	mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
}
-SYSINIT(lmgrinit, SI_SUB_LOCK, SI_ORDER_FIRST, lockmgr_init, NULL)
+SYSINIT(lmgrinit, SI_SUB_LOCKMGR, SI_ORDER_FIRST, lockmgr_init, NULL)

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr) {
@@ -539,21 +529,7 @@ lockinit(lkp, prio, wmesg, timo, flags)
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

-	if (lock_mtx_valid == 0) {
-		mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
-		lock_mtx_valid = 1;
-	}
-	/*
-	 * XXX cleanup - make sure mtxpool is always initialized before
-	 * this is ever called.
-	 */
-	if (mtxpool_lockbuilder != NULL) {
-		mtx_lock(&lock_mtx);
-		lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
-		mtx_unlock(&lock_mtx);
-	} else {
-		lkp->lk_interlock = &lock_mtx;
-	}
+	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
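After this change, lockinit() assumes the lockbuilder pool always exists, and
every lockmgr lock borrows one of the pool's fixed set of mutexes as its
interlock instead of falling back to the global lock_mtx. A hedged sketch of
the resulting usage pattern (sketch_interlock_use is a hypothetical helper,
not code from this commit):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/lockmgr.h>

/*
 * The interlock is a pool mutex shared with other lockmgr locks; it
 * only guards short manipulations of the lock's bookkeeping fields.
 */
static void
sketch_interlock_use(struct lock *lkp)
{
	mtx_lock(lkp->lk_interlock);
	/* ... inspect or update lkp->lk_flags, lk_sharecount, etc. ... */
	mtx_unlock(lkp->lk_interlock);
}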
diff --git a/sys/kern/kern_mtxpool.c b/sys/kern/kern_mtxpool.c
index 93ce655..468cdb1 100644
--- a/sys/kern/kern_mtxpool.c
+++ b/sys/kern/kern_mtxpool.c
@@ -186,7 +186,7 @@ mtx_pool_alloc(struct mtx_pool *pool)
* The lockbuilder pool must be initialized early because the lockmgr
* and sx locks depend on it. The sx locks are used in the kernel
* memory allocator. The lockmgr subsystem is initialized by
- * SYSINIT(..., SI_SUB_LOCK, ...).
+ * SYSINIT(..., SI_SUB_LOCKMGR, ...).
*
* We can't call MALLOC() to dynamically allocate the sleep pool
* until after kmeminit() has been called, which is done by
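The comment above captures the ordering constraint this commit depends on: the
statically allocated lockbuilder pool must come up before the lockmgr and sx
subsystems, while any malloc-backed pool has to wait for kmeminit(). As a
rough illustration of what a pool hands out, here is a sketch of round-robin
allocation from a fixed mutex array (the struct, field, and constant names are
invented for the sketch, not the kern_mtxpool.c internals):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#define SKETCH_POOL_SIZE 128	/* assumed size; a power of two */

struct sketch_pool {
	struct mtx	mtx_ary[SKETCH_POOL_SIZE];	/* mtx_init'd at boot */
	int		next;
};

static struct mtx *
sketch_pool_alloc(struct sketch_pool *pool)
{
	/* Round-robin; masking works because the size is a power of two. */
	return (&pool->mtx_ary[pool->next++ & (SKETCH_POOL_SIZE - 1)]);
}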