-rw-r--r--  sys/kern/kern_descrip.c   |   2
-rw-r--r--  sys/kern/kern_lock.c      |   4
-rw-r--r--  sys/kern/kern_mtxpool.c   | 187
-rw-r--r--  sys/kern/kern_prot.c      |   2
-rw-r--r--  sys/kern/kern_resource.c  |   2
-rw-r--r--  sys/kern/kern_sx.c        |   2
-rw-r--r--  sys/sys/kernel.h          |   3
-rw-r--r--  sys/sys/mutex.h           |  28
8 files changed, 164 insertions, 66 deletions
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index 43cbe5c..acda3f3 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -1209,7 +1209,7 @@ falloc(td, resultfp, resultfd)
* descriptor to the list of open files at that point, otherwise
* put it at the front of the list of open files.
*/
- fp->f_mtxp = mtx_pool_alloc();
+ fp->f_mtxp = mtx_pool_alloc(mtxpool_sleep);
fp->f_count = 1;
fp->f_cred = crhold(td->td_ucred);
fp->f_ops = &badfileops;
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 475ebaa..d765fb6 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -547,9 +547,9 @@ lockinit(lkp, prio, wmesg, timo, flags)
* XXX cleanup - make sure mtxpool is always initialized before
* this is ever called.
*/
- if (mtx_pool_valid) {
+ if (mtxpool_lockbuilder != NULL) {
mtx_lock(&lock_mtx);
- lkp->lk_interlock = mtx_pool_alloc();
+ lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
mtx_unlock(&lock_mtx);
} else {
lkp->lk_interlock = &lock_mtx;
diff --git a/sys/kern/kern_mtxpool.c b/sys/kern/kern_mtxpool.c
index b93fcfa..93ce655 100644
--- a/sys/kern/kern_mtxpool.c
+++ b/sys/kern/kern_mtxpool.c
@@ -35,85 +35,164 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/systm.h>
-#ifndef MTX_POOL_SIZE
-#define MTX_POOL_SIZE 128
-#endif
-#define MTX_POOL_MASK (MTX_POOL_SIZE - 1)
-static struct mtx mtx_pool_ary[MTX_POOL_SIZE];
+MALLOC_DEFINE(M_MTXPOOL, "mtx_pool", "mutex pool");
+
+/* Pool sizes must be a power of two */
+#ifndef MTX_POOL_LOCKBUILDER_SIZE
+#define MTX_POOL_LOCKBUILDER_SIZE 128
+#endif
+#ifndef MTX_POOL_SLEEP_SIZE
+#define MTX_POOL_SLEEP_SIZE 128
+#endif
-int mtx_pool_valid = 0;
+struct mtxpool_header {
+ int mtxpool_size;
+ int mtxpool_mask;
+ int mtxpool_shift;
+ int mtxpool_next;
+};
+
+struct mtx_pool {
+ struct mtxpool_header mtx_pool_header;
+ struct mtx mtx_pool_ary[1];
+};
+
+static struct mtx_pool_lockbuilder {
+ struct mtxpool_header mtx_pool_header;
+ struct mtx mtx_pool_ary[MTX_POOL_LOCKBUILDER_SIZE];
+} lockbuilder_pool;
+
+#define mtx_pool_size mtx_pool_header.mtxpool_size
+#define mtx_pool_mask mtx_pool_header.mtxpool_mask
+#define mtx_pool_shift mtx_pool_header.mtxpool_shift
+#define mtx_pool_next mtx_pool_header.mtxpool_next
+
+struct mtx_pool *mtxpool_sleep;
+struct mtx_pool *mtxpool_lockbuilder;
+
+#if UINTPTR_MAX == UINT64_MAX /* 64 bits */
+# define POINTER_BITS 64
+# define HASH_MULTIPLIER 11400714819323198485u /* (2^64)*(sqrt(5)-1)/2 */
+#else /* assume 32 bits */
+# define POINTER_BITS 32
+# define HASH_MULTIPLIER 2654435769u /* (2^32)*(sqrt(5)-1)/2 */
+#endif
/*
- * Inline version of mtx_pool_find(), used to streamline our main API
- * function calls.
+ * Return the (shared) pool mutex associated with the specified address.
+ * The returned mutex is a leaf level mutex, meaning that if you obtain it
+ * you cannot obtain any other mutexes until you release it. You can
+ * legally msleep() on the mutex.
*/
-static __inline struct mtx *
-_mtx_pool_find(void *ptr)
+struct mtx *
+mtx_pool_find(struct mtx_pool *pool, void *ptr)
{
- int p;
-
- p = (int)(uintptr_t)ptr;
- return (&mtx_pool_ary[(p ^ (p >> 6)) & MTX_POOL_MASK]);
+ int p;
+
+ KASSERT(pool != NULL, ("_mtx_pool_find(): null pool"));
+ /*
+ * Fibonacci hash, see Knuth's
+ * _Art of Computer Programming, Volume 3 / Sorting and Searching_
+ */
+ p = ((HASH_MULTIPLIER * (uintptr_t)ptr) >> pool->mtx_pool_shift) &
+ pool->mtx_pool_mask;
+ return (&pool->mtx_pool_ary[p]);
}
static void
-mtx_pool_setup(void *dummy __unused)
+mtx_pool_initialize(struct mtx_pool *pool, const char *mtx_name, int pool_size,
+ int opts)
{
- int i;
-
- for (i = 0; i < MTX_POOL_SIZE; ++i)
- mtx_init(&mtx_pool_ary[i], "pool mutex", NULL,
- MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
- mtx_pool_valid = 1;
+ int i, maskbits;
+
+ pool->mtx_pool_size = pool_size;
+ pool->mtx_pool_mask = pool_size - 1;
+ for (i = 1, maskbits = 0; (i & pool_size) == 0; i = i << 1)
+ maskbits++;
+ pool->mtx_pool_shift = POINTER_BITS - maskbits;
+ pool->mtx_pool_next = 0;
+ for (i = 0; i < pool_size; ++i)
+ mtx_init(&pool->mtx_pool_ary[i], mtx_name, NULL, opts);
}
-/*
- * Obtain a (shared) mutex from the pool. The returned mutex is a leaf
- * level mutex, meaning that if you obtain it you cannot obtain any other
- * mutexes until you release it. You can legally msleep() on the mutex.
- */
-struct mtx *
-mtx_pool_alloc(void)
+struct mtx_pool *
+mtx_pool_create(const char *mtx_name, int pool_size, int opts)
{
- static int si;
-
- return (&mtx_pool_ary[si++ & MTX_POOL_MASK]);
+ struct mtx_pool *pool;
+
+ if (pool_size <= 0 || !powerof2(pool_size)) {
+ printf("WARNING: %s pool size is not a power of 2.\n",
+ mtx_name);
+ pool_size = 128;
+ }
+ MALLOC(pool, struct mtx_pool *,
+ sizeof (struct mtx_pool) + ((pool_size - 1) * sizeof (struct mtx)),
+ M_MTXPOOL, M_WAITOK | M_ZERO);
+ mtx_pool_initialize(pool, mtx_name, pool_size, opts);
+ return pool;
}
-/*
- * Return the (shared) pool mutex associated with the specified address.
- * The returned mutex is a leaf level mutex, meaning that if you obtain it
- * you cannot obtain any other mutexes until you release it. You can
- * legally msleep() on the mutex.
- */
-struct mtx *
-mtx_pool_find(void *ptr)
+void
+mtx_pool_destroy(struct mtx_pool **poolp)
{
+ int i;
+ struct mtx_pool *pool = *poolp;
- return (_mtx_pool_find(ptr));
+ for (i = pool->mtx_pool_size - 1; i >= 0; --i)
+ mtx_destroy(&pool->mtx_pool_ary[i]);
+ FREE(pool, M_MTXPOOL);
+ *poolp = NULL;
}
-/*
- * Combined find/lock operation. Lock the pool mutex associated with
- * the specified address.
- */
-void
-mtx_pool_lock(void *ptr)
+static void
+mtx_pool_setup_static(void *dummy __unused)
{
+ mtx_pool_initialize((struct mtx_pool *)&lockbuilder_pool,
+ "lockbuilder mtxpool", MTX_POOL_LOCKBUILDER_SIZE,
+ MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
+ mtxpool_lockbuilder = (struct mtx_pool *)&lockbuilder_pool;
+}
- mtx_lock(_mtx_pool_find(ptr));
+static void
+mtx_pool_setup_dynamic(void *dummy __unused)
+{
+ mtxpool_sleep = mtx_pool_create("sleep mtxpool",
+ MTX_POOL_SLEEP_SIZE, MTX_DEF);
}
/*
- * Combined find/unlock operation. Unlock the pool mutex associated with
- * the specified address.
+ * Obtain a (shared) mutex from the pool. The returned mutex is a leaf
+ * level mutex, meaning that if you obtain it you cannot obtain any other
+ * mutexes until you release it. You can legally msleep() on the mutex.
*/
-void
-mtx_pool_unlock(void *ptr)
+struct mtx *
+mtx_pool_alloc(struct mtx_pool *pool)
{
-
- mtx_unlock(_mtx_pool_find(ptr));
+ int i;
+
+ KASSERT(pool != NULL, ("mtx_pool_alloc(): null pool"));
+ /*
+ * mtx_pool_next is unprotected against multiple accesses,
+ * but simultaneous access by two CPUs should not be very
+ * harmful.
+ */
+ i = pool->mtx_pool_next;
+ pool->mtx_pool_next = (i + 1) & pool->mtx_pool_mask;
+ return (&pool->mtx_pool_ary[i]);
}
-SYSINIT(mtxpooli, SI_SUB_MTX_POOL, SI_ORDER_FIRST, mtx_pool_setup, NULL);
+/*
+ * The lockbuilder pool must be initialized early because the lockmgr
+ * and sx locks depend on it. The sx locks are used in the kernel
+ * memory allocator. The lockmgr subsystem is initialized by
+ * SYSINIT(..., SI_SUB_LOCK, ...).
+ *
+ * We can't call MALLOC() to dynamically allocate the sleep pool
+ * until after kmeminit() has been called, which is done by
+ * SYSINIT(..., SI_SUB_KMEM, ...).
+ */
+SYSINIT(mtxpooli1, SI_SUB_MTX_POOL_STATIC, SI_ORDER_FIRST,
+ mtx_pool_setup_static, NULL);
+SYSINIT(mtxpooli2, SI_SUB_MTX_POOL_DYNAMIC, SI_ORDER_FIRST,
+ mtx_pool_setup_dynamic, NULL);
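
Note on the slot selection introduced above: mtx_pool_find() now multiplies the pointer by the Fibonacci constant and keeps only the top log2(pool_size) bits, while mtx_pool_alloc() hands out slots round-robin from mtx_pool_next. The following is a standalone userspace sketch (not kernel code) of just the index arithmetic; it assumes 64-bit pointers, mirrors the 64-bit branch of the patch, and uses a hypothetical pool size of 128.

/*
 * Standalone illustration (userspace C) of the Fibonacci-hash slot
 * selection used by mtx_pool_find(). Assumes 64-bit pointers and a
 * hypothetical pool of 128 mutexes.
 */
#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE	128			/* must be a power of two */
#define HASH_MULT	11400714819323198485u	/* (2^64)*(sqrt(5)-1)/2 */

static int
pool_index(void *ptr, int pool_size)
{
	int i, maskbits;

	/* Same loop as mtx_pool_initialize(): maskbits = log2(pool_size). */
	for (i = 1, maskbits = 0; (i & pool_size) == 0; i <<= 1)
		maskbits++;
	/*
	 * Multiply, keep the top maskbits bits, then mask, exactly as
	 * mtx_pool_find() does with mtx_pool_shift and mtx_pool_mask.
	 */
	return (((HASH_MULT * (uintptr_t)ptr) >> (64 - maskbits)) &
	    (pool_size - 1));
}

int
main(void)
{
	int a, b;

	/* Adjacent stack addresses still spread across the pool. */
	printf("%d %d\n", pool_index(&a, POOL_SIZE), pool_index(&b, POOL_SIZE));
	return (0);
}

Because the shift keeps the high bits of the product, nearby addresses (which differ mostly in their low bits) still map to well-spread slots, which is the point of replacing the old (p ^ (p >> 6)) hash.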
diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c
index 848b82c..47f2321 100644
--- a/sys/kern/kern_prot.c
+++ b/sys/kern/kern_prot.c
@@ -1652,7 +1652,7 @@ crget(void)
MALLOC(cr, struct ucred *, sizeof(*cr), M_CRED, M_WAITOK | M_ZERO);
cr->cr_ref = 1;
- cr->cr_mtxp = mtx_pool_find(cr);
+ cr->cr_mtxp = mtx_pool_find(mtxpool_sleep, cr);
#ifdef MAC
mac_init_cred(cr);
#endif
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index c9341bb..6e57414 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -893,7 +893,7 @@ uifind(uid)
free(uip, M_UIDINFO);
uip = old_uip;
} else {
- uip->ui_mtxp = mtx_pool_alloc();
+ uip->ui_mtxp = mtx_pool_alloc(mtxpool_sleep);
uip->ui_uid = uid;
LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
}
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 7db9e72..4256b0a 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -74,7 +74,7 @@ sx_init(struct sx *sx, const char *description)
lock->lo_type = lock->lo_name = description;
lock->lo_flags = LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE |
LO_UPGRADABLE;
- sx->sx_lock = mtx_pool_find(sx);
+ sx->sx_lock = mtx_pool_find(mtxpool_lockbuilder, sx);
sx->sx_cnt = 0;
cv_init(&sx->sx_shrd_cv, description);
sx->sx_shrd_wcnt = 0;
diff --git a/sys/sys/kernel.h b/sys/sys/kernel.h
index 0fa0dfc..442b777 100644
--- a/sys/sys/kernel.h
+++ b/sys/sys/kernel.h
@@ -112,11 +112,12 @@ enum sysinit_sub_id {
SI_SUB_TUNABLES = 0x0700000, /* establish tunable values */
SI_SUB_CONSOLE = 0x0800000, /* console*/
SI_SUB_COPYRIGHT = 0x0800001, /* first use of console*/
- SI_SUB_MTX_POOL = 0x0900000, /* mutex pool */
+ SI_SUB_MTX_POOL_STATIC = 0x0900000, /* static mutex pool */
SI_SUB_VM = 0x1000000, /* virtual memory system init*/
SI_SUB_KMEM = 0x1800000, /* kernel memory*/
SI_SUB_KVM_RSRC = 0x1A00000, /* kvm operational limits*/
SI_SUB_WITNESS = 0x1A80000, /* witness initialization */
+ SI_SUB_MTX_POOL_DYNAMIC = 0x1AC0000, /* dynamic mutex pool */
SI_SUB_LOCK = 0x1B00000, /* lockmgr locks */
SI_SUB_EVENTHANDLER = 0x1C00000, /* eventhandler init */
SI_SUB_KLD = 0x2000000, /* KLD and module setup */
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 8bf3344..93388ab 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -236,12 +236,30 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
#define mtx_unlock(m) mtx_unlock_flags((m), 0)
#define mtx_unlock_spin(m) mtx_unlock_spin_flags((m), 0)
-struct mtx *mtx_pool_find(void *ptr);
-struct mtx *mtx_pool_alloc(void);
-void mtx_pool_lock(void *ptr);
-void mtx_pool_unlock(void *ptr);
+struct mtx_pool;
+
+struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts);
+void mtx_pool_destroy(struct mtx_pool **poolp);
+struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr);
+struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
+struct mtx *mtx_pool_alloc_spin(struct mtx_pool *pool);
+#define mtx_pool_lock(pool, ptr) \
+ mtx_lock(mtx_pool_find((pool), (ptr)))
+#define mtx_pool_lock_spin(pool, ptr) \
+ mtx_lock_spin(mtx_pool_find((pool), (ptr)))
+#define mtx_pool_unlock(pool, ptr) \
+ mtx_unlock(mtx_pool_find((pool), (ptr)))
+#define mtx_pool_unlock_spin(pool, ptr) \
+ mtx_unlock_spin(mtx_pool_find((pool), (ptr)))
-extern int mtx_pool_valid;
+/*
+ * mtxpool_lockbuilder is a pool of sleep locks that is not witness
+ * checked and should only be used for building higher level locks.
+ *
+ * mtxpool_sleep is a general purpose pool of sleep mutexes.
+ */
+extern struct mtx_pool *mtxpool_lockbuilder;
+extern struct mtx_pool *mtxpool_sleep;
#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
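
For reference, a minimal sketch of how a subsystem might consume the reworked interface declared above; struct foo, its fields, and the foo_* helpers are hypothetical, invented purely for illustration.

/*
 * Hypothetical consumer of the pooled-mutex API; struct foo and its
 * field names are illustrative only and not part of this commit.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct foo {
	struct mtx	*f_mtxp;	/* shared mutex borrowed from the pool */
	int		f_refcnt;
};

static void
foo_init(struct foo *fp)
{
	/* Leaf mutex from the general-purpose sleep pool. */
	fp->f_mtxp = mtx_pool_alloc(mtxpool_sleep);
	fp->f_refcnt = 1;
}

static void
foo_ref(struct foo *fp)
{
	/*
	 * Alternatively, skip the stored pointer and hash on the object
	 * address each time: mtx_pool_lock(mtxpool_sleep, fp); ...
	 * mtx_pool_unlock(mtxpool_sleep, fp);
	 */
	mtx_lock(fp->f_mtxp);
	fp->f_refcnt++;
	mtx_unlock(fp->f_mtxp);
}

Pool mutexes are shared and owned by the pool, so a consumer never calls mtx_destroy() on them; tearing down a struct foo simply drops the pointer. mtxpool_lockbuilder, by contrast, is not witness-checked and is intended only for building higher-level primitives such as lockmgr and sx locks.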