author     alc <alc@FreeBSD.org>  2005-09-09 06:03:08 +0000
committer  alc <alc@FreeBSD.org>  2005-09-09 06:03:08 +0000
commit     d03626c7d60a1113b002cdc903e26f3bff19e3f2 (patch)
tree       2c3504731b3e651c8f15aae7e96c59638f84400a /sys
parent     29fd49dcdf9f5607e5d0380e463a21713c498160 (diff)
Introduce a new lock for the purpose of synchronizing access to the
UMA boot pages.

Disable recursion on the general UMA lock now that startup_alloc() no
longer uses it.

Eliminate the variable uma_boot_free.  It serves no purpose.

Note: This change eliminates a lock-order reversal between a system map
mutex and the UMA lock.  See
http://sources.zabbadoz.net/freebsd/lor.html#109 for details.

MFC after:	3 days
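As an aside, the heart of the change visible in the diff below is that the
boot-page free list now has its own small leaf mutex, and startup_alloc()
performs a single test-and-remove under that lock instead of taking (and
recursing on) the general UMA lock.  The following is a minimal userland
sketch of that pattern, using POSIX threads in place of the kernel mtx(9)
API; the boot_page, boot_pages, and boot_page_get names are illustrative
only and do not appear in uma_core.c.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct boot_page {
        LIST_ENTRY(boot_page) link;
        void *data;
    };

    /*
     * The boot-time page cache and the dedicated lock that protects it.
     * Nothing else is acquired while this lock is held, so it cannot
     * take part in a lock order involving any other lock (the property
     * the commit relies on to eliminate LOR #109).
     */
    static LIST_HEAD(, boot_page) boot_pages =
        LIST_HEAD_INITIALIZER(boot_pages);
    static pthread_mutex_t boot_pages_lock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Pop one page from the startup cache, or return NULL when it is
     * empty.  Mirrors the new startup_alloc() fast path: the emptiness
     * test and the removal both happen under the dedicated lock, so no
     * separate counter (the old uma_boot_free) is needed.
     */
    static void *
    boot_page_get(void)
    {
        struct boot_page *p;
        void *data = NULL;

        pthread_mutex_lock(&boot_pages_lock);
        if ((p = LIST_FIRST(&boot_pages)) != NULL) {
            LIST_REMOVE(p, link);
            data = p->data;
        }
        pthread_mutex_unlock(&boot_pages_lock);
        return (data);
    }

    int
    main(void)
    {
        struct boot_page *p = malloc(sizeof(*p));

        p->data = p;    /* in UMA the slab header lives in the page itself */
        LIST_INSERT_HEAD(&boot_pages, p, link);
        printf("got page %p\n", boot_page_get());
        return (0);
    }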
Diffstat (limited to 'sys')
-rw-r--r--  sys/vm/uma_core.c  31
1 files changed, 9 insertions, 22 deletions
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 50541d0..49d1e3c 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -124,8 +124,8 @@ static struct mtx uma_mtx;
 static LIST_HEAD(,uma_slab) uma_boot_pages =
 	LIST_HEAD_INITIALIZER(&uma_boot_pages);
 
-/* Count of free boottime pages */
-static int uma_boot_free = 0;
+/* This mutex protects the boot time pages list */
+static struct mtx uma_boot_pages_mtx;
 
 /* Is the VM done starting up? */
 static int booted = 0;
@@ -905,24 +905,21 @@ static void *
 startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
 {
 	uma_keg_t keg;
+	uma_slab_t tmps;
 
 	keg = zone->uz_keg;
 
 	/*
 	 * Check our small startup cache to see if it has pages remaining.
 	 */
-	mtx_lock(&uma_mtx);
-	if (uma_boot_free != 0) {
-		uma_slab_t tmps;
-
-		tmps = LIST_FIRST(&uma_boot_pages);
+	mtx_lock(&uma_boot_pages_mtx);
+	if ((tmps = LIST_FIRST(&uma_boot_pages)) != NULL) {
 		LIST_REMOVE(tmps, us_link);
-		uma_boot_free--;
-		mtx_unlock(&uma_mtx);
+		mtx_unlock(&uma_boot_pages_mtx);
 		*pflag = tmps->us_flags;
 		return (tmps->us_data);
 	}
-	mtx_unlock(&uma_mtx);
+	mtx_unlock(&uma_boot_pages_mtx);
 	if (booted == 0)
 		panic("UMA: Increase UMA_BOOT_PAGES");
 	/*
@@ -1513,17 +1510,7 @@ uma_startup(void *bootmem)
 #ifdef UMA_DEBUG
 	printf("Creating uma keg headers zone and keg.\n");
 #endif
-	/*
-	 * The general UMA lock is a recursion-allowed lock because
-	 * there is a code path where, while we're still configured
-	 * to use startup_alloc() for backend page allocations, we
-	 * may end up in uma_reclaim() which calls zone_foreach(zone_drain),
-	 * which grabs uma_mtx, only to later call into startup_alloc()
-	 * because while freeing we needed to allocate a bucket. Since
-	 * startup_alloc() also takes uma_mtx, we need to be able to
-	 * recurse on it.
-	 */
-	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF | MTX_RECURSE);
+	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
 
 	/*
 	 * Figure out the maximum number of items-per-slab we'll have if
@@ -1617,8 +1604,8 @@ uma_startup(void *bootmem)
 		slab->us_data = (u_int8_t *)slab;
 		slab->us_flags = UMA_SLAB_BOOT;
 		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
-		uma_boot_free++;
 	}
+	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
 
 #ifdef UMA_DEBUG
 	printf("Creating uma zone headers zone and keg.\n");