diff options
author | jeff <jeff@FreeBSD.org> | 2002-06-17 22:02:41 +0000 |
---|---|---|
committer | jeff <jeff@FreeBSD.org> | 2002-06-17 22:02:41 +0000 |
commit | 030d3fdb720958d03b806333c701404e96a45088 (patch) | |
tree | fe7766503110bc305fe17c75033e53e8bff19901 /sys/vm/uma_core.c | |
parent | ba0c1d407b5f3469a8026b32d1ea0c5605c9018a (diff) | |
download | FreeBSD-src-030d3fdb720958d03b806333c701404e96a45088.zip FreeBSD-src-030d3fdb720958d03b806333c701404e96a45088.tar.gz |
- Introduce the new M_NOVM option which tells uma to only check the currently
allocated slabs and bucket caches for free items. It will not go ask the vm
for pages. This differs from M_NOWAIT in that it not only doesn't block, it
doesn't even ask.
- Add a new zone-creation option UMA_ZONE_VM, which sets the UMA_ZFLAG_BUCKETCACHE
zone flag. This tells uma that it should only allocate buckets out of the
bucket cache, and not from the VM. It does this by passing the M_NOVM option
to zalloc when getting a new bucket. This is so that the VM doesn't recursively enter
itself while trying to allocate buckets for vm_map_entry zones. If there
are already allocated buckets when we get here, we will still use them;
otherwise we skip allocating a new one.
- Use the ZONE_VM flag on vm map entries and pv entries on x86.
Diffstat (limited to 'sys/vm/uma_core.c')
-rw-r--r-- | sys/vm/uma_core.c | 20 |
1 files changed, 17 insertions, 3 deletions
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c index 0cab592..7eaa8b1 100644 --- a/sys/vm/uma_core.c +++ b/sys/vm/uma_core.c @@ -1013,6 +1013,9 @@ zone_ctor(void *mem, int size, void *udata) if (arg->flags & UMA_ZONE_NOFREE) zone->uz_flags |= UMA_ZFLAG_NOFREE; + if (arg->flags & UMA_ZONE_VM) + zone->uz_flags |= UMA_ZFLAG_BUCKETCACHE; + if (zone->uz_size > UMA_SLAB_SIZE) zone_large_init(zone); else @@ -1417,9 +1420,16 @@ zalloc_start: /* Now we no longer need the zone lock. */ ZONE_UNLOCK(zone); - if (bucket == NULL) + if (bucket == NULL) { + int bflags; + + bflags = flags; + if (zone->uz_flags & UMA_ZFLAG_BUCKETCACHE) + bflags |= M_NOVM; + bucket = uma_zalloc_internal(bucketzone, - NULL, flags, NULL); + NULL, bflags, NULL); + } if (bucket != NULL) { #ifdef INVARIANTS @@ -1524,7 +1534,8 @@ new_slab: * and cause the vm to allocate vm_map_entries. If we need new * buckets there too we will recurse in kmem_alloc and bad * things happen. So instead we return a NULL bucket, and make - * the code that allocates buckets smart enough to deal with it */ + * the code that allocates buckets smart enough to deal with it + */ if (zone == bucketzone && zone->uz_recurse != 0) { ZONE_UNLOCK(zone); return (NULL); @@ -1541,6 +1552,9 @@ new_slab: goto new_slab; } + if (flags & M_NOVM) + goto alloc_fail; + zone->uz_recurse++; slab = slab_zalloc(zone, flags); zone->uz_recurse--; |