author		jeff <jeff@FreeBSD.org>		2002-06-17 22:02:41 +0000
committer	jeff <jeff@FreeBSD.org>		2002-06-17 22:02:41 +0000
commit		030d3fdb720958d03b806333c701404e96a45088
tree		fe7766503110bc305fe17c75033e53e8bff19901 /sys
parent		ba0c1d407b5f3469a8026b32d1ea0c5605c9018a
- Introduce the new M_NOVM option which tells uma to only check the currently
allocated slabs and bucket caches for free items. It will not go ask the VM
for pages. This differs from M_NOWAIT in that it not only doesn't block, it
doesn't even ask.
- Add a new zcreate option, ZONE_VM, that sets the BUCKETCACHE zflag. This
tells uma that it should only allocate buckets out of the bucket cache, and
not from the VM. It does this by using the M_NOVM option to zalloc when
getting a new bucket. This is so that the VM doesn't recursively enter
itself while trying to allocate buckets for vm_map_entry zones. If there
are already allocated buckets when we get here we'll still use them, but
otherwise we'll skip the allocation.
- Use the ZONE_VM flag on vm map entries and pv entries on x86.
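
As a rough sketch of how the two pieces above fit together (illustration only,
not code from this commit; the zone name, item structure, and helper functions
are invented), a subsystem that must never recurse into the VM could do
something like:

	#include <sys/param.h>
	#include <sys/malloc.h>
	#include <vm/uma.h>

	/* Hypothetical item type and zone, for illustration only. */
	struct my_item {
		int	mi_field;
	};

	static uma_zone_t my_zone;

	static void
	my_zone_init(void)
	{
		/*
		 * UMA_ZONE_VM sets UMA_ZFLAG_BUCKETCACHE on the zone, so UMA
		 * will only take buckets that are already sitting in the
		 * bucket cache instead of allocating new ones through the VM.
		 */
		my_zone = uma_zcreate("MY ITEM", sizeof(struct my_item),
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	}

	static struct my_item *
	my_item_get(void)
	{
		/*
		 * M_NOVM: satisfy the request from already-allocated slabs or
		 * cached buckets only; never ask the VM for fresh pages.  The
		 * caller must be prepared for a NULL return.
		 */
		return (uma_zalloc(my_zone, M_NOWAIT | M_NOVM));
	}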
Diffstat (limited to 'sys')
-rw-r--r--	sys/amd64/amd64/pmap.c	 2
-rw-r--r--	sys/i386/i386/pmap.c	 2
-rw-r--r--	sys/sys/malloc.h	 1
-rw-r--r--	sys/vm/uma.h		 1
-rw-r--r--	sys/vm/uma_core.c	20
-rw-r--r--	sys/vm/uma_int.h	 1
-rw-r--r--	sys/vm/vm_map.c		 3
7 files changed, 24 insertions, 6 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index fc10df9..d3478d2 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -485,7 +485,7 @@ pmap_init(phys_start, phys_end)
 	if (initial_pvs < MINPV)
 		initial_pvs = MINPV;
 	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
-	    NULL, NULL, UMA_ALIGN_PTR, 0);
+	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
 	uma_zone_set_allocf(pvzone, pmap_allocf);
 	uma_prealloc(pvzone, initial_pvs);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index fc10df9..d3478d2 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -485,7 +485,7 @@ pmap_init(phys_start, phys_end)
 	if (initial_pvs < MINPV)
 		initial_pvs = MINPV;
 	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
-	    NULL, NULL, UMA_ALIGN_PTR, 0);
+	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
 	uma_zone_set_allocf(pvzone, pmap_allocf);
 	uma_prealloc(pvzone, initial_pvs);
diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h
index e0f5207..3bf564a 100644
--- a/sys/sys/malloc.h
+++ b/sys/sys/malloc.h
@@ -52,6 +52,7 @@
 #define	M_NOWAIT	0x0001		/* do not block */
 #define	M_USE_RESERVE	0x0002		/* can alloc out of reserve memory */
 #define	M_ZERO		0x0004		/* bzero the allocation */
+#define	M_NOVM		0x0008		/* Don't ask the VM for pages */
 
 #define	M_MAGIC		877983977	/* time when first defined :-) */
diff --git a/sys/vm/uma.h b/sys/vm/uma.h
index fb144cf..2b35360 100644
--- a/sys/vm/uma.h
+++ b/sys/vm/uma.h
@@ -173,6 +173,7 @@ uma_zone_t uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
 #define UMA_ZONE_MALLOC		0x0010	/* For use by malloc(9) only! */
 #define UMA_ZONE_NOFREE		0x0020	/* Do not free slabs of this type! */
 #define UMA_ZONE_MTXCLASS	0x0040	/* Create a new lock class */
+#define UMA_ZONE_VM		0x0080	/* Used for internal vm datastructures */
 
 /* Definitions for align */
 #define UMA_ALIGN_PTR	(sizeof(void *) - 1)	/* Alignment fit for ptr */
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 0cab592..7eaa8b1 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -1013,6 +1013,9 @@ zone_ctor(void *mem, int size, void *udata)
 	if (arg->flags & UMA_ZONE_NOFREE)
 		zone->uz_flags |= UMA_ZFLAG_NOFREE;
 
+	if (arg->flags & UMA_ZONE_VM)
+		zone->uz_flags |= UMA_ZFLAG_BUCKETCACHE;
+
 	if (zone->uz_size > UMA_SLAB_SIZE)
 		zone_large_init(zone);
 	else
@@ -1417,9 +1420,16 @@ zalloc_start:
 	/* Now we no longer need the zone lock. */
 	ZONE_UNLOCK(zone);
 
-	if (bucket == NULL)
+	if (bucket == NULL) {
+		int bflags;
+
+		bflags = flags;
+		if (zone->uz_flags & UMA_ZFLAG_BUCKETCACHE)
+			bflags |= M_NOVM;
+
 		bucket = uma_zalloc_internal(bucketzone,
-		    NULL, flags, NULL);
+		    NULL, bflags, NULL);
+	}
 
 	if (bucket != NULL) {
 #ifdef INVARIANTS
@@ -1524,7 +1534,8 @@ new_slab:
 	 * and cause the vm to allocate vm_map_entries.  If we need new
 	 * buckets there too we will recurse in kmem_alloc and bad
 	 * things happen.  So instead we return a NULL bucket, and make
-	 * the code that allocates buckets smart enough to deal with it */
+	 * the code that allocates buckets smart enough to deal with it
+	 */
 	if (zone == bucketzone && zone->uz_recurse != 0) {
 		ZONE_UNLOCK(zone);
 		return (NULL);
@@ -1541,6 +1552,9 @@ new_slab:
 		goto new_slab;
 	}
 
+	if (flags & M_NOVM)
+		goto alloc_fail;
+
 	zone->uz_recurse++;
 	slab = slab_zalloc(zone, flags);
 	zone->uz_recurse--;
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index d7c86bb..e09d549 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -273,6 +273,7 @@ struct uma_zone {
 #define UMA_ZFLAG_MALLOC	0x0008		/* Zone created by malloc */
 #define UMA_ZFLAG_NOFREE	0x0010		/* Don't free data from this zone */
 #define UMA_ZFLAG_FULL		0x0020		/* This zone reached uz_maxpages */
+#define UMA_ZFLAG_BUCKETCACHE	0x0040		/* Only allocate buckets from cache */
 
 /* This lives in uflags */
 #define UMA_ZONE_INTERNAL	0x1000		/* Internal zone for uflags */
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index d8e07f2..4453b72 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -159,7 +159,8 @@ vm_map_startup(void)
 	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 	uma_prealloc(mapzone, MAX_KMAP);
 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
-	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MTXCLASS);
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
+	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
 	uma_prealloc(kmapentzone, MAX_KMAPENT);
 	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
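
Taken together, the hunks above add one small piece of control flow. The
fragment below is a condensed, self-contained model of that flow, not the real
uma_core.c code; every structure and helper in it is a stand-in invented for
illustration:

	#include <stddef.h>

	#define	M_NOVM			0x0008	/* don't ask the VM for pages */
	#define	UMA_ZFLAG_BUCKETCACHE	0x0040	/* only use cached buckets */

	struct zone {
		int	uz_flags;
	};

	static struct zone bucketzone;	/* stand-in for UMA's bucket zone */
	static char fake_page[64];	/* stand-in for memory from the VM */

	/* Pretend lookup of a free item on an already-allocated slab. */
	static void *
	item_from_cached_slab(struct zone *zone)
	{
		(void)zone;
		return (NULL);		/* assume the caches are empty */
	}

	/* Pretend call into the VM for fresh pages. */
	static void *
	slab_from_vm(struct zone *zone, int flags)
	{
		(void)zone;
		(void)flags;
		return (fake_page);
	}

	/* Roughly what the internal allocator does once it needs a new slab. */
	static void *
	zone_alloc(struct zone *zone, int flags)
	{
		void *item;

		if ((item = item_from_cached_slab(zone)) != NULL)
			return (item);
		/* New in this commit: with M_NOVM, fail instead of calling the VM. */
		if (flags & M_NOVM)
			return (NULL);
		return (slab_from_vm(zone, flags));
	}

	/* Roughly how the fast path now requests a replacement bucket. */
	static void *
	bucket_get(struct zone *zone, int flags)
	{
		int bflags;

		bflags = flags;
		/* UMA_ZONE_VM zones may only take buckets already in the cache. */
		if (zone->uz_flags & UMA_ZFLAG_BUCKETCACHE)
			bflags |= M_NOVM;
		return (zone_alloc(&bucketzone, bflags));
	}

This mirrors the recursion guard described in the uma_core.c comment: a
vm_map_entry zone can need a new bucket while the VM is itself in the middle of
allocating map entries, and failing cleanly there is preferable to recursing
back into kmem_alloc.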