| author | jeff <jeff@FreeBSD.org> | 2002-06-19 20:49:44 +0000 |
|---|---|---|
| committer | jeff <jeff@FreeBSD.org> | 2002-06-19 20:49:44 +0000 |
| commit | 4df8a5cb052267b063f5bca6cd609ca5babf4d5a (patch) | |
| tree | d1af9354467e145869de617bdbd1a5e8a1bb428b /sys | |
| parent | 0776d3f180e1070fc4edad0433924c5e97a5ca22 (diff) | |
- Remove a bogus use of kmem_alloc() that was inherited from the old zone
  allocator.
- Properly set M_ZERO when talking to the back-end page allocators for
  non-malloc zones. This forces us to zero-fill pages when they are first
  brought into a cache.
- Properly handle M_ZERO in uma_zalloc_internal(). This fixes a problem where
  per-CPU buckets weren't always getting zeroed (see the sketch after this
  list).
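For context, here is a minimal sketch of what the change means from a zone consumer's point of view. The zone name, item structure, and function names below are hypothetical illustrations, not code from this commit:

```c
/*
 * Hypothetical UMA zone consumer (illustration only, not from this
 * commit).  Per the commit message, per-CPU buckets were not always
 * zeroed before this change; afterwards an M_ZERO allocation comes
 * back zeroed whether the item is recycled from a bucket or taken
 * from a freshly created slab.
 */
#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

struct foo {
	int	 f_refs;
	void	*f_data;
};

static uma_zone_t foo_zone;	/* hypothetical example zone */

static void
foo_zinit(void)
{
	/* No constructor: zeroing relies entirely on M_ZERO handling. */
	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

static struct foo *
foo_alloc(void)
{
	/* M_ZERO is now honored on every allocation path. */
	return (uma_zalloc(foo_zone, M_WAITOK | M_ZERO));
}
```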
Diffstat (limited to 'sys')
| -rw-r--r-- | sys/vm/uma.h | 1 |
| -rw-r--r-- | sys/vm/uma_core.c | 34 |
2 files changed, 18 insertions, 17 deletions
```diff
diff --git a/sys/vm/uma.h b/sys/vm/uma.h
index 2b35360..ea98638 100644
--- a/sys/vm/uma.h
+++ b/sys/vm/uma.h
@@ -402,7 +402,6 @@ void uma_zone_set_freef(uma_zone_t zone, uma_free freef);
  */
 #define UMA_SLAB_BOOT   0x01    /* Slab alloced from boot pages */
 #define UMA_SLAB_KMEM   0x02    /* Slab alloced from kmem_map */
-#define UMA_SLAB_KMAP   0x04    /* Slab alloced from kernel_map */
 #define UMA_SLAB_PRIV   0x08    /* Slab alloced from priv allocator */
 #define UMA_SLAB_OFFP   0x10    /* Slab is managed separately */
 #define UMA_SLAB_MALLOC 0x20    /* Slab is a large malloc slab */
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 82496e7..898f507 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -697,6 +697,18 @@ slab_zalloc(uma_zone_t zone, int wait)
 		}
 	}
 
+	/*
+	 * This reproduces the old vm_zone behavior of zero filling pages the
+	 * first time they are added to a zone.
+	 *
+	 * Malloced items are zeroed in uma_zalloc.
+	 */
+
+	if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
+		wait |= M_ZERO;
+	else
+		wait &= ~M_ZERO;
+
 	if (booted || (zone->uz_flags & UMA_ZFLAG_PRIVALLOC)) {
 		mtx_lock(&Giant);
 		mem = zone->uz_allocf(zone,
@@ -794,18 +806,8 @@ page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
 {
 	void *p;	/* Returned page */
 
-	/*
-	 * XXX The original zone allocator did this, but I don't think it's
-	 * necessary in current.
-	 */
-
-	if (lockstatus(&kernel_map->lock, NULL)) {
-		*pflag = UMA_SLAB_KMEM;
-		p = (void *) kmem_malloc(kmem_map, bytes, wait);
-	} else {
-		*pflag = UMA_SLAB_KMAP;
-		p = (void *) kmem_alloc(kernel_map, bytes);
-	}
+	*pflag = UMA_SLAB_KMEM;
+	p = (void *) kmem_malloc(kmem_map, bytes, wait);
 
 	return (p);
 }
@@ -874,10 +876,9 @@ static void
 page_free(void *mem, int size, u_int8_t flags)
 {
 	vm_map_t map;
+
 	if (flags & UMA_SLAB_KMEM)
 		map = kmem_map;
-	else if (flags & UMA_SLAB_KMAP)
-		map = kernel_map;
 	else
 		panic("UMA: page_free used with invalid flags %d\n", flags);
 
@@ -1620,8 +1621,9 @@ new_slab:
 	ZONE_UNLOCK(zone);
 
 	/* Only construct at this time if we're not filling a bucket */
-	if (bucket == NULL && zone->uz_ctor != NULL) {
-		zone->uz_ctor(item, zone->uz_size, udata);
+	if (bucket == NULL) {
+		if (zone->uz_ctor != NULL)
+			zone->uz_ctor(item, zone->uz_size, udata);
 		if (flags & M_ZERO)
 			bzero(item, zone->uz_size);
 	}
```
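Read together, the two zeroing hunks in uma_core.c implement one policy at two levels. The sketch below is my paraphrase of that policy, not code from the tree, and both function names are hypothetical:

```c
/*
 * Paraphrase of the zeroing policy in the diff above (illustration
 * only; function names are hypothetical).
 */

/*
 * Page level, from slab_zalloc(): zero whole pages once, when a slab
 * first enters the cache.  Malloc zones are the exception: their
 * items are zeroed per allocation in uma_zalloc() instead, so the
 * page-level M_ZERO is stripped to avoid zeroing twice.
 */
static int
slab_zero_policy(int uz_flags, int wait)
{
	if ((uz_flags & UMA_ZFLAG_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;
	return (wait);
}

/*
 * Item level, from uma_zalloc_internal(): run the constructor if one
 * exists, then honor M_ZERO unconditionally.  As I read the old hunk,
 * the bzero() was only reachable when a constructor was set, so
 * ctor-less zones could hand out dirty items despite M_ZERO.
 */
static void
item_finalize(uma_zone_t zone, void *item, void *udata, int flags)
{
	if (zone->uz_ctor != NULL)
		zone->uz_ctor(item, zone->uz_size, udata);
	if (flags & M_ZERO)
		bzero(item, zone->uz_size);
}
```

Note the ordering in the second sketch: the bzero() runs after the constructor, so M_ZERO wins over anything the constructor wrote, matching the committed hunk.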