author		jeff <jeff@FreeBSD.org>	2002-06-19 20:49:44 +0000
committer	jeff <jeff@FreeBSD.org>	2002-06-19 20:49:44 +0000
commit		4df8a5cb052267b063f5bca6cd609ca5babf4d5a (patch)
tree		d1af9354467e145869de617bdbd1a5e8a1bb428b /sys/vm/uma_core.c
parent		0776d3f180e1070fc4edad0433924c5e97a5ca22 (diff)
- Remove bogus use of kmem_alloc that was inherited from the old zone
  allocator.
- Properly set M_ZERO when talking to the back end page allocators for
  non-malloc zones.  This forces us to zero-fill pages when they are
  first brought into a cache.
- Properly handle M_ZERO in uma_zalloc_internal.  This fixes a problem
  where per-cpu buckets weren't always getting zeroed (see the sketch
  below).
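A minimal sketch of the guarantee the last two items establish.  The
zone, the struct, and all names below are hypothetical illustrations,
not part of this patch:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/malloc.h>
	#include <vm/uma.h>

	struct foo {
		int	f_refs;
	};

	static uma_zone_t foo_zone;	/* hypothetical zone */

	static void
	foo_example(void)
	{
		struct foo *fp;

		foo_zone = uma_zcreate("foo", sizeof(struct foo),
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

		/*
		 * With this change, M_ZERO is honored even when the
		 * item is served from a per-cpu bucket, so the item
		 * arrives zero-filled.
		 */
		fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
		KASSERT(fp->f_refs == 0,
		    ("uma_zalloc(M_ZERO): item not zeroed"));
		uma_zfree(foo_zone, fp);
	}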
Diffstat (limited to 'sys/vm/uma_core.c')
-rw-r--r--	sys/vm/uma_core.c	34
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 82496e7..898f507 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -697,6 +697,18 @@ slab_zalloc(uma_zone_t zone, int wait)
}
}
+ /*
+ * This reproduces the old vm_zone behavior of zero filling pages the
+ * first time they are added to a zone.
+ *
+ * Malloced items are zeroed in uma_zalloc.
+ */
+
+ if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
+ wait |= M_ZERO;
+ else
+ wait &= ~M_ZERO;
+
if (booted || (zone->uz_flags & UMA_ZFLAG_PRIVALLOC)) {
mtx_lock(&Giant);
mem = zone->uz_allocf(zone,
@@ -794,18 +806,8 @@ page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
{
void *p; /* Returned page */
- /*
- * XXX The original zone allocator did this, but I don't think it's
- * necessary in current.
- */
-
- if (lockstatus(&kernel_map->lock, NULL)) {
- *pflag = UMA_SLAB_KMEM;
- p = (void *) kmem_malloc(kmem_map, bytes, wait);
- } else {
- *pflag = UMA_SLAB_KMAP;
- p = (void *) kmem_alloc(kernel_map, bytes);
- }
+ *pflag = UMA_SLAB_KMEM;
+ p = (void *) kmem_malloc(kmem_map, bytes, wait);
return (p);
}
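For readability, page_alloc() after this change reduces to the
following (reconstructed from the hunk above; the "static void *"
qualifiers are assumed, since the hunk header does not show them):

	static void *
	page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
	{
		void *p;	/* Returned page */

		*pflag = UMA_SLAB_KMEM;
		p = (void *) kmem_malloc(kmem_map, bytes, wait);

		return (p);
	}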
@@ -874,10 +876,9 @@ static void
page_free(void *mem, int size, u_int8_t flags)
{
vm_map_t map;
+
if (flags & UMA_SLAB_KMEM)
map = kmem_map;
- else if (flags & UMA_SLAB_KMAP)
- map = kernel_map;
else
panic("UMA: page_free used with invalid flags %d\n", flags);
@@ -1620,8 +1621,9 @@ new_slab:
ZONE_UNLOCK(zone);
/* Only construct at this time if we're not filling a bucket */
- if (bucket == NULL && zone->uz_ctor != NULL) {
- zone->uz_ctor(item, zone->uz_size, udata);
+ if (bucket == NULL) {
+ if (zone->uz_ctor != NULL)
+ zone->uz_ctor(item, zone->uz_size, udata);
if (flags & M_ZERO)
bzero(item, zone->uz_size);
}
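The restructuring above matters because the old code only reached the
bzero() when the zone had a constructor, so a ctor-less zone could hand
out a dirty per-cpu bucket item despite M_ZERO.  The fixed tail, with
context reconstructed from the hunk:

	/* Only construct at this time if we're not filling a bucket */
	if (bucket == NULL) {
		if (zone->uz_ctor != NULL)
			zone->uz_ctor(item, zone->uz_size, udata);
		/* Zero regardless of whether a constructor ran. */
		if (flags & M_ZERO)
			bzero(item, zone->uz_size);
	}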