From aeb98331c1a874e05cc0e4d1ab335e18db4fced3 Mon Sep 17 00:00:00 2001
From: jeff
Date: Wed, 18 Sep 2002 08:26:30 +0000
Subject: - Split UMA_ZFLAG_OFFPAGE into UMA_ZFLAG_OFFPAGE and UMA_ZFLAG_HASH.
 - Remove all instances of the mallochash.
 - Stash the slab pointer in the vm page's object pointer when allocating from the kmem_obj.
 - Use the overloaded object pointer to find slabs for malloced memory.

---
 sys/kern/kern_malloc.c | 42 +++++++++---------------------------------
 1 file changed, 9 insertions(+), 33 deletions(-)

diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index c7bec3e..3d6307a 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -48,11 +48,12 @@
 #include
 #include
+#include
 #include
 #include
 #include
-#include
 #include
+#include
 #include
 #include
 #include
@@ -120,8 +121,7 @@ struct {
 u_int vm_kmem_size;
 
 /*
- * The malloc_mtx protects the kmemstatistics linked list as well as the
- * mallochash.
+ * The malloc_mtx protects the kmemstatistics linked list.
  */
 struct mtx malloc_mtx;
 
@@ -206,10 +206,9 @@ free(addr, type)
 	void *addr;
 	struct malloc_type *type;
 {
+	register struct malloc_type *ksp = type;
 	uma_slab_t slab;
-	void *mem;
 	u_long size;
-	register struct malloc_type *ksp = type;
 
 	/* free(NULL, ...) does nothing */
 	if (addr == NULL)
@@ -217,14 +216,12 @@ free(addr, type)
 
 	size = 0;
 
-	mem = (void *)((u_long)addr & (~UMA_SLAB_MASK));
-	mtx_lock(&malloc_mtx);
-	slab = hash_sfind(mallochash, mem);
-	mtx_unlock(&malloc_mtx);
+	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
 
 	if (slab == NULL)
 		panic("free: address %p(%p) has not been allocated.\n",
-		    addr, mem);
+		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
+
 
 	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
 #ifdef INVARIANTS
@@ -275,10 +272,7 @@ realloc(addr, size, type, flags)
 	if (addr == NULL)
 		return (malloc(size, type, flags));
 
-	mtx_lock(&malloc_mtx);
-	slab = hash_sfind(mallochash,
-	    (void *)((u_long)addr & ~(UMA_SLAB_MASK)));
-	mtx_unlock(&malloc_mtx);
+	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));
 
 	/* Sanity check */
 	KASSERT(slab != NULL,
@@ -333,10 +327,6 @@ kmeminit(dummy)
 	u_int8_t indx;
 	u_long npg;
 	u_long mem_size;
-	void *hashmem;
-	u_long hashsize;
-	int highbit;
-	int bits;
 	int i;
 
 	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
@@ -392,21 +382,7 @@ kmeminit(dummy)
 	    (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
 	kmem_map->system_map = 1;
 
-	hashsize = npg * sizeof(void *);
-
-	highbit = 0;
-	bits = 0;
-	/* The hash size must be a power of two */
-	for (i = 0; i < 8 * sizeof(hashsize); i++)
-		if (hashsize & (1 << i)) {
-			highbit = i;
-			bits++;
-		}
-	if (bits > 1)
-		hashsize = 1 << (highbit);
-
-	hashmem = (void *)kmem_alloc(kernel_map, (vm_size_t)hashsize);
-	uma_startup2(hashmem, hashsize / sizeof(void *));
+	uma_startup2();
 
 	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
 		int size = kmemzones[indx].kz_size;
-- 
cgit v1.1
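
Note on the mechanism this patch relies on (not part of the diff above): free() and realloc() now call vtoslab() on the slab-aligned address instead of searching the global mallochash under malloc_mtx. The sketch below illustrates how such a lookup can work once the slab pointer has been stashed in the vm page's object field; the real routine lives in the UMA internals, and the PG_SLAB flag check and helper names used here (PHYS_TO_VM_PAGE, pmap_kextract) are assumptions drawn from the usual FreeBSD VM helpers, not code visible in this patch.

/*
 * Hypothetical sketch of a vtoslab()-style lookup.  In the kernel this
 * would sit where <vm/vm.h>, <vm/vm_page.h>, <vm/pmap.h> and the UMA
 * internal headers are already included.
 */
static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	/* Find the vm_page backing this kernel virtual address. */
	p = PHYS_TO_VM_PAGE(pmap_kextract(va));

	/*
	 * If the page was not handed out by UMA there is no slab to
	 * return; free() panics on the resulting NULL.  (The exact flag
	 * used for this test is an assumption here.)
	 */
	if ((p->flags & PG_SLAB) == 0)
		return (NULL);

	/*
	 * The slab pointer was stored in the page's object field when the
	 * page was allocated from kmem_obj, so it can be read back
	 * directly without taking malloc_mtx.
	 */
	return ((uma_slab_t)p->object);
}

Callers mask the address down to the slab boundary first, as the diff shows: slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK)). The effect is to turn the per-free hash lookup, and its malloc_mtx acquire/release, into a constant-time fetch from the page structure.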