author      jasone <jasone@FreeBSD.org>  2000-09-09 22:27:35 +0000
committer   jasone <jasone@FreeBSD.org>  2000-09-09 22:27:35 +0000
commit      ee0ea20aad8eba1bf086b9044cf98aa849fddb80 (patch)
tree        0cc3ca163dcdfc0995e5d138608d0f8ea35f17af /sys/kern/kern_malloc.c
parent      b9a0a919436adb24714fc6d749eb7d7da761e91f (diff)
Add a mutex to the malloc interfaces so that it can safely be called
without owning the Giant lock.
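The change follows one pattern throughout kern_malloc.c: initialize malloc_mtx once in kmeminit(), then bracket every bucket/ksp critical section with mtx_enter()/mtx_exit(), including each early-return error path, while keeping the existing splmem()/splx() protection. Below is a minimal sketch of that pattern using the mutex API visible in the diff (mtx_t, mtx_init, mtx_enter, mtx_exit with MTX_DEF); malloc_mtx is the real name from the change, while the example_* names and the limit check are invented for illustration.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <machine/mutex.h>

static mtx_t example_mtx;		/* plays the role of malloc_mtx */
static long example_inuse;

/* One-time setup, as kmeminit() now does for malloc_mtx. */
static void
example_init(void)
{
	mtx_init(&example_mtx, "example", MTX_DEF);
}

/*
 * Critical section guarded by both the legacy spl level and the new
 * mutex; note the mutex is released on the early-return path as well.
 */
static int
example_take(int flags)
{
	int s;

	s = splmem();
	mtx_enter(&example_mtx, MTX_DEF);

	if (example_inuse >= 100 && (flags & M_NOWAIT)) {
		splx(s);
		mtx_exit(&example_mtx, MTX_DEF);
		return (1);		/* limit hit and the caller asked not to wait */
	}
	example_inuse++;

	splx(s);
	mtx_exit(&example_mtx, MTX_DEF);
	return (0);
}

The same enter/exit bracketing appears in malloc(), free(), and malloc_uninit() in the hunks below.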
Diffstat (limited to 'sys/kern/kern_malloc.c')
-rw-r--r--  sys/kern/kern_malloc.c | 15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 9348210..93ae55e 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -43,6 +43,8 @@
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
+#include <sys/proc.h>
+#include <machine/mutex.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -73,6 +75,8 @@ static struct kmemusage *kmemusage;
static char *kmembase;
static char *kmemlimit;
+static mtx_t malloc_mtx;
+
u_int vm_kmem_size;
#ifdef INVARIANTS
@@ -150,6 +154,7 @@ malloc(size, type, flags)
indx = BUCKETINDX(size);
kbp = &bucket[indx];
s = splmem();
+ mtx_enter(&malloc_mtx, MTX_DEF);
while (ksp->ks_memuse >= ksp->ks_limit) {
if (flags & M_ASLEEP) {
if (ksp->ks_limblocks < 65535)
@@ -158,6 +163,7 @@ malloc(size, type, flags)
}
if (flags & M_NOWAIT) {
splx(s);
+ mtx_exit(&malloc_mtx, MTX_DEF);
return ((void *) NULL);
}
if (ksp->ks_limblocks < 65535)
@@ -178,6 +184,7 @@ malloc(size, type, flags)
va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg), flags);
if (va == NULL) {
splx(s);
+ mtx_exit(&malloc_mtx, MTX_DEF);
return ((void *) NULL);
}
kbp->kb_total += kbp->kb_elmpercl;
@@ -262,6 +269,7 @@ out:
if (ksp->ks_memuse > ksp->ks_maxused)
ksp->ks_maxused = ksp->ks_memuse;
splx(s);
+ mtx_exit(&malloc_mtx, MTX_DEF);
return ((void *) va);
}
@@ -294,6 +302,7 @@ free(addr, type)
size = 1 << kup->ku_indx;
kbp = &bucket[kup->ku_indx];
s = splmem();
+ mtx_enter(&malloc_mtx, MTX_DEF);
#ifdef INVARIANTS
/*
* Check for returns of data that do not point to the
@@ -319,6 +328,7 @@ free(addr, type)
ksp->ks_inuse--;
kbp->kb_total -= 1;
splx(s);
+ mtx_exit(&malloc_mtx, MTX_DEF);
return;
}
freep = (struct freelist *)addr;
@@ -385,6 +395,7 @@ free(addr, type)
}
#endif
splx(s);
+ mtx_exit(&malloc_mtx, MTX_DEF);
}
/*
@@ -410,6 +421,8 @@ kmeminit(dummy)
#error "kmeminit: MAXALLOCSAVE too small"
#endif
+ mtx_init(&malloc_mtx, "malloc", MTX_DEF);
+
/*
* Try to auto-tune the kernel memory size, so that it is
* more applicable for a wider range of machine sizes.
@@ -512,6 +525,7 @@ malloc_uninit(data)
#ifdef INVARIANTS
s = splmem();
+ mtx_enter(&malloc_mtx, MTX_DEF);
for (indx = 0; indx < MINBUCKET + 16; indx++) {
kbp = bucket + indx;
freep = (struct freelist*)kbp->kb_next;
@@ -522,6 +536,7 @@ malloc_uninit(data)
}
}
splx(s);
+ mtx_exit(&malloc_mtx, MTX_DEF);
if (type->ks_memuse != 0)
printf("malloc_uninit: %ld bytes of '%s' still allocated\n",
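
For context, a hedged caller-side sketch of what the commit message enables: kernel code that does not hold Giant can still call malloc(9)/free(9), because the allocator bookkeeping is now serialized by malloc_mtx internally. The consumer function names below are hypothetical; malloc(), free(), M_TEMP, and M_NOWAIT are the standard interfaces of the era.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>

/* Hypothetical consumer: allocation without taking Giant first. */
static int
example_consumer(void **out)
{
	void *p;

	p = malloc(128, M_TEMP, M_NOWAIT);	/* serialized by malloc_mtx inside */
	if (p == NULL)
		return (ENOMEM);
	*out = p;
	return (0);
}

static void
example_release(void *p)
{
	free(p, M_TEMP);			/* free() takes malloc_mtx the same way */
}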