author    jasone <jasone@FreeBSD.org>  2007-02-22 19:10:30 +0000
committer jasone <jasone@FreeBSD.org>  2007-02-22 19:10:30 +0000
commit    a1e21ebd26fe70a8ecfd4d10e7034039ac5fc059 (patch)
tree      688d2014d5ebbfd59837ab970c00f445d348ba07 /lib
parent    b1293d942048743241fdb7f3984117450c2ce046 (diff)
Modify chunk_alloc() to prefer mmap()ed memory over sbrk()ed memory.
This has no impact unless USE_BRK is defined (32-bit platforms), in which case user allocations are allocated via mmap() if at all possible, in order to avoid the possibility of unreclaimable chunks in the data segment.

Fix an obscure bug in base_alloc() that could have allowed undefined behavior if an application were to use sbrk() in conjunction with a USE_BRK-enabled malloc.
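The mmap() strategy that now runs first relies on an over-allocate-and-trim idiom, visible in the chunk_alloc() hunk below: map size + chunk_size bytes at an arbitrary address, then unmap the leading and trailing slop so that the surviving region starts on a chunk boundary. A minimal standalone sketch of the same idiom, assuming chunksize is a power of two that is a multiple of the page size and size is a multiple of chunksize; chunk_alloc_aligned and its parameter names are illustrative, not the allocator's own:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *
chunk_alloc_aligned(size_t size, size_t chunksize)
{
	void *p;
	uintptr_t addr;
	size_t offset;

	/* Beware of size_t wrap-around, as in the real code. */
	if (size + chunksize <= size)
		return (NULL);

	/* Over-allocate: let the OS place it anywhere, but map chunksize
	 * extra bytes so that an aligned region is guaranteed to fit. */
	p = mmap(NULL, size + chunksize, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED)
		return (NULL);

	addr = (uintptr_t)p;
	offset = addr & (chunksize - 1);	/* cf. CHUNK_ADDR2OFFSET() */
	if (offset != 0) {
		/* Leading space: advance to the next chunk boundary. */
		munmap(p, chunksize - offset);
		addr += chunksize - offset;
		/* Trailing space: whatever remains past addr + size. */
		munmap((void *)(addr + size), offset);
	} else {
		/* Already aligned; all of the slop trails the region. */
		munmap((void *)(addr + size), chunksize);
	}
	return ((void *)addr);
}

Trimming the slop rather than asking the OS for alignment keeps the call portable: mmap() promises only page alignment, so at most chunksize extra bytes are ever mapped, and both munmap() calls operate on page-aligned ranges.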
Diffstat (limited to 'lib')
-rw-r--r--  lib/libc/stdlib/malloc.c | 76
1 file changed, 40 insertions(+), 36 deletions(-)
diff --git a/lib/libc/stdlib/malloc.c b/lib/libc/stdlib/malloc.c
index 1b470be..9b0fccc 100644
--- a/lib/libc/stdlib/malloc.c
+++ b/lib/libc/stdlib/malloc.c
@@ -1029,8 +1029,8 @@ base_chunk_alloc(size_t minsize)
malloc_mutex_unlock(&brk_mtx);
base_chunk = brk_cur;
base_next_addr = base_chunk;
- base_past_addr = (void *)((uintptr_t)base_chunk +
- incr);
+ base_past_addr = (void *)((uintptr_t)base_chunk
+ + incr);
#ifdef MALLOC_STATS
base_total += incr;
#endif
@@ -1042,8 +1042,8 @@ base_chunk_alloc(size_t minsize)
#endif
/*
- * Don't worry about chunk alignment here, since base_chunk doesn't really
- * need to be aligned.
+ * Don't worry about chunk alignment here, since base_chunk doesn't
+ * really need to be aligned.
*/
base_chunk = pages_map(NULL, chunk_size);
if (base_chunk == NULL)
@@ -1067,8 +1067,12 @@ base_alloc(size_t size)
malloc_mutex_lock(&base_mtx);
- /* Make sure there's enough space for the allocation. */
- if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
+ /*
+ * Make sure there's enough space for the allocation.
+ * base_chunk_alloc() does not guarantee that a newly allocated chunk
+ * is >= size, so loop here, rather than only trying once.
+ */
+ while ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
if (base_chunk_alloc(csize)) {
ret = NULL;
goto RETURN;
@@ -1299,6 +1303,36 @@ chunk_alloc(size_t size)
}
}
+ /*
+ * Try to over-allocate, but allow the OS to place the allocation
+ * anywhere. Beware of size_t wrap-around.
+ */
+ if (size + chunk_size > size) {
+ if ((ret = pages_map(NULL, size + chunk_size)) != NULL) {
+ size_t offset = CHUNK_ADDR2OFFSET(ret);
+
+ /*
+ * Success. Clean up unneeded leading/trailing space.
+ */
+ if (offset != 0) {
+ /* Leading space. */
+ pages_unmap(ret, chunk_size - offset);
+
+ ret = (void *)((uintptr_t)ret + (chunk_size -
+ offset));
+
+ /* Trailing space. */
+ pages_unmap((void *)((uintptr_t)ret + size),
+ offset);
+ } else {
+ /* Trailing space only. */
+ pages_unmap((void *)((uintptr_t)ret + size),
+ chunk_size);
+ }
+ goto RETURN;
+ }
+ }
+
#ifdef USE_BRK
/*
* Try to create allocations in brk, in order to make full use of
@@ -1342,36 +1376,6 @@ chunk_alloc(size_t size)
}
#endif
- /*
- * Try to over-allocate, but allow the OS to place the allocation
- * anywhere. Beware of size_t wrap-around.
- */
- if (size + chunk_size > size) {
- if ((ret = pages_map(NULL, size + chunk_size)) != NULL) {
- size_t offset = CHUNK_ADDR2OFFSET(ret);
-
- /*
- * Success. Clean up unneeded leading/trailing space.
- */
- if (offset != 0) {
- /* Leading space. */
- pages_unmap(ret, chunk_size - offset);
-
- ret = (void *)((uintptr_t)ret + (chunk_size -
- offset));
-
- /* Trailing space. */
- pages_unmap((void *)((uintptr_t)ret + size),
- offset);
- } else {
- /* Trailing space only. */
- pages_unmap((void *)((uintptr_t)ret + size),
- chunk_size);
- }
- goto RETURN;
- }
- }
-
/* All strategies for allocation failed. */
ret = NULL;
RETURN:
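The base_alloc() hunk above turns a one-shot check into a loop for the reason the new comment gives: a single base_chunk_alloc() call does not guarantee a region of at least the requested size. A self-contained sketch of that retry pattern, with hypothetical names (bump_alloc, grow_region, STEP) and malloc() standing in for the real chunk mapping:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define	STEP	4096		/* region granularity for this sketch */

static void *next_addr;		/* bump pointer into the current region */
static void *past_addr;		/* first address past the current region */

/*
 * Stand-in for base_chunk_alloc(): grabs a fresh region but, like the
 * real function, does not promise that it is >= minsize.  Returns true
 * on failure, matching the allocator's convention.
 */
static bool
grow_region(size_t minsize)
{
	void *p = malloc(STEP);

	(void)minsize;
	if (p == NULL)
		return (true);
	next_addr = p;
	past_addr = (void *)((uintptr_t)p + STEP);
	return (false);
}

static void *
bump_alloc(size_t csize)
{
	void *ret;

	assert(csize <= STEP);	/* keeps the retry loop finite here */
	/* Loop rather than testing once, as in the fixed base_alloc(). */
	while ((uintptr_t)next_addr + csize > (uintptr_t)past_addr) {
		if (grow_region(csize))
			return (NULL);
	}
	ret = next_addr;
	next_addr = (void *)((uintptr_t)next_addr + csize);
	return (ret);
}

Like the real base allocator, the sketch abandons any leftover tail of the old region when it grows; base memory is never freed, so simplicity wins over the few wasted bytes.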