author     rich <rich@FreeBSD.org>    1993-10-19 18:22:37 +0000
committer  rich <rich@FreeBSD.org>    1993-10-19 18:22:37 +0000
commit     5e95a8816c4f57a232031dd052ad53cf1382247e (patch)
tree       ae5f9f132e608eecd6de80593888a2c2ed38bc65
parent     62f301bbd69e2cb40390b6884683e4dd92705655 (diff)
merge free.c and realloc.c into malloc.c so that all three are linked in if any one is referenced.
libc's malloc.o contains malloc(), free(), and realloc(), and libc itself refers to realloc(); that reference makes the linker pull redundant malloc() and free() definitions out of libc's malloc.o whenever realloc() has not already been linked in from GNU malloc. Rich
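
The behaviour described above follows from how the linker searches a static archive: an undefined symbol pulls in the whole archive member that defines it, never an individual function. Below is a minimal, hypothetical sketch of the failure mode; the program and the separate free.o/realloc.o members are illustrative only, not part of the commit:

/* prog.c -- hypothetical example.  The program references only
 * malloc() and free().  Before this change those lived in separate
 * members of libgnumalloc.a, so only those members were pulled in
 * and realloc() stayed unresolved.  Because libc itself refers to
 * realloc(), the linker then satisfied that reference from libc's
 * malloc.o, which also defines malloc() and free() -- dragging a
 * second, redundant allocator into the program.  With free.c and
 * realloc.c folded into GNU malloc.c, resolving any one of the
 * three symbols brings in all of them from the same object.  */
#include <stdlib.h>

int
main ()
{
  char *p = malloc (32);

  free (p);
  return 0;
}

Linked as "cc prog.c -lgnumalloc", the single merged object now satisfies malloc(), free(), and realloc() together, so nothing is left for libc's malloc.o to provide.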
-rw-r--r--  gnu/lib/libmalloc/Makefile |   4
-rw-r--r--  gnu/lib/libmalloc/malloc.c | 304
2 files changed, 306 insertions(+), 2 deletions(-)
diff --git a/gnu/lib/libmalloc/Makefile b/gnu/lib/libmalloc/Makefile
index b5aeb26..463927d 100644
--- a/gnu/lib/libmalloc/Makefile
+++ b/gnu/lib/libmalloc/Makefile
@@ -1,9 +1,9 @@
-# $Id: Makefile,v 1.1 1993/09/23 21:10:40 cgd Exp $
+# $Id: Makefile,v 1.1 1993/09/24 13:03:14 rgrimes Exp $
CFLAGS+= -I${.CURDIR}
LIB= gnumalloc
-SRCS+= malloc.c free.c cfree.c realloc.c calloc.c morecore.c
+SRCS+= malloc.c cfree.c calloc.c morecore.c
SRCS+= memalign.c valloc.c mcheck.c mtrace.c mstats.c vm-limit.c
SRCS+= ralloc.c
NOMAN= noman
diff --git a/gnu/lib/libmalloc/malloc.c b/gnu/lib/libmalloc/malloc.c
index 47a6b8d..3c1ee99 100644
--- a/gnu/lib/libmalloc/malloc.c
+++ b/gnu/lib/libmalloc/malloc.c
@@ -316,3 +316,307 @@ malloc (size)
return result;
}
+
+#define min(A, B) ((A) < (B) ? (A) : (B))
+
+/* Debugging hook for realloc. */
+__ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, size_t __size));
+
+/* Resize the given region to the new size, returning a pointer
+ to the (possibly moved) region. This is optimized for speed;
+ some benchmarks seem to indicate that greater compactness is
+ achieved by unconditionally allocating and copying to a
+ new region. This module has incestuous knowledge of the
+ internals of both free and malloc. */
+__ptr_t
+realloc (ptr, size)
+ __ptr_t ptr;
+ size_t size;
+{
+ __ptr_t result;
+ int type;
+ size_t block, blocks, oldlimit;
+
+ if (size == 0)
+ {
+ free (ptr);
+ return malloc (0);
+ }
+ else if (ptr == NULL)
+ return malloc (size);
+
+ if (__realloc_hook != NULL)
+ return (*__realloc_hook) (ptr, size);
+
+ block = BLOCK (ptr);
+
+ type = _heapinfo[block].busy.type;
+ switch (type)
+ {
+ case 0:
+ /* Maybe reallocate a large block to a small fragment. */
+ if (size <= BLOCKSIZE / 2)
+ {
+ result = malloc (size);
+ if (result != NULL)
+ {
+ memcpy (result, ptr, size);
+ free (ptr);
+ return result;
+ }
+ }
+
+ /* The new size is a large allocation as well;
+ see if we can hold it in place. */
+ blocks = BLOCKIFY (size);
+ if (blocks < _heapinfo[block].busy.info.size)
+ {
+ /* The new size is smaller; return
+ excess memory to the free list. */
+ _heapinfo[block + blocks].busy.type = 0;
+ _heapinfo[block + blocks].busy.info.size
+ = _heapinfo[block].busy.info.size - blocks;
+ _heapinfo[block].busy.info.size = blocks;
+ free (ADDRESS (block + blocks));
+ result = ptr;
+ }
+ else if (blocks == _heapinfo[block].busy.info.size)
+ /* No size change necessary. */
+ result = ptr;
+ else
+ {
+ /* Won't fit, so allocate a new region that will.
+ Free the old region first in case there is sufficient
+ adjacent free space to grow without moving. */
+ blocks = _heapinfo[block].busy.info.size;
+ /* Prevent free from actually returning memory to the system. */
+ oldlimit = _heaplimit;
+ _heaplimit = 0;
+ free (ptr);
+ _heaplimit = oldlimit;
+ result = malloc (size);
+ if (result == NULL)
+ {
+ /* Now we're really in trouble. We have to unfree
+ the thing we just freed. Unfortunately it might
+ have been coalesced with its neighbors. */
+ if (_heapindex == block)
+ (void) malloc (blocks * BLOCKSIZE);
+ else
+ {
+ __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
+ (void) malloc (blocks * BLOCKSIZE);
+ free (previous);
+ }
+ return NULL;
+ }
+ if (ptr != result)
+ memmove (result, ptr, blocks * BLOCKSIZE);
+ }
+ break;
+
+ default:
+ /* Old size is a fragment; type is logarithm
+ to base two of the fragment size. */
+ if (size > (size_t) (1 << (type - 1)) && size <= (size_t) (1 << type))
+ /* The new size is the same kind of fragment. */
+ result = ptr;
+ else
+ {
+ /* The new size is different; allocate a new space,
+ and copy the lesser of the new size and the old. */
+ result = malloc (size);
+ if (result == NULL)
+ return NULL;
+ memcpy (result, ptr, min (size, (size_t) 1 << type));
+ free (ptr);
+ }
+ break;
+ }
+
+ return result;
+}
+
+/* Debugging hook for free. */
+void (*__free_hook) __P ((__ptr_t __ptr));
+
+/* List of blocks allocated by memalign. */
+struct alignlist *_aligned_blocks = NULL;
+
+/* Return memory to the heap.
+ Like `free' but don't call a __free_hook if there is one. */
+void
+_free_internal (ptr)
+ __ptr_t ptr;
+{
+ int type;
+ size_t block, blocks;
+ register size_t i;
+ struct list *prev, *next;
+
+ block = BLOCK (ptr);
+
+ type = _heapinfo[block].busy.type;
+ switch (type)
+ {
+ case 0:
+ /* Get as many statistics as early as we can. */
+ --_chunks_used;
+ _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
+ _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
+
+ /* Find the free cluster previous to this one in the free list.
+ Start searching at the last block referenced; this may benefit
+ programs with locality of allocation. */
+ i = _heapindex;
+ if (i > block)
+ while (i > block)
+ i = _heapinfo[i].free.prev;
+ else
+ {
+ do
+ i = _heapinfo[i].free.next;
+ while (i > 0 && i < block);
+ i = _heapinfo[i].free.prev;
+ }
+
+ /* Determine how to link this block into the free list. */
+ if (block == i + _heapinfo[i].free.size)
+ {
+ /* Coalesce this block with its predecessor. */
+ _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
+ block = i;
+ }
+ else
+ {
+ /* Really link this block back into the free list. */
+ _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
+ _heapinfo[block].free.next = _heapinfo[i].free.next;
+ _heapinfo[block].free.prev = i;
+ _heapinfo[i].free.next = block;
+ _heapinfo[_heapinfo[block].free.next].free.prev = block;
+ ++_chunks_free;
+ }
+
+ /* Now that the block is linked in, see if we can coalesce it
+ with its successor (by deleting its successor from the list
+ and adding in its size). */
+ if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
+ {
+ _heapinfo[block].free.size
+ += _heapinfo[_heapinfo[block].free.next].free.size;
+ _heapinfo[block].free.next
+ = _heapinfo[_heapinfo[block].free.next].free.next;
+ _heapinfo[_heapinfo[block].free.next].free.prev = block;
+ --_chunks_free;
+ }
+
+ /* Now see if we can return stuff to the system. */
+ blocks = _heapinfo[block].free.size;
+ if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
+ && (*__morecore) (0) == ADDRESS (block + blocks))
+ {
+ register size_t bytes = blocks * BLOCKSIZE;
+ _heaplimit -= blocks;
+ (*__morecore) (-bytes);
+ _heapinfo[_heapinfo[block].free.prev].free.next
+ = _heapinfo[block].free.next;
+ _heapinfo[_heapinfo[block].free.next].free.prev
+ = _heapinfo[block].free.prev;
+ block = _heapinfo[block].free.prev;
+ --_chunks_free;
+ _bytes_free -= bytes;
+ }
+
+ /* Set the next search to begin at this block. */
+ _heapindex = block;
+ break;
+
+ default:
+ /* Do some of the statistics. */
+ --_chunks_used;
+ _bytes_used -= 1 << type;
+ ++_chunks_free;
+ _bytes_free += 1 << type;
+
+ /* Get the address of the first free fragment in this block. */
+ prev = (struct list *) ((char *) ADDRESS (block) +
+ (_heapinfo[block].busy.info.frag.first << type));
+
+ if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
+ {
+ /* If all fragments of this block are free, remove them
+ from the fragment list and free the whole block. */
+ next = prev;
+ for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
+ next = next->next;
+ prev->prev->next = next;
+ if (next != NULL)
+ next->prev = prev->prev;
+ _heapinfo[block].busy.type = 0;
+ _heapinfo[block].busy.info.size = 1;
+
+ /* Keep the statistics accurate. */
+ ++_chunks_used;
+ _bytes_used += BLOCKSIZE;
+ _chunks_free -= BLOCKSIZE >> type;
+ _bytes_free -= BLOCKSIZE;
+
+ free (ADDRESS (block));
+ }
+ else if (_heapinfo[block].busy.info.frag.nfree != 0)
+ {
+ /* If some fragments of this block are free, link this
+ fragment into the fragment list after the first free
+ fragment of this block. */
+ next = (struct list *) ptr;
+ next->next = prev->next;
+ next->prev = prev;
+ prev->next = next;
+ if (next->next != NULL)
+ next->next->prev = next;
+ ++_heapinfo[block].busy.info.frag.nfree;
+ }
+ else
+ {
+ /* No fragments of this block are free, so link this
+ fragment into the fragment list and announce that
+ it is the first free fragment of this block. */
+ prev = (struct list *) ptr;
+ _heapinfo[block].busy.info.frag.nfree = 1;
+ _heapinfo[block].busy.info.frag.first = (unsigned long int)
+ ((unsigned long int) ((char *) ptr - (char *) NULL)
+ % BLOCKSIZE >> type);
+ prev->next = _fraghead[type].next;
+ prev->prev = &_fraghead[type];
+ prev->prev->next = prev;
+ if (prev->next != NULL)
+ prev->next->prev = prev;
+ }
+ break;
+ }
+}
+
+/* Return memory to the heap. */
+void
+free (ptr)
+ __ptr_t ptr;
+{
+ register struct alignlist *l;
+
+ if (ptr == NULL)
+ return;
+
+ for (l = _aligned_blocks; l != NULL; l = l->next)
+ if (l->aligned == ptr)
+ {
+ l->aligned = NULL; /* Mark the slot in the list as free. */
+ ptr = l->exact;
+ break;
+ }
+
+ if (__free_hook != NULL)
+ (*__free_hook) (ptr);
+ else
+ _free_internal (ptr);
+}