Diffstat (limited to 'lib/libc/stdlib/malloc.c')
-rw-r--r--	lib/libc/stdlib/malloc.c	18	+++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/lib/libc/stdlib/malloc.c b/lib/libc/stdlib/malloc.c
index 13aed80..1a50df1 100644
--- a/lib/libc/stdlib/malloc.c
+++ b/lib/libc/stdlib/malloc.c
@@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
- * $Id: malloc.c,v 1.39 1998/06/18 09:13:16 peter Exp $
+ * $Id: malloc.c,v 1.40 1998/09/19 20:55:36 alex Exp $
*
*/
@@ -1064,6 +1064,7 @@ malloc(size_t size)
if (malloc_active++) {
wrtwarning("recursive call.\n");
malloc_active--;
+ /*THREAD_UNLOCK();*/ /* XXX */
return (0);
}
if (!malloc_started)
@@ -1087,6 +1088,20 @@ free(void *ptr)
malloc_func = " in free():";
if (malloc_active++) {
wrtwarning("recursive call.\n");
+ /*
+ * XXX
+ * Ideally the next two lines would be gone and free() would
+ * exit below. Unfortunately our spinlock implementation
+ * allows a particular thread to obtain a lock multiple times
+ * without counting how many times it has done so. The
+ * practical upshot is that a single unlock releases all of
+ * that thread's locks at once. For this reason, we return
+ * without performing an unlock in the case of recursion (see
+ * also the commented-out THREAD_UNLOCK calls in malloc and
+ * realloc).
+ */
+ malloc_active--;
+ return;
} else {
ifree(ptr);
UTRACE(ptr, 0, 0);
@@ -1106,6 +1121,7 @@ realloc(void *ptr, size_t size)
if (malloc_active++) {
wrtwarning("recursive call.\n");
malloc_active--;
+ /*THREAD_UNLOCK();*/ /* XXX */
return (0);
}
if (ptr && !malloc_started) {
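
The workaround above exists because the spinlock behind THREAD_LOCK() does
not count re-acquisitions by the same thread. For illustration only, here
is a minimal sketch of a counting (recursive) spinlock under which the
early unlock would be safe. It assumes POSIX threads and the GCC __sync
atomic builtins (which post-date this commit); struct rspinlock and the
rs_lock/rs_unlock names are hypothetical, not the libc lock API, and the
sketch elides some memory-ordering subtleties a production lock would need.

#include <pthread.h>

struct rspinlock {
	volatile int	rs_held;	/* 0 = free, 1 = taken */
	pthread_t	rs_owner;	/* thread currently holding it */
	int		rs_depth;	/* times rs_owner has locked it */
};

static void
rs_lock(struct rspinlock *rs)
{
	pthread_t self = pthread_self();

	/*
	 * Re-entry by the owning thread only bumps the depth.
	 * rs_owner is only written by the thread holding the lock,
	 * so this comparison is meaningful when we already own it.
	 */
	if (rs->rs_held && pthread_equal(rs->rs_owner, self)) {
		rs->rs_depth++;
		return;
	}
	/* Spin until the test-and-set sees the lock free. */
	while (__sync_lock_test_and_set(&rs->rs_held, 1))
		continue;
	rs->rs_owner = self;
	rs->rs_depth = 1;
}

static void
rs_unlock(struct rspinlock *rs)
{
	/*
	 * Only the owner calls this while holding the lock, so the
	 * plain decrement is safe; only the outermost unlock really
	 * releases the lock.
	 */
	if (--rs->rs_depth == 0)
		__sync_lock_release(&rs->rs_held);
}

With a lock like this, each recursive entry into malloc, free, or realloc
could pair its lock with exactly one unlock, and the commented-out
THREAD_UNLOCK() calls and the early return in free() would no longer be
necessary.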