author    alex <alex@FreeBSD.org>  1998-09-20 01:07:50 +0000
committer alex <alex@FreeBSD.org>  1998-09-20 01:07:50 +0000
commit    6afe7a9f53db5bd8cce090491b5b6e776924eec0 (patch)
tree      79858ad023a096f8382ff3a53b74946d37ae4b74 /lib/libc/stdlib/malloc.c
parent    2b305dce948526bfe115089b257ee5d72d563aa9 (diff)
Back out part of previous commit (even though it's technically correct).
Our spinlock implementation allows a particular thread to obtain a lock multiple times, but release the lock with a single unlock call. Since we're detecting recursion, we know the lock is already owned by the current thread from a previous call and must not be released in the current call. This is far too dependent on this particular spinlock implementation, so I've added commented-out calls to THREAD_UNLOCK in the appropriate places. We can activate this code when spinlock is taught to count each lock operation.
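As an aside, here is a minimal sketch of the kind of counting ("recursive") spinlock that last sentence asks for. This is not the libc spinlock: the rspinlock type and the rspin_* functions are invented for the example, and it assumes pthread_t converts to a unique nonzero integer.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

struct rspinlock {
	atomic_uintptr_t owner;	/* pthread_t of the holder, 0 when free */
	unsigned int depth;	/* how many times the holder has locked it */
};

static void
rspin_lock(struct rspinlock *sl)
{
	/* Assumption: pthread_t converts to a unique nonzero uintptr_t. */
	uintptr_t self = (uintptr_t)pthread_self();

	if (atomic_load(&sl->owner) == self) {
		sl->depth++;		/* recursive acquire: just count it */
		return;
	}
	for (;;) {
		uintptr_t unowned = 0;

		/* Spin until we swap ourselves in as the owner. */
		if (atomic_compare_exchange_weak(&sl->owner, &unowned, self))
			break;
	}
	sl->depth = 1;
}

static void
rspin_unlock(struct rspinlock *sl)
{
	if (--sl->depth == 0)		/* only the last unlock releases */
		atomic_store(&sl->owner, 0);
}

With a lock like this, every acquisition pairs with exactly one release, so the recursion paths in malloc(), free(), and realloc() below could call THREAD_UNLOCK() before returning instead of relying on the outer call to release the lock.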
Diffstat (limited to 'lib/libc/stdlib/malloc.c')
-rw-r--r--  lib/libc/stdlib/malloc.c  18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/lib/libc/stdlib/malloc.c b/lib/libc/stdlib/malloc.c
index 13aed80..1a50df1 100644
--- a/lib/libc/stdlib/malloc.c
+++ b/lib/libc/stdlib/malloc.c
@@ -6,7 +6,7 @@
  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
  * ----------------------------------------------------------------------------
  *
- * $Id: malloc.c,v 1.39 1998/06/18 09:13:16 peter Exp $
+ * $Id: malloc.c,v 1.40 1998/09/19 20:55:36 alex Exp $
  *
  */
 
@@ -1064,6 +1064,7 @@ malloc(size_t size)
     if (malloc_active++) {
         wrtwarning("recursive call.\n");
         malloc_active--;
+        /*THREAD_UNLOCK();*/            /* XXX */
         return (0);
     }
     if (!malloc_started)
@@ -1087,6 +1088,20 @@ free(void *ptr)
     malloc_func = " in free():";
     if (malloc_active++) {
         wrtwarning("recursive call.\n");
+        /*
+         * XXX
+         * Ideally the next two lines would be gone and free() would
+         * exit below.  Unfortunately our spinlock implementation
+         * allows a particular thread to obtain a lock multiple times
+         * without counting how many times said operation has been
+         * performed.  The practical upshot of which is a single unlock
+         * causes all locks to be undone at once.  For this reason,
+         * we return without performing an unlock in the case of
+         * recursion (see also the commented out THREAD_UNLOCK calls
+         * in malloc & realloc).
+         */
+        malloc_active--;
+        return;
     } else {
         ifree(ptr);
         UTRACE(ptr, 0, 0);
@@ -1106,6 +1121,7 @@ realloc(void *ptr, size_t size)
     if (malloc_active++) {
         wrtwarning("recursive call.\n");
         malloc_active--;
+        /*THREAD_UNLOCK();*/            /* XXX */
         return (0);
     }
     if (ptr && !malloc_started) {
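Once the spinlock does count each lock operation, the comment in the free() hunk says the recursion branch could simply fall through to a common exit instead of returning early. A hypothetical sketch of that shape follows, with stubs standing in for the libc internals named in the diff; the real exit code is not shown in this hunk, and UTRACE is omitted for brevity.

#include <stdio.h>

static int malloc_active;

static void wrtwarning(const char *p) { fprintf(stderr, "warning: %s", p); }
static void ifree(void *ptr) { (void)ptr; /* real deallocation lives here */ }
static void THREAD_LOCK(void) { /* counting spinlock acquire, as sketched above */ }
static void THREAD_UNLOCK(void) { /* counting spinlock release */ }

void
counted_free(void *ptr)
{
	THREAD_LOCK();
	if (malloc_active++) {
		wrtwarning("recursive call.\n");
		/* Fall through: the single unlock below undoes only
		 * this call's acquisition once each lock is counted. */
	} else {
		ifree(ptr);
	}
	malloc_active--;
	THREAD_UNLOCK();
}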