author		jasone <jasone@FreeBSD.org>	1999-12-29 15:44:59 +0000
committer	jasone <jasone@FreeBSD.org>	1999-12-29 15:44:59 +0000
commit		a0b3dffc3d8f4c9f62816004946bf6c95f9f9971 (patch)
tree		8ba7518bd446eaa7340253f9f0f0ebaa35117340 /lib/libpthread
parent		383cb3575ca956c99535f199b7697e0cb3a5f5b0 (diff)
Don't explicitly mmap() red zones at the bottom of thread stacks (except
the initial thread).  Instead, just leave an unmapped gap between thread
stacks and make sure that the thread stacks won't grow into these gaps,
simply by limiting the size of the stacks with the 'len' argument to
mmap().  This (if I understand correctly) reduces VM overhead
considerably.

Reviewed by:	deischen
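In outline, the scheme works like this (a minimal sketch, not code from the commit; the constants mirror PTHREAD_STACK_DEFAULT and a one-page PTHREAD_STACK_GUARD, and the helper name is hypothetical): only the stack itself is mapped, the page-sized gap below it is never given to mmap(), and the 'len' argument caps how large the MAP_STACK region can ever be, so runaway growth faults in the gap instead of corrupting the next stack.

	#include <sys/types.h>
	#include <sys/mman.h>

	#define STACK_SIZE	65536	/* PTHREAD_STACK_DEFAULT */
	#define GUARD_SIZE	4096	/* PTHREAD_STACK_GUARD (one page) */

	/*
	 * Hypothetical helper: map one thread stack at 'addr', leaving
	 * the GUARD_SIZE bytes below it unmapped.  Because 'len' is
	 * exactly STACK_SIZE, the MAP_STACK region cannot grow past its
	 * bottom into the gap.
	 */
	static void *
	alloc_thread_stack(void *addr)
	{
		void *stack;

		stack = mmap(addr, STACK_SIZE, PROT_READ | PROT_WRITE,
		    MAP_STACK, -1, 0);
		return (stack == MAP_FAILED ? NULL : stack);
	}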
Diffstat (limited to 'lib/libpthread')
-rw-r--r--  lib/libpthread/thread/thr_create.c  | 11
-rw-r--r--  lib/libpthread/thread/thr_init.c    |  8
-rw-r--r--  lib/libpthread/thread/thr_private.h | 23
3 files changed, 26 insertions(+), 16 deletions(-)
diff --git a/lib/libpthread/thread/thr_create.c b/lib/libpthread/thread/thr_create.c
index a392cba..fd9e746 100644
--- a/lib/libpthread/thread/thr_create.c
+++ b/lib/libpthread/thread/thr_create.c
@@ -136,20 +136,11 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
 			if (pthread_mutex_unlock(&_gc_mutex) != 0)
 				PANIC("Cannot unlock gc mutex");
 
-			/* Red zone: */
-			if (mmap(stack - PTHREAD_STACK_GUARD,
-			    PTHREAD_STACK_GUARD, 0, MAP_ANON,
-			    -1, 0) == MAP_FAILED) {
-				ret = EAGAIN;
-				free(new_thread);
-			}
 			/* Stack: */
-			else if (mmap(stack, PTHREAD_STACK_DEFAULT,
+			if (mmap(stack, PTHREAD_STACK_DEFAULT,
 			    PROT_READ | PROT_WRITE, MAP_STACK,
 			    -1, 0) == MAP_FAILED) {
 				ret = EAGAIN;
-				munmap(stack - PTHREAD_STACK_GUARD,
-				    PTHREAD_STACK_GUARD);
 				free(new_thread);
 			}
 		}
diff --git a/lib/libpthread/thread/thr_init.c b/lib/libpthread/thread/thr_init.c
index c888697..051483f 100644
--- a/lib/libpthread/thread/thr_init.c
+++ b/lib/libpthread/thread/thr_init.c
@@ -186,7 +186,13 @@ _thread_init(void)
 	/* Initialize the thread stack cache: */
 	SLIST_INIT(&_stackq);
 
-	/* Create the red zone for the main stack. */
+	/*
+	 * Create a red zone below the main stack.  All other stacks are
+	 * constrained to a maximum size by the parameters passed to
+	 * mmap(), but this stack is only limited by resource limits, so
+	 * this stack needs an explicitly mapped red zone to protect the
+	 * thread stack that is just beyond.
+	 */
 	if (mmap((void *) USRSTACK - PTHREAD_STACK_INITIAL -
 	    PTHREAD_STACK_GUARD, PTHREAD_STACK_GUARD, 0, MAP_ANON,
 	    -1, 0) == MAP_FAILED)
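The initial thread's red zone above is mapped with a protection of 0, i.e. no access at all, so any read or write into that page faults.  A standalone sketch of the same idea (the helper name is illustrative, not from the library; PROT_NONE is the symbolic spelling of the 0 protection used in the hunk above):

	#include <sys/types.h>
	#include <sys/mman.h>

	/*
	 * Hypothetical helper: reserve an inaccessible region below a
	 * stack so that growth past the stack bottom faults immediately
	 * instead of running into whatever happens to be mapped next.
	 */
	static int
	map_red_zone(char *stack_bottom, size_t guard_size)
	{
		if (mmap(stack_bottom - guard_size, guard_size, PROT_NONE,
		    MAP_ANON, -1, 0) == MAP_FAILED)
			return (-1);
		return (0);
	}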
diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h
index 4326bf6..8a126f4 100644
--- a/lib/libpthread/thread/thr_private.h
+++ b/lib/libpthread/thread/thr_private.h
@@ -335,7 +335,13 @@ struct pthread_attr {
  * Miscellaneous definitions.
  */
 #define PTHREAD_STACK_DEFAULT		65536
-/* Size of red zone at the end of each stack. */
+/*
+ * Size of red zone at the end of each stack.  In actuality, this "red zone" is
+ * merely an unmapped region, except in the case of the initial stack.  Since
+ * mmap() makes it possible to specify the maximum growth of a MAP_STACK region,
+ * an unmapped gap between thread stacks achieves the same effect as explicitly
+ * mapped red zones.
+ */
 #define PTHREAD_STACK_GUARD		PAGE_SIZE
 
 /*
@@ -904,10 +910,17 @@ SCLASS pthread_switch_routine_t _sched_switch_hook
  */
 SCLASS SLIST_HEAD(, stack)	_stackq;
 
-/* Base address of next unallocated default-size stack.  Stacks are allocated
- * contiguously, starting below the beginning of the main stack.  When a new
- * stack is created, a guard page is created just above it in order to (usually)
- * detect attempts by the adjacent stack to trounce the next thread stack. */
+/*
+ * Base address of next unallocated default-size {stack, red zone}.  Stacks are
+ * allocated contiguously, starting below the bottom of the main stack.  When a
+ * new stack is created, a red zone is created (actually, the red zone is simply
+ * left unmapped) below the bottom of the stack, such that the stack will not be
+ * able to grow all the way to the top of the next stack.  This isn't
+ * fool-proof.  It is possible for a stack to grow by a large amount, such that
+ * it grows into the next stack, and as long as the memory within the red zone
+ * is never accessed, nothing will prevent one thread stack from trouncing all
+ * over the next.
+ */
 SCLASS void *	_next_stack
 #ifdef GLOBAL_PTHREAD_PRIVATE
 /* main stack top - main stack size - stack size - (red zone + main stack red zone) */
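To make that initializer's arithmetic concrete, a hedged sketch (every constant here is an example value, not the library's real figure on any particular platform, and the helper name is hypothetical): the first slot sits below the main stack and both red zones, and each allocation steps the base down by one stack plus one unmapped red zone.

	#include <stdint.h>

	#define TOP		0xbfc00000UL	/* example main stack top */
	#define MAIN_SIZE	0x100000UL	/* example main stack size */
	#define STACK_SIZE	65536UL		/* default thread stack size */
	#define GUARD_SIZE	4096UL		/* red zone (one page) */

	/*
	 * main stack top - main stack size - stack size -
	 * (red zone + main stack red zone), as in the initializer above.
	 */
	static uintptr_t next_stack =
	    TOP - MAIN_SIZE - STACK_SIZE - (GUARD_SIZE + GUARD_SIZE);

	/*
	 * Hypothetical helper: hand out the current slot, then step down
	 * by one stack plus one red zone.  The red zone itself is simply
	 * never mapped.
	 */
	static void *
	take_stack_slot(void)
	{
		void *stack = (void *)next_stack;

		next_stack -= STACK_SIZE + GUARD_SIZE;
		return (stack);
	}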