Diffstat (limited to 'lib/libpthread/thread/thr_stack.c')
-rw-r--r--  lib/libpthread/thread/thr_stack.c | 209
1 file changed, 103 insertions(+), 106 deletions(-)
diff --git a/lib/libpthread/thread/thr_stack.c b/lib/libpthread/thread/thr_stack.c
index c75d6ee..f14289e 100644
--- a/lib/libpthread/thread/thr_stack.c
+++ b/lib/libpthread/thread/thr_stack.c
@@ -28,9 +28,7 @@
*/
#include <sys/types.h>
#include <sys/mman.h>
-#include <sys/param.h>
#include <sys/queue.h>
-#include <sys/user.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"
@@ -44,31 +42,32 @@ struct stack {
};
/*
- * Default sized (stack and guard) spare stack queue. Stacks are cached to
- * avoid additional complexity managing mmap()ed stack regions. Spare stacks
- * are used in LIFO order to increase cache locality.
+ * Default sized (stack and guard) spare stack queue. Stacks are cached
+ * to avoid additional complexity managing mmap()ed stack regions. Spare
+ * stacks are used in LIFO order to increase cache locality.
*/
-static LIST_HEAD(, stack) _dstackq = LIST_HEAD_INITIALIZER(_dstackq);
+static LIST_HEAD(, stack) dstackq = LIST_HEAD_INITIALIZER(dstackq);
/*
* Miscellaneous sized (non-default stack and/or guard) spare stack queue.
- * Stacks are cached to avoid additional complexity managing mmap()ed stack
- * regions. This list is unordered, since ordering on both stack size and guard
- * size would be more trouble than it's worth. Stacks are allocated from this
- * cache on a first size match basis.
+ * Stacks are cached to avoid additional complexity managing mmap()ed
+ * stack regions. This list is unordered, since ordering on both stack
+ * size and guard size would be more trouble than it's worth. Stacks are
+ * allocated from this cache on a first size match basis.
*/
-static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
+static LIST_HEAD(, stack) mstackq = LIST_HEAD_INITIALIZER(mstackq);
/**
- * Base address of the last stack allocated (including its red zone, if there is
- * one). Stacks are allocated contiguously, starting beyond the top of the main
- * stack. When a new stack is created, a red zone is typically created
- * (actually, the red zone is simply left unmapped) above the top of the stack,
- * such that the stack will not be able to grow all the way to the bottom of the
- * next stack. This isn't fool-proof. It is possible for a stack to grow by a
- * large amount, such that it grows into the next stack, and as long as the
- * memory within the red zone is never accessed, nothing will prevent one thread
- * stack from trouncing all over the next.
+ * Base address of the last stack allocated (including its red zone, if
+ * there is one). Stacks are allocated contiguously, starting beyond the
+ * top of the main stack. When a new stack is created, a red zone is
+ * typically created (actually, the red zone is simply left unmapped) above
+ * the top of the stack, such that the stack will not be able to grow all
+ * the way to the bottom of the next stack. This isn't fool-proof. It is
+ * possible for a stack to grow by a large amount, such that it grows into
+ * the next stack, and as long as the memory within the red zone is never
+ * accessed, nothing will prevent one thread stack from trouncing all over
+ * the next.
*
* low memory
* . . . . . . . . . . . . . . . . . .
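
The two queues changed in the hunk above are ordinary <sys/queue.h> lists. What follows is a minimal, standalone sketch of the caching pattern they implement; cacheq and cache_pop() are illustrative names, not libpthread symbols. Default-sized stacks are pushed and popped at the list head (LIFO, for cache locality), while miscellaneous stacks are taken on a first size match, exactly as the comments describe.

#include <sys/queue.h>
#include <stddef.h>

struct stack {
	LIST_ENTRY(stack)	qe;		/* linkage in a spare queue */
	void			*stackaddr;	/* base of the stack mapping */
	size_t			stacksize;	/* rounded stack size */
	size_t			guardsize;	/* red zone size */
};

static LIST_HEAD(, stack) cacheq = LIST_HEAD_INITIALIZER(cacheq);

/*
 * Pop the first entry whose sizes match.  On a default-size queue every
 * entry matches, so this degenerates to taking the head (LIFO reuse).
 */
static struct stack *
cache_pop(size_t stacksize, size_t guardsize)
{
	struct stack *sp;

	LIST_FOREACH(sp, &cacheq, qe) {
		if (sp->stacksize == stacksize &&
		    sp->guardsize == guardsize) {
			LIST_REMOVE(sp, qe);
			return (sp);
		}
	}
	return (NULL);
}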
@@ -112,50 +111,51 @@ static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
* high memory
*
*/
-static void * last_stack;
+static void *last_stack = NULL;
-void *
-_thread_stack_alloc(size_t stacksize, size_t guardsize)
+int
+_thr_stack_alloc(struct pthread_attr *attr)
{
- void *stack = NULL;
- struct stack *spare_stack;
- size_t stack_size;
+ struct stack *spare_stack;
+ struct kse *curkse;
+ kse_critical_t crit;
+ size_t stacksize;
+ size_t guardsize;
+
+ stacksize = attr->stacksize_attr;
+ guardsize = attr->guardsize_attr;
/*
- * Round up stack size to nearest multiple of _pthread_page_size,
- * so that mmap() will work. If the stack size is not an even
- * multiple, we end up initializing things such that there is unused
- * space above the beginning of the stack, so the stack sits snugly
- * against its guard.
+ * Round up stack size to nearest multiple of _thr_page_size so
+ * that mmap() will work. If the stack size is not an even
+ * multiple, we end up initializing things such that there is
+ * unused space above the beginning of the stack, so the stack
+ * sits snugly against its guard.
*/
- if (stacksize % _pthread_page_size != 0)
- stack_size = ((stacksize / _pthread_page_size) + 1) *
- _pthread_page_size;
- else
- stack_size = stacksize;
+ if ((stacksize % _thr_page_size) != 0)
+ stacksize = ((stacksize / _thr_page_size) + 1) *
+ _thr_page_size;
+ attr->stackaddr_attr = NULL;
+ attr->flags &= ~THR_STACK_USER;
/*
+ * Use the garbage collector lock for synchronization of the
+ * spare stack lists and allocations from usrstack.
+ */
+ crit = _kse_critical_enter();
+ curkse = _get_curkse();
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ /*
* If the stack and guard sizes are default, try to allocate a stack
* from the default-size stack cache:
*/
- if (stack_size == PTHREAD_STACK_DEFAULT &&
- guardsize == _pthread_guard_default) {
- /*
- * Use the garbage collector mutex for synchronization of the
- * spare stack list.
- */
- if (pthread_mutex_lock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- if ((spare_stack = LIST_FIRST(&_dstackq)) != NULL) {
- /* Use the spare stack. */
+ if ((stacksize == THR_STACK_DEFAULT) &&
+ (guardsize == _thr_guard_default)) {
+ if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
+ /* Use the spare stack. */
LIST_REMOVE(spare_stack, qe);
- stack = spare_stack->stackaddr;
+ attr->stackaddr_attr = spare_stack->stackaddr;
}
-
- /* Unlock the garbage collector mutex. */
- if (pthread_mutex_unlock(&_gc_mutex) != 0)
- PANIC("Cannot unlock gc mutex");
}
/*
* The user specified a non-default stack and/or guard size, so try to
@@ -163,78 +163,75 @@ _thread_stack_alloc(size_t stacksize, size_t guardsize)
* rounded up stack size (stack_size) in the search:
*/
else {
- /*
- * Use the garbage collector mutex for synchronization of the
- * spare stack list.
- */
- if (pthread_mutex_lock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- LIST_FOREACH(spare_stack, &_mstackq, qe) {
- if (spare_stack->stacksize == stack_size &&
+ LIST_FOREACH(spare_stack, &mstackq, qe) {
+ if (spare_stack->stacksize == stacksize &&
spare_stack->guardsize == guardsize) {
LIST_REMOVE(spare_stack, qe);
- stack = spare_stack->stackaddr;
+ attr->stackaddr_attr = spare_stack->stackaddr;
break;
}
}
-
- /* Unlock the garbage collector mutex. */
- if (pthread_mutex_unlock(&_gc_mutex) != 0)
- PANIC("Cannot unlock gc mutex");
}
-
- /* Check if a stack was not allocated from a stack cache: */
- if (stack == NULL) {
-
+ if (attr->stackaddr_attr != NULL) {
+ /* A cached stack was found. Release the lock. */
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+ _kse_critical_leave(crit);
+ }
+ else {
+ /* Allocate a stack from usrstack. */
if (last_stack == NULL)
- last_stack = _usrstack - PTHREAD_STACK_INITIAL -
- _pthread_guard_default;
+ last_stack = _usrstack - THR_STACK_INITIAL -
+ _thr_guard_default;
/* Allocate a new stack. */
- stack = last_stack - stack_size;
+ attr->stackaddr_attr = last_stack - stacksize;
/*
- * Even if stack allocation fails, we don't want to try to use
- * this location again, so unconditionally decrement
+ * Even if stack allocation fails, we don't want to try to
+ * use this location again, so unconditionally decrement
* last_stack. Under normal operating conditions, the most
- * likely reason for an mmap() error is a stack overflow of the
- * adjacent thread stack.
+ * likely reason for an mmap() error is a stack overflow of
+ * the adjacent thread stack.
*/
- last_stack -= (stack_size + guardsize);
+ last_stack -= (stacksize + guardsize);
- /* Stack: */
- if (mmap(stack, stack_size, PROT_READ | PROT_WRITE, MAP_STACK,
- -1, 0) == MAP_FAILED)
- stack = NULL;
- }
+ /* Release the lock before mmap'ing it. */
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+ _kse_critical_leave(crit);
- return (stack);
+ /* Map the stack, but not the guard page: */
+ if (mmap(attr->stackaddr_attr, stacksize,
+ PROT_READ | PROT_WRITE, MAP_STACK, -1, 0) == MAP_FAILED)
+ attr->stackaddr_attr = NULL;
+ }
+ if (attr->stackaddr_attr != NULL)
+ return (0);
+ else
+ return (-1);
}
-/* This function must be called with _gc_mutex held. */
+/* This function must be called with _thread_list_lock held. */
void
-_thread_stack_free(void *stack, size_t stacksize, size_t guardsize)
+_thr_stack_free(struct pthread_attr *attr)
{
- struct stack *spare_stack;
-
- spare_stack = (stack + stacksize - sizeof(struct stack));
- /* Round stacksize up to nearest multiple of _pthread_page_size. */
- if (stacksize % _pthread_page_size != 0) {
- spare_stack->stacksize =
- ((stacksize / _pthread_page_size) + 1) *
- _pthread_page_size;
- } else
- spare_stack->stacksize = stacksize;
- spare_stack->guardsize = guardsize;
- spare_stack->stackaddr = stack;
-
- if (spare_stack->stacksize == PTHREAD_STACK_DEFAULT &&
- spare_stack->guardsize == _pthread_guard_default) {
- /* Default stack/guard size. */
- LIST_INSERT_HEAD(&_dstackq, spare_stack, qe);
- } else {
- /* Non-default stack/guard size. */
- LIST_INSERT_HEAD(&_mstackq, spare_stack, qe);
+ struct stack *spare_stack;
+
+ if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
+ && (attr->stackaddr_attr != NULL)) {
+ spare_stack = (attr->stackaddr_attr + attr->stacksize_attr
+ - sizeof(struct stack));
+ spare_stack->stacksize = attr->stacksize_attr;
+ spare_stack->guardsize = attr->guardsize_attr;
+ spare_stack->stackaddr = attr->stackaddr_attr;
+
+ if (spare_stack->stacksize == THR_STACK_DEFAULT &&
+ spare_stack->guardsize == _thr_guard_default) {
+ /* Default stack/guard size. */
+ LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
+ } else {
+ /* Non-default stack/guard size. */
+ LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
+ }
+ attr->stackaddr_attr = NULL;
}
}
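
The allocation path above maps only the usable stack pages and simply never maps the guard region beneath them, so that region faults on access. A self-contained sketch under the same assumptions (FreeBSD's MAP_STACK flag; alloc_stack() and the `below` parameter are illustrative only):

#include <sys/types.h>
#include <sys/mman.h>
#include <stddef.h>

/*
 * Map stacksize bytes ending at `below`.  The guardsize bytes beneath
 * the mapping are deliberately left unmapped as the red zone: any
 * access in [addr - guardsize, addr) faults because it was never
 * mapped, though as the file's comment notes, a large enough jump can
 * still skip past it.
 */
static void *
alloc_stack(void *below, size_t stacksize, size_t guardsize)
{
	void *addr = (char *)below - stacksize;

	if (mmap(addr, stacksize, PROT_READ | PROT_WRITE,
	    MAP_STACK, -1, 0) == MAP_FAILED)
		return (NULL);
	(void)guardsize;	/* red zone: intentionally no mmap() call */
	return (addr);
}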
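The rewritten _thr_stack_free() keeps the trick of storing the bookkeeping record inside the memory being cached: the struct stack is carved out of the high end of the stack mapping itself, so an idle cached stack costs no separate allocation. A sketch building on the struct stack and cacheq from the earlier sketch:

/* Cache a no-longer-used stack, embedding the record in its top bytes. */
static void
cache_stack(void *stackaddr, size_t stacksize, size_t guardsize)
{
	struct stack *sp;

	sp = (struct stack *)((char *)stackaddr + stacksize -
	    sizeof(struct stack));
	sp->stackaddr = stackaddr;
	sp->stacksize = stacksize;
	sp->guardsize = guardsize;
	LIST_INSERT_HEAD(&cacheq, sp, qe);	/* head insert => LIFO reuse */
}

This is safe because the owning thread has finished running by the time its stack is freed, and the few bytes the record occupies are simply overwritten once the stack is recycled.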