Diffstat (limited to 'lib/libkse/thread/thr_stack.c')
-rw-r--r--   lib/libkse/thread/thr_stack.c   235
1 file changed, 235 insertions(+), 0 deletions(-)
diff --git a/lib/libkse/thread/thr_stack.c b/lib/libkse/thread/thr_stack.c
new file mode 100644
index 0000000..055af07
--- /dev/null
+++ b/lib/libkse/thread/thr_stack.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
+ * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/user.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include "pthread_private.h"
+
+/* Spare thread stack. */
+struct stack {
+	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
+	size_t			stacksize;	/* Stack size (rounded up). */
+	size_t			guardsize;	/* Guard size. */
+	void			*stackaddr;	/* Stack address. */
+};
+
+/*
+ * Default sized (stack and guard) spare stack queue. Stacks are cached to
+ * avoid additional complexity managing mmap()ed stack regions. Spare stacks
+ * are used in LIFO order to increase cache locality.
+ */
+static LIST_HEAD(, stack) _dstackq = LIST_HEAD_INITIALIZER(_dstackq);
+
+/*
+ * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
+ * Stacks are cached to avoid additional complexity managing mmap()ed stack
+ * regions. This list is unordered, since ordering on both stack size and guard
+ * size would be more trouble than it's worth. Stacks are allocated from this
+ * cache on a first size match basis.
+ */
+static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
+
+/**
+ * Base address of the last stack allocated (including its red zone, if there is
+ * one). Stacks are allocated contiguously, starting beyond the top of the main
+ * stack. When a new stack is created, a red zone is typically created
+ * (actually, the red zone is simply left unmapped) above the top of the stack,
+ * such that the stack will not be able to grow all the way to the bottom of the
+ * next stack. This isn't fool-proof. It is possible for a stack to grow by a
+ * large amount, such that it grows into the next stack, and as long as the
+ * memory within the red zone is never accessed, nothing will prevent one thread
+ * stack from trouncing all over the next.
+ *
+ * low memory
+ *     . . . . . . . . . . . . . . . . . .
+ *    |                                   |
+ *    |       stack 3                     | start of 3rd thread stack
+ *    +-----------------------------------+
+ *    |                                   |
+ *    |       Red Zone (guard page)       | red zone for 2nd thread
+ *    |                                   |
+ *    +-----------------------------------+
+ *    |  stack 2 - PTHREAD_STACK_DEFAULT  | top of 2nd thread stack
+ *    |                                   |
+ *    |                                   |
+ *    |                                   |
+ *    |                                   |
+ *    |       stack 2                     |
+ *    +-----------------------------------+ <-- start of 2nd thread stack
+ *    |                                   |
+ *    |       Red Zone                    | red zone for 1st thread
+ *    |                                   |
+ *    +-----------------------------------+
+ *    |  stack 1 - PTHREAD_STACK_DEFAULT  | top of 1st thread stack
+ *    |                                   |
+ *    |                                   |
+ *    |                                   |
+ *    |                                   |
+ *    |       stack 1                     |
+ *    +-----------------------------------+ <-- start of 1st thread stack
+ *    |                                   | (initial value of last_stack)
+ *    |       Red Zone                    |
+ *    |                                   | red zone for main thread
+ *    +-----------------------------------+
+ *    | USRSTACK - PTHREAD_STACK_INITIAL  | top of main thread stack
+ *    |                                   | ^
+ *    |                                   | |
+ *    |                                   | |
+ *    |                                   | | stack growth
+ *    |                                   |
+ *    +-----------------------------------+ <-- start of main thread stack
+ *                                              (USRSTACK)
+ * high memory
+ *
+ */
+static void * last_stack = (void *) USRSTACK - PTHREAD_STACK_INITIAL
+ - PTHREAD_GUARD_DEFAULT;
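+
+/*
+ * Example: the first stack handed out (with default sizes) occupies the
+ * PTHREAD_STACK_DEFAULT bytes ending at the initial last_stack, i.e. it sits
+ * directly below the main thread's red zone; last_stack is then moved down
+ * by PTHREAD_STACK_DEFAULT + PTHREAD_GUARD_DEFAULT so that an unmapped red
+ * zone is left below that stack before the next one is carved out.
+ */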
+
+void *
+_thread_stack_alloc(size_t stacksize, size_t guardsize)
+{
+	void *stack = NULL;
+	struct stack *spare_stack;
+	size_t stack_size;
+
+	/*
+	 * Round up stack size to nearest multiple of PAGE_SIZE, so that mmap()
+	 * will work.  If the stack size is not an even multiple, we end up
+	 * initializing things such that there is unused space above the
+	 * beginning of the stack, so the stack sits snugly against its guard.
+	 */
+	if (stacksize % PAGE_SIZE != 0)
+		stack_size = ((stacksize / PAGE_SIZE) + 1) * PAGE_SIZE;
+	else
+		stack_size = stacksize;
+
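+	/*
+	 * The rounding above means that, for example, with a 4096-byte page a
+	 * request for 70000 bytes becomes 18 pages (73728 bytes), while a
+	 * request for 65536 bytes is already a page multiple and is used as-is.
+	 */
+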
+	/*
+	 * If the stack and guard sizes are default, try to allocate a stack
+	 * from the default-size stack cache:
+	 */
+	if (stack_size == PTHREAD_STACK_DEFAULT &&
+	    guardsize == PTHREAD_GUARD_DEFAULT) {
+		/*
+		 * Use the garbage collector mutex for synchronization of the
+		 * spare stack list.
+		 */
+		if (pthread_mutex_lock(&_gc_mutex) != 0)
+			PANIC("Cannot lock gc mutex");
+
+		if ((spare_stack = LIST_FIRST(&_dstackq)) != NULL) {
+			/* Use the spare stack. */
+			LIST_REMOVE(spare_stack, qe);
+			stack = spare_stack->stackaddr;
+		}
+
+		/* Unlock the garbage collector mutex. */
+		if (pthread_mutex_unlock(&_gc_mutex) != 0)
+			PANIC("Cannot unlock gc mutex");
+	}
+	/*
+	 * The user specified a non-default stack and/or guard size, so try to
+	 * allocate a stack from the non-default size stack cache, using the
+	 * rounded up stack size (stack_size) in the search:
+	 */
+	else {
+		/*
+		 * Use the garbage collector mutex for synchronization of the
+		 * spare stack list.
+		 */
+		if (pthread_mutex_lock(&_gc_mutex) != 0)
+			PANIC("Cannot lock gc mutex");
+
+		LIST_FOREACH(spare_stack, &_mstackq, qe) {
+			if (spare_stack->stacksize == stack_size &&
+			    spare_stack->guardsize == guardsize) {
+				LIST_REMOVE(spare_stack, qe);
+				stack = spare_stack->stackaddr;
+				break;
+			}
+		}
+
+		/* Unlock the garbage collector mutex. */
+		if (pthread_mutex_unlock(&_gc_mutex) != 0)
+			PANIC("Cannot unlock gc mutex");
+	}
+
+	/* Check if a stack was not allocated from a stack cache: */
+	if (stack == NULL) {
+
+		/* Allocate a new stack. */
+
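+		/*
+		 * The new stack is carved out of the address space directly
+		 * below the previous allocation (or, for the very first
+		 * allocation, below the main thread's red zone); see the
+		 * layout diagram above.
+		 */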
+		stack = last_stack - stack_size;
+
+		/*
+		 * Even if stack allocation fails, we don't want to try to use
+		 * this location again, so unconditionally decrement
+		 * last_stack.  Under normal operating conditions, the most
+		 * likely reason for an mmap() error is a stack overflow of the
+		 * adjacent thread stack.
+		 */
+		last_stack -= (stack_size + guardsize);
+
+		/* Stack: */
+		if (mmap(stack, stack_size, PROT_READ | PROT_WRITE, MAP_STACK,
+		    -1, 0) == MAP_FAILED)
+			stack = NULL;
+	}
+
+	return (stack);
+}
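+
+/*
+ * Illustrative sketch only (the real callers live elsewhere in the library):
+ * a thread-creation path would typically pair these routines as
+ *
+ *	stackaddr = _thread_stack_alloc(PTHREAD_STACK_DEFAULT,
+ *	    PTHREAD_GUARD_DEFAULT);
+ *	if (stackaddr == NULL)
+ *		... creation fails ...
+ *
+ * and, once the thread is no longer using the stack,
+ *
+ *	pthread_mutex_lock(&_gc_mutex);
+ *	_thread_stack_free(stackaddr, PTHREAD_STACK_DEFAULT,
+ *	    PTHREAD_GUARD_DEFAULT);
+ *	pthread_mutex_unlock(&_gc_mutex);
+ */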
+
+/* This function must be called with _gc_mutex held. */
+void
+_thread_stack_free(void *stack, size_t stacksize, size_t guardsize)
+{
+	struct stack *spare_stack;
+
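+	/*
+	 * The spare-stack record is kept inside the stack region being freed,
+	 * at its highest addresses; since the thread that owned the stack is
+	 * no longer running on it, no separate allocation is needed.
+	 */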
+	spare_stack = (stack + stacksize - sizeof(struct stack));
+	/* Round stacksize up to nearest multiple of PAGE_SIZE. */
+	if (stacksize % PAGE_SIZE != 0) {
+		spare_stack->stacksize = ((stacksize / PAGE_SIZE) + 1) *
+		    PAGE_SIZE;
+	} else
+		spare_stack->stacksize = stacksize;
+	spare_stack->guardsize = guardsize;
+	spare_stack->stackaddr = stack;
+
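+	/*
+	 * Insert at the head of the appropriate queue; for the default-size
+	 * queue this gives the LIFO reuse order described above.
+	 */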
+	if (spare_stack->stacksize == PTHREAD_STACK_DEFAULT &&
+	    spare_stack->guardsize == PTHREAD_GUARD_DEFAULT) {
+		/* Default stack/guard size. */
+		LIST_INSERT_HEAD(&_dstackq, spare_stack, qe);
+	} else {
+		/* Non-default stack/guard size. */
+		LIST_INSERT_HEAD(&_mstackq, spare_stack, qe);
+	}
+}