author:    mini <mini@FreeBSD.org>	2002-10-30 06:07:18 +0000
committer: mini <mini@FreeBSD.org>	2002-10-30 06:07:18 +0000
commit:    ee4068ef29f700291bdfe8867286996571bb58aa (patch)
tree:      0bf6e4da0afbcb394be3c20c8b89b3a674bd8c07 /lib
parent:    a8da55afda685ca2151899c0d25f837c962b9670 (diff)
download:  FreeBSD-src-ee4068ef29f700291bdfe8867286996571bb58aa.zip
           FreeBSD-src-ee4068ef29f700291bdfe8867286996571bb58aa.tar.gz
Use KSE to schedule threads.
Diffstat (limited to 'lib')
-rw-r--r--  lib/libkse/arch/i386/i386/thr_enter_uts.S        96
-rw-r--r--  lib/libkse/sys/Makefile.inc                       3
-rw-r--r--  lib/libkse/thread/Makefile.inc                    1
-rw-r--r--  lib/libkse/thread/thr_create.c                   16
-rw-r--r--  lib/libkse/thread/thr_init.c                     27
-rw-r--r--  lib/libkse/thread/thr_kern.c                    416
-rw-r--r--  lib/libkse/thread/thr_printf.c                  124
-rw-r--r--  lib/libkse/thread/thr_priority_queue.c            6
-rw-r--r--  lib/libkse/thread/thr_private.h                  33
-rw-r--r--  lib/libpthread/arch/i386/i386/thr_enter_uts.S    96
-rw-r--r--  lib/libpthread/arch/i386/i386/thr_switch.S       89
-rw-r--r--  lib/libpthread/sys/Makefile.inc                   3
-rw-r--r--  lib/libpthread/thread/Makefile.inc                1
-rw-r--r--  lib/libpthread/thread/thr_create.c               16
-rw-r--r--  lib/libpthread/thread/thr_init.c                 27
-rw-r--r--  lib/libpthread/thread/thr_kern.c                416
-rw-r--r--  lib/libpthread/thread/thr_printf.c              124
-rw-r--r--  lib/libpthread/thread/thr_priority_queue.c        6
-rw-r--r--  lib/libpthread/thread/thr_private.h              33
19 files changed, 1015 insertions, 518 deletions
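
Orientation note: the patch below replaces the old swapcontext()-based scheduler
with KSE upcalls.  The following condensed C sketch of the wiring, derived from
the thr_init.c and thr_kern.c hunks and assuming the 2002-era <sys/kse.h>
interface, shows the moving parts; the stack size and error handling are
illustrative only, not part of the commit.

#include <sys/types.h>
#include <sys/kse.h>		/* struct kse_mailbox, kse_create() */
#include <stdlib.h>

#define UTS_STACK_SIZE	(128 * 1024)	/* illustrative size */

static struct kse_mailbox uts_mbox;	/* one mailbox per KSE */

/*
 * The user thread scheduler (UTS).  The kernel upcalls here on
 * uts_mbox.km_stack whenever a scheduling decision is needed; threads
 * that finished blocking in the kernel are chained on km->km_completed.
 */
static void
uts_entry(struct kse_mailbox *km)
{
	for (;;) {
		/* Drain km->km_completed, pick a runnable thread,   */
		/* then _thread_switch() into that thread's mailbox. */
	}
}

static int
uts_start(struct kse_thr_mailbox *initial)
{
	uts_mbox.km_stack.ss_sp = malloc(UTS_STACK_SIZE);
	uts_mbox.km_stack.ss_size = UTS_STACK_SIZE;
	uts_mbox.km_func = (void *)uts_entry;
	/*
	 * Non-NULL km_curthread means "a thread is running"; the UTS
	 * stores NULL there while it is itself on the CPU, which acts
	 * as the library's critical-section flag.
	 */
	uts_mbox.km_curthread = initial;
	return (kse_create(&uts_mbox, 0));
}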
diff --git a/lib/libkse/arch/i386/i386/thr_enter_uts.S b/lib/libkse/arch/i386/i386/thr_enter_uts.S
new file mode 100644
index 0000000..ad643a4
--- /dev/null
+++ b/lib/libkse/arch/i386/i386/thr_enter_uts.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2002 Jonathan Mini <mini@freebsd.org>.
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Where do we define these?
+ */
+#define	MC_SIZE			640	/* sizeof mcontext_t */
+#define	UC_MC_OFFSET		16	/* offset to mcontext from ucontext */
+#define	MC_LEN_OFFSET		80	/* offset to mc_len from mcontext */
+#define	MC_FP_CW_OFFSET		96	/* offset to FP control word */
+#define	MC_OWNEDFP_OFFSET	88	/* offset to mc_ownedfp from mcontext */
+#define	KM_STACK_SP_OFFSET	32	/* offset to km_stack.ss_sp */
+#define	KM_STACK_SIZE_OFFSET	36	/* offset to km_stack.ss_size */
+#define	KM_FUNC_OFFSET		28	/* offset to km_func */
+
+/*
+ * int _thread_enter_uts(kse_thr_mailbox *tm, kse_mailbox *km);
+ *
+ * Returns 0 on success, -1 otherwise.
+ */
+ENTRY(_thread_enter_uts)
+	movl	4(%esp), %eax		/* get address of context */
+	cmpl	$0, %eax		/* check for null pointer */
+	jne	1f
+	movl	$-1, %eax
+	jmp	2f
+1:	pushl	%edx			/* save value of edx */
+	movl	%eax, %edx		/* get address of context */
+	addl	$UC_MC_OFFSET, %edx	/* add offset to mcontext */
+	movl	%gs, 4(%edx)
+	movl	%fs, 8(%edx)
+	movl	%es, 12(%edx)
+	movl	%ds, 16(%edx)
+	movl	%edi, 20(%edx)
+	movl	%esi, 24(%edx)
+	movl	%ebp, 28(%edx)
+	movl	%ebx, 36(%edx)
+	movl	$0, 48(%edx)		/* store successful return in eax */
+	popl	%eax			/* get saved value of edx */
+	movl	%eax, 40(%edx)		/* save edx */
+	movl	%ecx, 44(%edx)
+	movl	(%esp), %eax		/* get return address */
+	movl	%eax, 60(%edx)		/* save return address */
+	movl	%ss, 76(%edx)
+	/*
+	 * Don't save floating point registers here.
+	 *
+	 * This is an explicit call to get the current context, so
+	 * the caller is done with the floating point registers.
+	 * Contexts formed by involuntary switches, such as signal delivery,
+	 * have floating point registers saved by the kernel.
+	 */
+	fnstcw	MC_FP_CW_OFFSET(%edx)
+	movl	$0, MC_OWNEDFP_OFFSET(%edx)	/* no FP */
+	lahf				/* get eflags */
+	movl	%eax, 68(%edx)		/* store eflags */
+	movl	%esp, %eax		/* setcontext pushes the return  */
+	addl	$4, %eax		/* address onto the top of the   */
+	movl	%eax, 72(%edx)		/* stack; account for this       */
+	movl	$MC_SIZE, MC_LEN_OFFSET(%edx)	/* context is now valid */
+	movl	8(%esp), %edx		/* get address of mailbox */
+	movl	KM_STACK_SP_OFFSET(%edx), %eax	/* get bottom of stack */
+	addl	KM_STACK_SIZE_OFFSET(%edx), %eax /* add length */
+	movl	%eax, %esp		/* switch to the uts's stack */
+	pushl	%edx			/* push the address of the mailbox */
+	pushl	KM_FUNC_OFFSET(%edx)	/* .. the uts can return to itself */
+	pushl	KM_FUNC_OFFSET(%edx)	/* push the address of the uts func */
2:	ret
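
The "Where do we define these?" note above is apt: the offsets are maintained
by hand against <machine/ucontext.h> and <sys/kse.h>.  A hedged sketch of a
build-time guard that would catch drift follows; it is not part of the commit,
and the numeric values are simply those the assembly asserts:

#include <stddef.h>
#include <sys/types.h>
#include <sys/kse.h>
#include <ucontext.h>

#define CT_CAT2(a, b)	a##b
#define CT_CAT(a, b)	CT_CAT2(a, b)
/* Negative array size forces a compile error when the check fails. */
#define CT_CHECK(e)	typedef char CT_CAT(ct_check_, __LINE__)[(e) ? 1 : -1]

CT_CHECK(sizeof(mcontext_t) == 640);			/* MC_SIZE */
CT_CHECK(offsetof(ucontext_t, uc_mcontext) == 16);	/* UC_MC_OFFSET */
CT_CHECK(offsetof(struct kse_mailbox, km_func) == 28);	/* KM_FUNC_OFFSET */
CT_CHECK(offsetof(struct kse_mailbox, km_stack) == 32);	/* KM_STACK_SP_OFFSET */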
diff --git a/lib/libkse/sys/Makefile.inc b/lib/libkse/sys/Makefile.inc
index bfcb3d9..2945285 100644
--- a/lib/libkse/sys/Makefile.inc
+++ b/lib/libkse/sys/Makefile.inc
@@ -2,5 +2,4 @@
 
 .PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
 
-SRCS+=	thr_error.c _atomic_lock.S
-
+SRCS+=	thr_error.c _atomic_lock.S thr_enter_uts.S thr_switch.S
diff --git a/lib/libkse/thread/Makefile.inc b/lib/libkse/thread/Makefile.inc
index 6941c6a..7b514c8 100644
--- a/lib/libkse/thread/Makefile.inc
+++ b/lib/libkse/thread/Makefile.inc
@@ -62,6 +62,7 @@ SRCS+= \
 	thr_open.c \
 	thr_pause.c \
 	thr_poll.c \
+	thr_printf.c \
 	thr_priority_queue.c \
 	thr_pselect.c \
 	thr_read.c \
diff --git a/lib/libkse/thread/thr_create.c b/lib/libkse/thread/thr_create.c
index 0e72e96..c9f5a42 100644
--- a/lib/libkse/thread/thr_create.c
+++ b/lib/libkse/thread/thr_create.c
@@ -50,7 +50,7 @@ int _thread_next_offset = OFF(tle.tqe_next);
 int _thread_uniqueid_offset = OFF(uniqueid);
 int _thread_state_offset = OFF(state);
 int _thread_name_offset = OFF(name);
-int _thread_ctx_offset = OFF(ctx);
+int _thread_ctx_offset = OFF(mailbox.tm_context);
 #undef OFF
 
 int _thread_PS_RUNNING_value = PS_RUNNING;
@@ -122,11 +122,14 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
 		new_thread->magic = PTHREAD_MAGIC;
 
 		/* Initialise the machine context: */
-		getcontext(&new_thread->ctx);
-		new_thread->ctx.uc_stack.ss_sp = new_thread->stack;
-		new_thread->ctx.uc_stack.ss_size =
+		getcontext(&new_thread->mailbox.tm_context);
+		new_thread->mailbox.tm_context.uc_stack.ss_sp =
+		    new_thread->stack;
+		new_thread->mailbox.tm_context.uc_stack.ss_size =
 		    pattr->stacksize_attr;
-		makecontext(&new_thread->ctx, _thread_start, 1);
+		makecontext(&new_thread->mailbox.tm_context,
+		    _thread_start, 1);
+		new_thread->mailbox.tm_udata = (void *)new_thread;
 
 		/* Copy the thread attributes: */
 		memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));
@@ -230,9 +233,6 @@ _thread_start(void)
 {
 	struct pthread *curthread = _get_curthread();
 
-	/* We just left the scheduler via swapcontext: */
-	_thread_kern_in_sched = 0;
-
 	/* Run the current thread's start routine with argument: */
 	pthread_exit(curthread->start_routine(curthread->arg));
diff --git a/lib/libkse/thread/thr_init.c b/lib/libkse/thread/thr_init.c
index 9f6a533..4dd356a 100644
--- a/lib/libkse/thread/thr_init.c
+++ b/lib/libkse/thread/thr_init.c
@@ -266,15 +266,12 @@ _thread_init(void)
 		_thread_initial->attr.stackaddr_attr = _thread_initial->stack;
 		_thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL;
 
-		/* Setup the context for the scheduler: */
-		getcontext(&_thread_kern_sched_ctx);
-		_thread_kern_sched_ctx.uc_stack.ss_sp =
+		/* Setup the context for the scheduler. */
+		_thread_kern_kse_mailbox.km_stack.ss_sp =
 		    _thread_kern_sched_stack;
-		_thread_kern_sched_ctx.uc_stack.ss_size = sched_stack_size;
-		makecontext(&_thread_kern_sched_ctx, _thread_kern_scheduler, 1);
-
-		/* Block all signals to the scheduler's context. */
-		sigfillset(&_thread_kern_sched_ctx.uc_sigmask);
+		_thread_kern_kse_mailbox.km_stack.ss_size = sched_stack_size;
+		_thread_kern_kse_mailbox.km_func =
+		    (void *)_thread_kern_scheduler;
 
 		/*
 		 * Write a magic value to the thread structure
@@ -287,9 +284,12 @@ _thread_init(void)
 		    PTHREAD_CANCEL_DEFERRED;
 
 		/* Setup the context for initial thread. */
-		getcontext(&_thread_initial->ctx);
-		_thread_kern_sched_ctx.uc_stack.ss_sp = _thread_initial->stack;
-		_thread_kern_sched_ctx.uc_stack.ss_size = PTHREAD_STACK_INITIAL;
+		getcontext(&_thread_initial->mailbox.tm_context);
+		_thread_initial->mailbox.tm_context.uc_stack.ss_sp =
+		    _thread_initial->stack;
+		_thread_initial->mailbox.tm_context.uc_stack.ss_size =
+		    PTHREAD_STACK_INITIAL;
+		_thread_initial->mailbox.tm_udata = (void *)_thread_initial;
 
 		/* Default the priority of the initial thread: */
 		_thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY;
@@ -334,6 +334,11 @@ _thread_init(void)
 		_clock_res_usec = clockinfo.tick > CLOCK_RES_USEC_MIN ?
 		    clockinfo.tick : CLOCK_RES_USEC_MIN;
 
+		/* Start KSE. */
+		_thread_kern_kse_mailbox.km_curthread =
+		    &_thread_initial->mailbox;
+		if (kse_create(&_thread_kern_kse_mailbox, 0) != 0)
+			PANIC("kse_create failed");
 	}
 
 	/* Initialise the garbage collector mutex and condition variable. */
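
Condensed from the thr_create.c and thr_init.c hunks above: each thread's
kse_thr_mailbox wraps what used to be a bare ucontext and carries a
back-pointer to the pthread, so the UTS can map a completed mailbox back to
its thread.  A sketch assuming the 2002 structures; error handling is
omitted, and the patch itself passes 1 as the makecontext() argument count:

static void
init_thread_mailbox(struct pthread *thread, void (*entry)(void))
{
	/* Capture a valid context, then point it at the thread's stack. */
	getcontext(&thread->mailbox.tm_context);
	thread->mailbox.tm_context.uc_stack.ss_sp = thread->stack;
	thread->mailbox.tm_context.uc_stack.ss_size =
	    thread->attr.stacksize_attr;
	makecontext(&thread->mailbox.tm_context, entry, 0);
	/* The UTS recovers the pthread from tm_udata on upcalls. */
	thread->mailbox.tm_udata = (void *)thread;
}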
diff --git a/lib/libkse/thread/thr_kern.c b/lib/libkse/thread/thr_kern.c
index 36ed8d8..d804a0c 100644
--- a/lib/libkse/thread/thr_kern.c
+++ b/lib/libkse/thread/thr_kern.c
@@ -73,27 +73,137 @@ static int last_tick = 0;
 void
 _thread_kern_sched(void)
 {
+	struct timespec ts;
+	struct timeval tv;
 	struct pthread *curthread = _get_curthread();
+	unsigned int current_tick;
+
+	/* Get the current time of day. */
+	GET_CURRENT_TOD(tv);
+	TIMEVAL_TO_TIMESPEC(&tv, &ts);
+	current_tick = _sched_ticks;
 
 	/*
-	 * Flag the pthread kernel as executing scheduler code
-	 * to avoid a scheduler signal from interrupting this
-	 * execution and calling the scheduler again.
+	 * Enter a critical section.
+	 */
+	_thread_kern_kse_mailbox.km_curthread = NULL;
+
+	/*
+	 * If this thread is becoming inactive, make note of the
+	 * time.
+	 */
+	if (curthread->state != PS_RUNNING) {
+		/*
+		 * Save the current time as the time that the
+		 * thread became inactive:
+		 */
+		curthread->last_inactive = (long)current_tick;
+		if (curthread->last_inactive <
+		    curthread->last_active) {
+			/* Account for a rollover: */
+			curthread->last_inactive +=
+			    UINT_MAX + 1;
+		}
+	}
+
+	/*
+	 * Place this thread into the appropriate queue(s).
 	 */
-	_thread_kern_in_sched = 1;
+	switch (curthread->state) {
+	case PS_DEAD:
+	case PS_STATE_MAX: /* XXX: silences -Wall */
+	case PS_SUSPENDED:
+		/* Dead or suspended threads are not placed in any queue. */
+		break;
+	case PS_RUNNING:
+		/*
+		 * Save the current time as the time that the
+		 * thread became inactive:
+		 */
+		current_tick = _sched_ticks;
+		curthread->last_inactive = (long)current_tick;
+		if (curthread->last_inactive <
+		    curthread->last_active) {
+			/* Account for a rollover: */
+			curthread->last_inactive += UINT_MAX + 1;
+		}
+
+		if ((curthread->slice_usec != -1) &&
+		    (curthread->attr.sched_policy != SCHED_FIFO)) {
+			/*
+			 * Accumulate the number of microseconds for
+			 * which the current thread has run:
+			 */
+			curthread->slice_usec +=
+			    (curthread->last_inactive -
+			    curthread->last_active) *
+			    (long)_clock_res_usec;
+			/* Check for time quantum exceeded: */
+			if (curthread->slice_usec > TIMESLICE_USEC)
+				curthread->slice_usec = -1;
+		}
+
+		if (curthread->slice_usec == -1) {
+			/*
+			 * The thread exceeded its time
+			 * quantum or it yielded the CPU;
+			 * place it at the tail of the
+			 * queue for its priority.
+			 */
+			PTHREAD_PRIOQ_INSERT_TAIL(curthread);
+		} else {
+			/*
+			 * The thread hasn't exceeded its
+			 * interval.  Place it at the head
+			 * of the queue for its priority.
+			 */
+			PTHREAD_PRIOQ_INSERT_HEAD(curthread);
+		}
+		break;
+	case PS_SPINBLOCK:
+		/* Increment spinblock count. */
+		_spinblock_count++;
+		/*FALLTHROUGH*/
+	case PS_DEADLOCK:
+	case PS_JOIN:
+	case PS_MUTEX_WAIT:
+	case PS_WAIT_WAIT:
+		/* No timeouts for these states. */
+		curthread->wakeup_time.tv_sec = -1;
+		curthread->wakeup_time.tv_nsec = -1;
+
+		/* Restart the time slice. */
+		curthread->slice_usec = -1;
+
+		/* Insert into the waiting queue. */
+		PTHREAD_WAITQ_INSERT(curthread);
+		break;
+
+	case PS_COND_WAIT:
+	case PS_SLEEP_WAIT:
+		/* These states can timeout. */
+		/* Restart the time slice. */
+		curthread->slice_usec = -1;
+
+		/* Insert into the waiting queue. */
+		PTHREAD_WAITQ_INSERT(curthread);
+		break;
+	}
 
 	/* Switch into the scheduler's context. */
-	swapcontext(&curthread->ctx, &_thread_kern_sched_ctx);
-	DBG_MSG("Returned from swapcontext, thread %p\n", curthread);
+	DBG_MSG("Calling _thread_enter_uts()\n");
+	_thread_enter_uts(&curthread->mailbox, &_thread_kern_kse_mailbox);
+	DBG_MSG("Returned from _thread_enter_uts, thread %p\n", curthread);
 
 	/*
-	 * This point is reached when swapcontext() is called
+	 * This point is reached when _thread_switch() is called
 	 * to restore the state of a thread.
 	 *
-	 * This is the normal way out of the scheduler.
+	 * This is the normal way out of the scheduler (for synchronous
+	 * switches).
 	 */
-	_thread_kern_in_sched = 0;
 
+	/* XXXKSE: Do this inside _thread_kern_scheduler() */
 	if (curthread->sig_defer_count == 0) {
 		if (((curthread->cancelflags &
 		    PTHREAD_AT_CANCEL_POINT) == 0) &&
@@ -117,125 +227,47 @@ _thread_kern_sched(void)
 }
 
 void
-_thread_kern_scheduler(void)
+_thread_kern_scheduler(struct kse_mailbox *km)
 {
 	struct timespec ts;
 	struct timeval tv;
-	struct pthread *curthread = _get_curthread();
-	pthread_t pthread, pthread_h;
+	pthread_t pthread, pthread_h;
 	unsigned int current_tick;
-	int add_to_prioq;
+	struct kse_thr_mailbox *tm, *p;
 
-	/*
-	 * Enter a scheduling loop that finds the next thread that is
-	 * ready to run.  This loop completes when there are no more threads
-	 * in the global list.  It is interrupted each time a thread is
-	 * scheduled, but will continue when we return.
-	 */
-	while (!(TAILQ_EMPTY(&_thread_list))) {
+	DBG_MSG("entering\n");
+	while (!TAILQ_EMPTY(&_thread_list)) {
 
-		/* If the currently running thread is a user thread, save it: */
-		if ((curthread->flags & PTHREAD_FLAGS_PRIVATE) == 0)
-			_last_user_thread = curthread;
-
-		/* Get the current time of day: */
+		/* Get the current time of day. */
 		GET_CURRENT_TOD(tv);
 		TIMEVAL_TO_TIMESPEC(&tv, &ts);
 		current_tick = _sched_ticks;
 
-		add_to_prioq = 0;
-		if (curthread != &_thread_kern_thread) {
-			/*
-			 * This thread no longer needs to yield the CPU.
-			 */
-			if (curthread->state != PS_RUNNING) {
-				/*
-				 * Save the current time as the time that the
-				 * thread became inactive:
-				 */
-				curthread->last_inactive = (long)current_tick;
-				if (curthread->last_inactive <
-				    curthread->last_active) {
-					/* Account for a rollover: */
-					curthread->last_inactive =+
-					    UINT_MAX + 1;
-				}
-			}
-
-			/*
-			 * Place the currently running thread into the
-			 * appropriate queue(s).
-			 */
-			switch (curthread->state) {
-			case PS_DEAD:
-			case PS_STATE_MAX: /* to silence -Wall */
-			case PS_SUSPENDED:
-				/*
-				 * Dead and suspended threads are not placed
-				 * in any queue:
-				 */
-				break;
-
-			case PS_RUNNING:
-				/*
-				 * Runnable threads can't be placed in the
-				 * priority queue until after waiting threads
-				 * are polled (to preserve round-robin
-				 * scheduling).
-				 */
-				add_to_prioq = 1;
-				break;
-
-			/*
-			 * States which do not depend on file descriptor I/O
-			 * operations or timeouts:
-			 */
-			case PS_DEADLOCK:
-			case PS_JOIN:
-			case PS_MUTEX_WAIT:
-			case PS_WAIT_WAIT:
-				/* No timeouts for these states: */
-				curthread->wakeup_time.tv_sec = -1;
-				curthread->wakeup_time.tv_nsec = -1;
-
-				/* Restart the time slice: */
-				curthread->slice_usec = -1;
-
-				/* Insert into the waiting queue: */
-				PTHREAD_WAITQ_INSERT(curthread);
-				break;
-
-			/* States which can timeout: */
-			case PS_COND_WAIT:
-			case PS_SLEEP_WAIT:
-				/* Restart the time slice: */
-				curthread->slice_usec = -1;
-
-				/* Insert into the waiting queue: */
-				PTHREAD_WAITQ_INSERT(curthread);
-				break;
-
-			/* States that require periodic work: */
-			case PS_SPINBLOCK:
-				/* No timeouts for this state: */
-				curthread->wakeup_time.tv_sec = -1;
-				curthread->wakeup_time.tv_nsec = -1;
-
-				/* Increment spinblock count: */
-				_spinblock_count++;
-
-				/* FALLTHROUGH */
-			}
+		/*
+		 * Pick up threads that had blocked in the kernel and
+		 * have now completed their trap (syscall, vm fault, etc).
+		 * These threads were PS_RUNNING (and still are), but they
+		 * need to be added to the run queue so that they can be
+		 * scheduled again.
+		 */
+		DBG_MSG("Picking up km_completed\n");
+		p = km->km_completed;
+		km->km_completed = NULL;	/* XXX: Atomic xchg here. */
+		while ((tm = p) != NULL) {
+			p = tm->tm_next;
+			tm->tm_next = NULL;
+			DBG_MSG("\tmailbox=%p pthread=%p\n", tm, tm->tm_udata);
+			PTHREAD_PRIOQ_INSERT_TAIL((pthread_t)tm->tm_udata);
 		}
 
-		last_tick = current_tick;
+		/* Deliver posted signals. */
+		/* XXX: Not yet. */
+		DBG_MSG("Picking up signals\n");
 
-		/*
-		 * Wake up threads that have timedout.  This has to be
-		 * done after polling in case a thread does a poll or
-		 * select with zero time.
-		 */
+		/* Wake up threads that have timed out. */
+		DBG_MSG("setactive\n");
 		PTHREAD_WAITQ_SETACTIVE();
+		DBG_MSG("Picking up timeouts (%x)\n", TAILQ_FIRST(&_waitingq));
 		while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) &&
 		    (pthread->wakeup_time.tv_sec != -1) &&
 		    (((pthread->wakeup_time.tv_sec == 0) &&
@@ -243,6 +275,7 @@ _thread_kern_scheduler(void)
 		    (pthread->wakeup_time.tv_sec < ts.tv_sec) ||
 		    ((pthread->wakeup_time.tv_sec == ts.tv_sec) &&
 		    (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) {
+			DBG_MSG("\t...\n");
 			/*
 			 * Remove this thread from the waiting queue
 			 * (and work queue if necessary) and place it
@@ -251,6 +284,7 @@ _thread_kern_scheduler(void)
 			PTHREAD_WAITQ_CLEARACTIVE();
 			if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
 				PTHREAD_WORKQ_REMOVE(pthread);
+			DBG_MSG("\twaking thread\n");
 			PTHREAD_NEW_STATE(pthread, PS_RUNNING);
 			PTHREAD_WAITQ_SETACTIVE();
 			/*
@@ -258,119 +292,39 @@ _thread_kern_scheduler(void)
 			 */
 			pthread->timeout = 1;
 		}
+		DBG_MSG("clearactive\n");
 		PTHREAD_WAITQ_CLEARACTIVE();
 
 		/*
-		 * Check to see if the current thread needs to be added
-		 * to the priority queue:
-		 */
-		if (add_to_prioq != 0) {
-			/*
-			 * Save the current time as the time that the
-			 * thread became inactive:
-			 */
-			current_tick = _sched_ticks;
-			curthread->last_inactive = (long)current_tick;
-			if (curthread->last_inactive <
-			    curthread->last_active) {
-				/* Account for a rollover: */
-				curthread->last_inactive =+ UINT_MAX + 1;
-			}
-
-			if ((curthread->slice_usec != -1) &&
-			    (curthread->attr.sched_policy != SCHED_FIFO)) {
-				/*
-				 * Accumulate the number of microseconds for
-				 * which the current thread has run:
-				 */
-				curthread->slice_usec +=
-				    (curthread->last_inactive -
-				    curthread->last_active) *
-				    (long)_clock_res_usec;
-				/* Check for time quantum exceeded: */
-				if (curthread->slice_usec > TIMESLICE_USEC)
-					curthread->slice_usec = -1;
-			}
-
-			if (curthread->slice_usec == -1) {
-				/*
-				 * The thread exceeded its time
-				 * quantum or it yielded the CPU;
-				 * place it at the tail of the
-				 * queue for its priority.
-				 */
-				PTHREAD_PRIOQ_INSERT_TAIL(curthread);
-			} else {
-				/*
-				 * The thread hasn't exceeded its
-				 * interval.  Place it at the head
-				 * of the queue for its priority.
-				 */
-				PTHREAD_PRIOQ_INSERT_HEAD(curthread);
-			}
-		}
-
-		/*
 		 * Get the highest priority thread in the ready queue.
 		 */
+		DBG_MSG("Selecting thread\n");
 		pthread_h = PTHREAD_PRIOQ_FIRST();
 
 		/* Check if there are no threads ready to run: */
-		if (pthread_h == NULL) {
-			/*
-			 * Lock the pthread kernel by changing the pointer to
-			 * the running thread to point to the global kernel
-			 * thread structure:
-			 */
-			_set_curthread(&_thread_kern_thread);
-			curthread = &_thread_kern_thread;
-
-			DBG_MSG("No runnable threads, using kernel thread %p\n",
-			    curthread);
-
-			/*
-			 * There are no threads ready to run, so wait until
-			 * something happens that changes this condition:
-			 */
-			thread_kern_idle();
-
-			/*
-			 * This process' usage will likely be very small
-			 * while waiting in a poll.  Since the scheduling
-			 * clock is based on the profiling timer, it is
-			 * unlikely that the profiling timer will fire
-			 * and update the time of day.  To account for this,
-			 * get the time of day after polling with a timeout.
-			 */
-			gettimeofday((struct timeval *) &_sched_tod, NULL);
-
-			/* Check once more for a runnable thread: */
-			pthread_h = PTHREAD_PRIOQ_FIRST();
-		}
-
-		if (pthread_h != NULL) {
+		if (pthread_h) {
+			DBG_MSG("Scheduling thread\n");
 			/* Remove the thread from the ready queue: */
 			PTHREAD_PRIOQ_REMOVE(pthread_h);
 
			/* Make the selected thread the current thread: */
 			_set_curthread(pthread_h);
-			curthread = pthread_h;
 
 			/*
 			 * Save the current time as the time that the thread
 			 * became active:
 			 */
 			current_tick = _sched_ticks;
-			curthread->last_active = (long) current_tick;
+			pthread_h->last_active = (long) current_tick;
 
 			/*
 			 * Check if this thread is running for the first time
 			 * or running again after using its full time slice
 			 * allocation:
 			 */
-			if (curthread->slice_usec == -1) {
+			if (pthread_h->slice_usec == -1) {
 				/* Reset the accumulated time slice period: */
-				curthread->slice_usec = 0;
+				pthread_h->slice_usec = 0;
 			}
 
 			/*
@@ -378,18 +332,47 @@ _thread_kern_scheduler(void)
 			 * installed switch hooks.
 			 */
 			if ((_sched_switch_hook != NULL) &&
-			    (_last_user_thread != curthread)) {
+			    (_last_user_thread != pthread_h)) {
 				thread_run_switch_hook(_last_user_thread,
-				    curthread);
+				    pthread_h);
 			}
 			/*
 			 * Continue the thread at its current frame:
 			 */
-			swapcontext(&_thread_kern_sched_ctx, &curthread->ctx);
+			_last_user_thread = pthread_h;
+			DBG_MSG("switch in\n");
+			_thread_switch(&pthread_h->mailbox,
+			    &_thread_kern_kse_mailbox.km_curthread);
+			DBG_MSG("switch out\n");
+		} else {
+			/*
+			 * There is nothing for us to do.  Either
+			 * yield, or idle until something wakes up.
+			 */
+			DBG_MSG("No runnable threads, idling.\n");
+
+			/*
+			 * kse_release() only returns if we are the
+			 * only thread in this process.  If so, then
+			 * we drop into an idle loop.
+			 */
+			/* XXX: kse_release(); */
+			thread_kern_idle();
+
+			/*
+			 * This thread's usage will likely be very small
+			 * while waiting in a poll.  Since the scheduling
+			 * clock is based on the profiling timer, it is
+			 * unlikely that the profiling timer will fire
+			 * and update the time of day.  To account for this,
+			 * get the time of day after polling with a timeout.
+			 */
+			gettimeofday((struct timeval *) &_sched_tod, NULL);
 		}
+		DBG_MSG("looping\n");
 	}
-
-	/* There are no more threads, so exit this process: */
+	/* There are no threads; exit. */
+	DBG_MSG("No threads, exiting.\n");
 	exit(0);
 }
 
@@ -400,10 +383,10 @@ _thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
 
 	/*
 	 * Flag the pthread kernel as executing scheduler code
-	 * to avoid a scheduler signal from interrupting this
-	 * execution and calling the scheduler again.
+	 * to avoid an upcall from interrupting this execution
+	 * and calling the scheduler again.
 	 */
-	_thread_kern_in_sched = 1;
+	_thread_kern_kse_mailbox.km_curthread = NULL;
 
 	/* Change the state of the current thread: */
 	curthread->state = state;
@@ -422,10 +405,10 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
 
 	/*
 	 * Flag the pthread kernel as executing scheduler code
-	 * to avoid a scheduler signal from interrupting this
-	 * execution and calling the scheduler again.
+	 * to avoid an upcall from interrupting this execution
+	 * and calling the scheduler again.
 	 */
-	_thread_kern_in_sched = 1;
+	_thread_kern_kse_mailbox.km_curthread = NULL;
 
 	/* Change the state of the current thread: */
 	curthread->state = state;
@@ -438,6 +421,13 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
 	_thread_kern_sched();
 }
 
+/*
+ * XXX - What we need to do here is schedule ourselves an idle thread,
+ * which does the poll()/nanosleep()/whatever, and then will cause an
+ * upcall when it expires.  This thread never gets inserted into the
+ * run_queue (in fact, there's no need for it to be a thread at all);
+ * it just needs an upcall when the timeout period has arrived.
+ */
 static void
 thread_kern_idle()
 {
@@ -459,6 +449,8 @@ thread_kern_idle()
 		/*
 		 * Either there are no threads in the waiting queue,
 		 * or there are no threads that can timeout.
+		 *
+		 * XXX: kse_yield() here, maybe?
 		 */
 		PANIC("Would idle forever");
 	}
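
The drain of km->km_completed above notes "XXX: Atomic xchg here": the kernel
appends completed thread mailboxes to this list asynchronously, so detaching
it should be a single atomic swap rather than a load followed by a store.  A
sketch, assuming the atomic_readandclear_ptr() primitive from FreeBSD's
<machine/atomic.h> is available; illustrative only:

#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/kse.h>

static struct kse_thr_mailbox *
drain_completed(struct kse_mailbox *km)
{
	/* Atomically take the whole chain, leaving NULL behind. */
	return ((struct kse_thr_mailbox *)atomic_readandclear_ptr(
	    (volatile uintptr_t *)&km->km_completed));
}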
diff --git a/lib/libkse/thread/thr_printf.c b/lib/libkse/thread/thr_printf.c
new file mode 100644
index 0000000..0da9ae5
--- /dev/null
+++ b/lib/libkse/thread/thr_printf.c
@@ -0,0 +1,124 @@
+/*-
+ * Copyright (c) 2002 Jonathan Mini <mini@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/fcntl.h>
+#include <sys/uio.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <pthread.h>
+
+#include "thr_private.h"
+
+static void	pchar(int fd, char c);
+static void	pstr(int fd, const char *s);
+
+/*
+ * Write formatted output to stdout, in a thread-safe manner.
+ *
+ * Recognises the following conversions:
+ *	%c	-> char
+ *	%d	-> signed int (base 10)
+ *	%s	-> string
+ *	%u	-> unsigned int (base 10)
+ *	%x	-> unsigned int (base 16)
+ *	%p	-> unsigned int (base 16)
+ */
+void
+_thread_printf(int fd, const char *fmt, ...)
+{
+	static const char digits[16] = "0123456789abcdef";
+	va_list	 ap;
+	char buf[10];
+	char *s;
+	unsigned r, u;
+	int c, d;
+
+	va_start(ap, fmt);
+	while ((c = *fmt++)) {
+		if (c == '%') {
+			c = *fmt++;
+			switch (c) {
+			case 'c':
+				pchar(fd, va_arg(ap, int));
+				continue;
+			case 's':
+				pstr(fd, va_arg(ap, char *));
+				continue;
+			case 'd':
+			case 'u':
+			case 'p':
+			case 'x':
+				r = ((c == 'u') || (c == 'd')) ? 10 : 16;
+				if (c == 'd') {
+					d = va_arg(ap, int);
+					if (d < 0) {
+						pchar(fd, '-');
+						u = (unsigned)(d * -1);
+					} else
+						u = (unsigned)d;
+				} else
+					u = va_arg(ap, unsigned);
+				s = buf;
+				do {
+					*s++ = digits[u % r];
+				} while (u /= r);
+				while (--s >= buf)
+					pchar(fd, *s);
+				continue;
+			}
+		}
+		pchar(fd, c);
+	}
+	va_end(ap);
+}
+
+/*
+ * Write a single character to stdout, in a thread-safe manner.
+ */
+static void
+pchar(int fd, char c)
+{
+
+	write(fd, &c, 1);
+}
+
+/*
+ * Write a string to stdout, in a thread-safe manner.
+ */
+static void
+pstr(int fd, const char *s)
+{
+
+	write(fd, s, strlen(s));
+}
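
The point of this tiny printf is reentrancy: it writes directly with write(2),
so it is safe inside the scheduler, where the old snprintf-based debug macros
were not.  A hedged usage sketch (the field names come from struct pthread in
the patch); note the %d branch negates into an unsigned, so INT_MIN would
overflow d * -1, an accepted limitation for a debug-only helper:

static void
dump_state(struct pthread *td)
{
	/* Expands to _thread_printf(STDOUT_FILENO, ...) after this patch. */
	stdout_debug("thread %p in state %d\n", td, td->state);
}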
diff --git a/lib/libkse/thread/thr_priority_queue.c b/lib/libkse/thread/thr_priority_queue.c
index 1c1b269e..7dcd752 100644
--- a/lib/libkse/thread/thr_priority_queue.c
+++ b/lib/libkse/thread/thr_priority_queue.c
@@ -68,9 +68,9 @@ static int _pq_active = 0;
 		if (((thrd)->flags & _PQ_IN_SCHEDQ) != 0)	\
 			PANIC(msg);				\
 } while (0)
-#define _PQ_ASSERT_PROTECTED(msg)			\
-	PTHREAD_ASSERT((_thread_kern_in_sched != 0) ||	\
-	    ((_get_curthread())->sig_defer_count > 0),	\
+#define _PQ_ASSERT_PROTECTED(msg)					\
+	PTHREAD_ASSERT((_thread_kern_kse_mailbox.km_curthread == NULL) || \
+	    ((_get_curthread())->sig_defer_count > 0),			\
 	    msg);
 
 #else
diff --git a/lib/libkse/thread/thr_private.h b/lib/libkse/thread/thr_private.h
index 2f49804..1388744 100644
--- a/lib/libkse/thread/thr_private.h
+++ b/lib/libkse/thread/thr_private.h
@@ -55,6 +55,7 @@
 #include <sys/types.h>
 #include <sys/time.h>
 #include <sys/cdefs.h>
+#include <sys/kse.h>
 #include <sched.h>
 #include <spinlock.h>
 #include <ucontext.h>
@@ -67,18 +68,8 @@
 
 /* Output debug messages like this: */
-#define stdout_debug(args...) do {		\
-	char buf[128];				\
-	snprintf(buf, sizeof(buf), ##args);	\
-	__sys_write(1, buf, strlen(buf));	\
-} while (0)
-#define stderr_debug(args...) do {		\
-	char buf[128];				\
-	snprintf(buf, sizeof(buf), ##args);	\
-	__sys_write(2, buf, strlen(buf));	\
-} while (0)
-
-
+#define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, args)
+#define stderr_debug(args...)	_thread_printf(STDERR_FILENO, args)
 
 /*
  * Priority queue manipulation macros (using pqe link):
@@ -511,7 +502,7 @@ struct pthread {
 	/*
 	 * Machine context, including signal state.
 	 */
-	ucontext_t	ctx;
+	struct kse_thr_mailbox	mailbox;
 
 	/*
 	 * Cancelability flags - the lower 2 bits are used by cancel
@@ -703,13 +694,6 @@ SCLASS TAILQ_HEAD(, pthread)	_thread_list
 ;
 #endif
 
-SCLASS int	_thread_kern_in_sched
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= 0;
-#else
-;
-#endif
-
 /* Time of day at last scheduling timer signal: */
 SCLASS struct timeval volatile	_sched_tod
 #ifdef GLOBAL_PTHREAD_PRIVATE
@@ -817,7 +801,7 @@ SCLASS pthread_switch_routine_t _sched_switch_hook
 /*
  * Declare the kernel scheduler jump buffer and stack:
  */
-SCLASS ucontext_t _thread_kern_sched_ctx;
+SCLASS struct kse_mailbox _thread_kern_kse_mailbox;
 
 SCLASS void *	_thread_kern_sched_stack
 #ifdef GLOBAL_PTHREAD_PRIVATE
@@ -890,15 +874,18 @@ void	_thread_cleanupspecific(void);
 void	_thread_dump_info(void);
 void	_thread_init(void);
 void	_thread_kern_sched(void);
-void	_thread_kern_scheduler(void);
+void	_thread_kern_scheduler(struct kse_mailbox *);
 void	_thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
 void	_thread_kern_sched_state_unlock(enum pthread_state state,
 	    spinlock_t *lock, char *fname, int lineno);
 void	_thread_kern_set_timeout(const struct timespec *);
 void	_thread_kern_sig_defer(void);
 void	_thread_kern_sig_undefer(void);
+void	_thread_printf(int fd, const char *, ...);
 void	_thread_start(void);
 void	_thread_seterrno(pthread_t, int);
+int	_thread_enter_uts(struct kse_thr_mailbox *tm, struct kse_mailbox *km);
+int	_thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **);
 pthread_addr_t _thread_gc(pthread_addr_t);
 void	_thread_enter_cancellation_point(void);
 void	_thread_leave_cancellation_point(void);
@@ -1012,4 +999,4 @@ ssize_t	__sys_write(int, const void *, size_t);
 
 __END_DECLS
 
-#endif  /* !_THR_PRIVATE_H */
+#endif  /* !_PTHREAD_PRIVATE_H */
diff --git a/lib/libpthread/arch/i386/i386/thr_enter_uts.S b/lib/libpthread/arch/i386/i386/thr_enter_uts.S
new file mode 100644
index 0000000..ad643a4
--- /dev/null
+++ b/lib/libpthread/arch/i386/i386/thr_enter_uts.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2002 Jonathan Mini <mini@freebsd.org>.
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Where do we define these?
+ */
+#define	MC_SIZE			640	/* sizeof mcontext_t */
+#define	UC_MC_OFFSET		16	/* offset to mcontext from ucontext */
+#define	MC_LEN_OFFSET		80	/* offset to mc_len from mcontext */
+#define	MC_FP_CW_OFFSET		96	/* offset to FP control word */
+#define	MC_OWNEDFP_OFFSET	88	/* offset to mc_ownedfp from mcontext */
+#define	KM_STACK_SP_OFFSET	32	/* offset to km_stack.ss_sp */
+#define	KM_STACK_SIZE_OFFSET	36	/* offset to km_stack.ss_size */
+#define	KM_FUNC_OFFSET		28	/* offset to km_func */
+
+/*
+ * int _thread_enter_uts(kse_thr_mailbox *tm, kse_mailbox *km);
+ *
+ * Returns 0 on success, -1 otherwise.
+ */
+ENTRY(_thread_enter_uts)
+	movl	4(%esp), %eax		/* get address of context */
+	cmpl	$0, %eax		/* check for null pointer */
+	jne	1f
+	movl	$-1, %eax
+	jmp	2f
+1:	pushl	%edx			/* save value of edx */
+	movl	%eax, %edx		/* get address of context */
+	addl	$UC_MC_OFFSET, %edx	/* add offset to mcontext */
+	movl	%gs, 4(%edx)
+	movl	%fs, 8(%edx)
+	movl	%es, 12(%edx)
+	movl	%ds, 16(%edx)
+	movl	%edi, 20(%edx)
+	movl	%esi, 24(%edx)
+	movl	%ebp, 28(%edx)
+	movl	%ebx, 36(%edx)
+	movl	$0, 48(%edx)		/* store successful return in eax */
+	popl	%eax			/* get saved value of edx */
+	movl	%eax, 40(%edx)		/* save edx */
+	movl	%ecx, 44(%edx)
+	movl	(%esp), %eax		/* get return address */
+	movl	%eax, 60(%edx)		/* save return address */
+	movl	%ss, 76(%edx)
+	/*
+	 * Don't save floating point registers here.
+	 *
+	 * This is an explicit call to get the current context, so
+	 * the caller is done with the floating point registers.
+	 * Contexts formed by involuntary switches, such as signal delivery,
+	 * have floating point registers saved by the kernel.
+	 */
+	fnstcw	MC_FP_CW_OFFSET(%edx)
+	movl	$0, MC_OWNEDFP_OFFSET(%edx)	/* no FP */
+	lahf				/* get eflags */
+	movl	%eax, 68(%edx)		/* store eflags */
+	movl	%esp, %eax		/* setcontext pushes the return  */
+	addl	$4, %eax		/* address onto the top of the   */
+	movl	%eax, 72(%edx)		/* stack; account for this       */
+	movl	$MC_SIZE, MC_LEN_OFFSET(%edx)	/* context is now valid */
+	movl	8(%esp), %edx		/* get address of mailbox */
+	movl	KM_STACK_SP_OFFSET(%edx), %eax	/* get bottom of stack */
+	addl	KM_STACK_SIZE_OFFSET(%edx), %eax /* add length */
+	movl	%eax, %esp		/* switch to the uts's stack */
+	pushl	%edx			/* push the address of the mailbox */
+	pushl	KM_FUNC_OFFSET(%edx)	/* .. the uts can return to itself */
+	pushl	KM_FUNC_OFFSET(%edx)	/* push the address of the uts func */
2:	ret
diff --git a/lib/libpthread/arch/i386/i386/thr_switch.S b/lib/libpthread/arch/i386/i386/thr_switch.S
new file mode 100644
index 0000000..a529f9d
--- /dev/null
+++ b/lib/libpthread/arch/i386/i386/thr_switch.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2002 Jonathan Mini <mini@freebsd.org>.
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Where do we define these?
+ */
+#define	MC_SIZE			640	/* sizeof mcontext_t */
+#define	UC_MC_OFFSET		16	/* offset to mcontext from ucontext */
+#define	UC_MC_LEN_OFFSET	96	/* offset to mc_len from ucontext */
+#define	MC_FP_REGS_OFFSET	96	/* offset to FP regs from mcontext */
+#define	MC_FP_CW_OFFSET		96	/* offset to FP control word */
+#define	MC_OWNEDFP_OFFSET	88	/* offset to mc_ownedfp from mcontext */
+
+/*
+ * int _thread_switch(kse_thr_mailbox *td, thread_mailbox **curthreadp);
+ *
+ * Does not return on success, returns -1 otherwise.
+ */
+ENTRY(_thread_switch)
+	movl	4(%esp), %edx		/* get address of thread_mailbox */
+					/* .. ucontext_t is at offset 0 */
+	cmpl	$0, %edx		/* check for null pointer */
+	jne	1f
+	movl	$-1, %eax
+	jmp	5f
+1:	cmpl	$MC_SIZE, UC_MC_LEN_OFFSET(%edx) /* is context valid? */
+	je	2f
+	movl	$-1, %eax		/* bzzzt, invalid context */
+	jmp	5f
+2:	movl	8(%esp), %eax		/* get address of curthreadp */
+	movl	%edx, (%eax)		/* we're now the current thread */
+	/*
+	 * From here on, we don't touch the old stack.
+	 */
+	addl	$UC_MC_OFFSET, %edx	/* add offset to mcontext */
+	movl	4(%edx), %gs
+	movl	8(%edx), %fs
+	movl	12(%edx), %es
+	movl	16(%edx), %ds
+	movl	76(%edx), %ss
+	movl	20(%edx), %edi
+	movl	24(%edx), %esi
+	movl	28(%edx), %ebp
+	movl	72(%edx), %esp		/* switch to context defined stack */
+	subl	$4, %esp		/* leave space for the return address */
+	movl	60(%edx), %eax		/* put return address at top of stack */
+	movl	%eax, (%esp)
+	cmpl	$0, MC_OWNEDFP_OFFSET(%edx)	/* are FP regs valid? */
+	jz	3f
+	frstor	MC_FP_REGS_OFFSET(%edx)	/* restore FP regs */
+	jmp	4f
+3:	fninit
+	fldcw	MC_FP_CW_OFFSET(%edx)
+4:	movl	48(%edx), %eax		/* restore ax, bx, cx */
+	movl	36(%edx), %ebx
+	movl	44(%edx), %ecx
+	pushl	68(%edx)		/* flags on stack */
+	pushl	40(%edx)		/* %edx on stack */
+	popl	%edx			/* %edx off stack */
+	popf				/* flags off stack */
5:	ret				/* %eip off stack */
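
The two assembly entry points work as a pair, per the declarations added to
thr_private.h: _thread_enter_uts() saves the caller's context into its thread
mailbox and jumps to km_func on km_stack, while _thread_switch() restores a
thread and stores its mailbox pointer through the second argument (here
km_curthread, which re-arms upcalls; NULL there means "UTS running").  A
hedged caller sketch; PANIC is the library's own abort macro:

static void
uts_resume(struct pthread *td, struct kse_mailbox *km)
{
	/* Does not return on success; the KSE is then running td. */
	_thread_switch(&td->mailbox, &km->km_curthread);
	PANIC("_thread_switch failed");
}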
*/ - sigfillset(&_thread_kern_sched_ctx.uc_sigmask); + _thread_kern_kse_mailbox.km_stack.ss_size = sched_stack_size; + _thread_kern_kse_mailbox.km_func = + (void *)_thread_kern_scheduler; /* * Write a magic value to the thread structure @@ -287,9 +284,12 @@ _thread_init(void) PTHREAD_CANCEL_DEFERRED; /* Setup the context for initial thread. */ - getcontext(&_thread_initial->ctx); - _thread_kern_sched_ctx.uc_stack.ss_sp = _thread_initial->stack; - _thread_kern_sched_ctx.uc_stack.ss_size = PTHREAD_STACK_INITIAL; + getcontext(&_thread_initial->mailbox.tm_context); + _thread_initial->mailbox.tm_context.uc_stack.ss_sp = + _thread_initial->stack; + _thread_initial->mailbox.tm_context.uc_stack.ss_size = + PTHREAD_STACK_INITIAL; + _thread_initial->mailbox.tm_udata = (void *)_thread_initial; /* Default the priority of the initial thread: */ _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY; @@ -334,6 +334,11 @@ _thread_init(void) _clock_res_usec = clockinfo.tick > CLOCK_RES_USEC_MIN ? clockinfo.tick : CLOCK_RES_USEC_MIN; + /* Start KSE. */ + _thread_kern_kse_mailbox.km_curthread = + &_thread_initial->mailbox; + if (kse_create(&_thread_kern_kse_mailbox, 0) != 0) + PANIC("kse_new failed"); } /* Initialise the garbage collector mutex and condition variable. */ diff --git a/lib/libpthread/thread/thr_kern.c b/lib/libpthread/thread/thr_kern.c index 36ed8d8..d804a0c 100644 --- a/lib/libpthread/thread/thr_kern.c +++ b/lib/libpthread/thread/thr_kern.c @@ -73,27 +73,137 @@ static int last_tick = 0; void _thread_kern_sched(void) { + struct timespec ts; + struct timeval tv; struct pthread *curthread = _get_curthread(); + unsigned int current_tick; + + /* Get the current time of day. */ + GET_CURRENT_TOD(tv); + TIMEVAL_TO_TIMESPEC(&tv, &ts); + current_tick = _sched_ticks; /* - * Flag the pthread kernel as executing scheduler code - * to avoid a scheduler signal from interrupting this - * execution and calling the scheduler again. + * Enter a critical section. + */ + _thread_kern_kse_mailbox.km_curthread = NULL; + + /* + * If this thread is becoming inactive, make note of the + * time. + */ + if (curthread->state != PS_RUNNING) { + /* + * Save the current time as the time that the + * thread became inactive: + */ + curthread->last_inactive = (long)current_tick; + if (curthread->last_inactive < + curthread->last_active) { + /* Account for a rollover: */ + curthread->last_inactive =+ + UINT_MAX + 1; + } + } + + /* + * Place this thread into the appropriate queue(s). */ - _thread_kern_in_sched = 1; + switch (curthread->state) { + case PS_DEAD: + case PS_STATE_MAX: /* XXX: silences -Wall */ + case PS_SUSPENDED: + /* Dead or suspended threads are not placed in any queue. 
*/ + break; + case PS_RUNNING: + /* + * Save the current time as the time that the + * thread became inactive: + */ + current_tick = _sched_ticks; + curthread->last_inactive = (long)current_tick; + if (curthread->last_inactive < + curthread->last_active) { + /* Account for a rollover: */ + curthread->last_inactive =+ UINT_MAX + 1; + } + + if ((curthread->slice_usec != -1) && + (curthread->attr.sched_policy != SCHED_FIFO)) { + /* + * Accumulate the number of microseconds for + * which the current thread has run: + */ + curthread->slice_usec += + (curthread->last_inactive - + curthread->last_active) * + (long)_clock_res_usec; + /* Check for time quantum exceeded: */ + if (curthread->slice_usec > TIMESLICE_USEC) + curthread->slice_usec = -1; + } + + if (curthread->slice_usec == -1) { + /* + * The thread exceeded its time + * quantum or it yielded the CPU; + * place it at the tail of the + * queue for its priority. + */ + PTHREAD_PRIOQ_INSERT_TAIL(curthread); + } else { + /* + * The thread hasn't exceeded its + * interval. Place it at the head + * of the queue for its priority. + */ + PTHREAD_PRIOQ_INSERT_HEAD(curthread); + } + break; + case PS_SPINBLOCK: + /* Increment spinblock count. */ + _spinblock_count++; + /*FALLTHROUGH*/ + case PS_DEADLOCK: + case PS_JOIN: + case PS_MUTEX_WAIT: + case PS_WAIT_WAIT: + /* No timeouts for these states. */ + curthread->wakeup_time.tv_sec = -1; + curthread->wakeup_time.tv_nsec = -1; + + /* Restart the time slice. */ + curthread->slice_usec = -1; + + /* Insert into the waiting queue. */ + PTHREAD_WAITQ_INSERT(curthread); + break; + + case PS_COND_WAIT: + case PS_SLEEP_WAIT: + /* These states can timeout. */ + /* Restart the time slice. */ + curthread->slice_usec = -1; + + /* Insert into the waiting queue. */ + PTHREAD_WAITQ_INSERT(curthread); + break; + } /* Switch into the scheduler's context. */ - swapcontext(&curthread->ctx, &_thread_kern_sched_ctx); - DBG_MSG("Returned from swapcontext, thread %p\n", curthread); + DBG_MSG("Calling _thread_enter_uts()\n"); + _thread_enter_uts(&curthread->mailbox, &_thread_kern_kse_mailbox); + DBG_MSG("Returned from _thread_enter_uts, thread %p\n", curthread); /* - * This point is reached when swapcontext() is called + * This point is reached when _thread_switch() is called * to restore the state of a thread. * - * This is the normal way out of the scheduler. + * This is the normal way out of the scheduler (for synchronous + * switches). */ - _thread_kern_in_sched = 0; + /* XXXKSE: Do this inside _thread_kern_scheduler() */ if (curthread->sig_defer_count == 0) { if (((curthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) && @@ -117,125 +227,47 @@ _thread_kern_sched(void) } void -_thread_kern_scheduler(void) +_thread_kern_scheduler(struct kse_mailbox *km) { struct timespec ts; struct timeval tv; - struct pthread *curthread = _get_curthread(); - pthread_t pthread, pthread_h; + pthread_t td, pthread, pthread_h; unsigned int current_tick; - int add_to_prioq; + struct kse_thr_mailbox *tm, *p; - /* - * Enter a scheduling loop that finds the next thread that is - * ready to run. This loop completes when there are no more threads - * in the global list. It is interrupted each time a thread is - * scheduled, but will continue when we return. 
- */ - while (!(TAILQ_EMPTY(&_thread_list))) { + DBG_MSG("entering\n"); + while (!TAILQ_EMPTY(&_thread_list)) { - /* If the currently running thread is a user thread, save it: */ - if ((curthread->flags & PTHREAD_FLAGS_PRIVATE) == 0) - _last_user_thread = curthread; - - /* Get the current time of day: */ + /* Get the current time of day. */ GET_CURRENT_TOD(tv); TIMEVAL_TO_TIMESPEC(&tv, &ts); current_tick = _sched_ticks; - add_to_prioq = 0; - if (curthread != &_thread_kern_thread) { - /* - * This thread no longer needs to yield the CPU. - */ - if (curthread->state != PS_RUNNING) { - /* - * Save the current time as the time that the - * thread became inactive: - */ - curthread->last_inactive = (long)current_tick; - if (curthread->last_inactive < - curthread->last_active) { - /* Account for a rollover: */ - curthread->last_inactive =+ - UINT_MAX + 1; - } - } - - /* - * Place the currently running thread into the - * appropriate queue(s). - */ - switch (curthread->state) { - case PS_DEAD: - case PS_STATE_MAX: /* to silence -Wall */ - case PS_SUSPENDED: - /* - * Dead and suspended threads are not placed - * in any queue: - */ - break; - - case PS_RUNNING: - /* - * Runnable threads can't be placed in the - * priority queue until after waiting threads - * are polled (to preserve round-robin - * scheduling). - */ - add_to_prioq = 1; - break; - - /* - * States which do not depend on file descriptor I/O - * operations or timeouts: - */ - case PS_DEADLOCK: - case PS_JOIN: - case PS_MUTEX_WAIT: - case PS_WAIT_WAIT: - /* No timeouts for these states: */ - curthread->wakeup_time.tv_sec = -1; - curthread->wakeup_time.tv_nsec = -1; - - /* Restart the time slice: */ - curthread->slice_usec = -1; - - /* Insert into the waiting queue: */ - PTHREAD_WAITQ_INSERT(curthread); - break; - - /* States which can timeout: */ - case PS_COND_WAIT: - case PS_SLEEP_WAIT: - /* Restart the time slice: */ - curthread->slice_usec = -1; - - /* Insert into the waiting queue: */ - PTHREAD_WAITQ_INSERT(curthread); - break; - - /* States that require periodic work: */ - case PS_SPINBLOCK: - /* No timeouts for this state: */ - curthread->wakeup_time.tv_sec = -1; - curthread->wakeup_time.tv_nsec = -1; - - /* Increment spinblock count: */ - _spinblock_count++; - - /* FALLTHROUGH */ - } + /* + * Pick up threads that had blocked in the kernel and + * have now completed their trap (syscall, vm fault, etc). + * These threads were PS_RUNNING (and still are), but they + * need to be added to the run queue so that they can be + * scheduled again. + */ + DBG_MSG("Picking up km_completed\n"); + p = km->km_completed; + km->km_completed = NULL; /* XXX: Atomic xchg here. */ + while ((tm = p) != NULL) { + p = tm->tm_next; + tm->tm_next = NULL; + DBG_MSG("\tmailbox=%p pthread=%p\n", tm, tm->tm_udata); + PTHREAD_PRIOQ_INSERT_TAIL((pthread_t)tm->tm_udata); } - last_tick = current_tick; + /* Deliver posted signals. */ + /* XXX: Not yet. */ + DBG_MSG("Picking up signals\n"); - /* - * Wake up threads that have timedout. This has to be - * done after polling in case a thread does a poll or - * select with zero time. - */ + /* Wake up threads that have timed out. 
*/ + DBG_MSG("setactive\n"); PTHREAD_WAITQ_SETACTIVE(); + DBG_MSG("Picking up timeouts (%x)\n", TAILQ_FIRST(&_waitingq)); while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) && (pthread->wakeup_time.tv_sec != -1) && (((pthread->wakeup_time.tv_sec == 0) && @@ -243,6 +275,7 @@ _thread_kern_scheduler(void) (pthread->wakeup_time.tv_sec < ts.tv_sec) || ((pthread->wakeup_time.tv_sec == ts.tv_sec) && (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) { + DBG_MSG("\t...\n"); /* * Remove this thread from the waiting queue * (and work queue if necessary) and place it @@ -251,6 +284,7 @@ _thread_kern_scheduler(void) PTHREAD_WAITQ_CLEARACTIVE(); if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ) PTHREAD_WORKQ_REMOVE(pthread); + DBG_MSG("\twaking thread\n"); PTHREAD_NEW_STATE(pthread, PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); /* @@ -258,119 +292,39 @@ _thread_kern_scheduler(void) */ pthread->timeout = 1; } + DBG_MSG("clearactive\n"); PTHREAD_WAITQ_CLEARACTIVE(); /* - * Check to see if the current thread needs to be added - * to the priority queue: - */ - if (add_to_prioq != 0) { - /* - * Save the current time as the time that the - * thread became inactive: - */ - current_tick = _sched_ticks; - curthread->last_inactive = (long)current_tick; - if (curthread->last_inactive < - curthread->last_active) { - /* Account for a rollover: */ - curthread->last_inactive =+ UINT_MAX + 1; - } - - if ((curthread->slice_usec != -1) && - (curthread->attr.sched_policy != SCHED_FIFO)) { - /* - * Accumulate the number of microseconds for - * which the current thread has run: - */ - curthread->slice_usec += - (curthread->last_inactive - - curthread->last_active) * - (long)_clock_res_usec; - /* Check for time quantum exceeded: */ - if (curthread->slice_usec > TIMESLICE_USEC) - curthread->slice_usec = -1; - } - - if (curthread->slice_usec == -1) { - /* - * The thread exceeded its time - * quantum or it yielded the CPU; - * place it at the tail of the - * queue for its priority. - */ - PTHREAD_PRIOQ_INSERT_TAIL(curthread); - } else { - /* - * The thread hasn't exceeded its - * interval. Place it at the head - * of the queue for its priority. - */ - PTHREAD_PRIOQ_INSERT_HEAD(curthread); - } - } - - /* * Get the highest priority thread in the ready queue. */ + DBG_MSG("Selecting thread\n"); pthread_h = PTHREAD_PRIOQ_FIRST(); /* Check if there are no threads ready to run: */ - if (pthread_h == NULL) { - /* - * Lock the pthread kernel by changing the pointer to - * the running thread to point to the global kernel - * thread structure: - */ - _set_curthread(&_thread_kern_thread); - curthread = &_thread_kern_thread; - - DBG_MSG("No runnable threads, using kernel thread %p\n", - curthread); - - /* - * There are no threads ready to run, so wait until - * something happens that changes this condition: - */ - thread_kern_idle(); - - /* - * This process' usage will likely be very small - * while waiting in a poll. Since the scheduling - * clock is based on the profiling timer, it is - * unlikely that the profiling timer will fire - * and update the time of day. To account for this, - * get the time of day after polling with a timeout. 
- */ - gettimeofday((struct timeval *) &_sched_tod, NULL); - - /* Check once more for a runnable thread: */ - pthread_h = PTHREAD_PRIOQ_FIRST(); - } - - if (pthread_h != NULL) { + if (pthread_h) { + DBG_MSG("Scheduling thread\n"); /* Remove the thread from the ready queue: */ PTHREAD_PRIOQ_REMOVE(pthread_h); /* Make the selected thread the current thread: */ _set_curthread(pthread_h); - curthread = pthread_h; /* * Save the current time as the time that the thread * became active: */ current_tick = _sched_ticks; - curthread->last_active = (long) current_tick; + pthread_h->last_active = (long) current_tick; /* * Check if this thread is running for the first time * or running again after using its full time slice * allocation: */ - if (curthread->slice_usec == -1) { + if (pthread_h->slice_usec == -1) { /* Reset the accumulated time slice period: */ - curthread->slice_usec = 0; + pthread_h->slice_usec = 0; } /* @@ -378,18 +332,47 @@ _thread_kern_scheduler(void) * installed switch hooks. */ if ((_sched_switch_hook != NULL) && - (_last_user_thread != curthread)) { + (_last_user_thread != pthread_h)) { thread_run_switch_hook(_last_user_thread, - curthread); + pthread_h); } /* * Continue the thread at its current frame: */ - swapcontext(&_thread_kern_sched_ctx, &curthread->ctx); + _last_user_thread = td; + DBG_MSG("switch in\n"); + _thread_switch(&pthread_h->mailbox, + &_thread_kern_kse_mailbox.km_curthread); + DBG_MSG("switch out\n"); + } else { + /* + * There is nothing for us to do. Either + * yield, or idle until something wakes up. + */ + DBG_MSG("No runnable threads, idling.\n"); + + /* + * kse_release() only returns if we are the + * only thread in this process. If so, then + * we drop into an idle loop. + */ + /* XXX: kse_release(); */ + thread_kern_idle(); + + /* + * This thread's usage will likely be very small + * while waiting in a poll. Since the scheduling + * clock is based on the profiling timer, it is + * unlikely that the profiling timer will fire + * and update the time of day. To account for this, + * get the time of day after polling with a timeout. + */ + gettimeofday((struct timeval *) &_sched_tod, NULL); } + DBG_MSG("looping\n"); } - - /* There are no more threads, so exit this process: */ + /* There are no threads; exit. */ + DBG_MSG("No threads, exiting.\n"); exit(0); } @@ -400,10 +383,10 @@ _thread_kern_sched_state(enum pthread_state state, char *fname, int lineno) /* * Flag the pthread kernel as executing scheduler code - * to avoid a scheduler signal from interrupting this - * execution and calling the scheduler again. + * to avoid an upcall from interrupting this execution + * and calling the scheduler again. */ - _thread_kern_in_sched = 1; + _thread_kern_kse_mailbox.km_curthread = NULL; /* Change the state of the current thread: */ curthread->state = state; @@ -422,10 +405,10 @@ _thread_kern_sched_state_unlock(enum pthread_state state, /* * Flag the pthread kernel as executing scheduler code - * to avoid a scheduler signal from interrupting this - * execution and calling the scheduler again. + * to avoid an upcall from interrupting this execution + * and calling the scheduler again. 
@@ -422,10 +405,10 @@ _thread_kern_sched_state_unlock(enum pthread_state state,

 	/*
 	 * Flag the pthread kernel as executing scheduler code
-	 * to avoid a scheduler signal from interrupting this
-	 * execution and calling the scheduler again.
+	 * to avoid an upcall from interrupting this execution
+	 * and calling the scheduler again.
 	 */
-	_thread_kern_in_sched = 1;
+	_thread_kern_kse_mailbox.km_curthread = NULL;

 	/* Change the state of the current thread: */
 	curthread->state = state;
@@ -438,6 +421,13 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
 	_thread_kern_sched();
 }

+/*
+ * XXX - What we need to do here is schedule ourselves an idle thread,
+ * which does the poll()/nanosleep()/whatever, and then causes an
+ * upcall when the timeout period has arrived.  This thread never gets
+ * inserted into the run_queue (in fact, there's no need for it to be
+ * a thread at all).
+ */
 static void
 thread_kern_idle()
 {
@@ -459,6 +449,8 @@ thread_kern_idle()
 	/*
 	 * Either there are no threads in the waiting queue,
 	 * or there are no threads that can timeout.
+	 *
+	 * XXX: kse_yield() here, maybe?
 	 */
 	PANIC("Would idle forever");
 }
diff --git a/lib/libpthread/thread/thr_printf.c b/lib/libpthread/thread/thr_printf.c
new file mode 100644
index 0000000..0da9ae5
--- /dev/null
+++ b/lib/libpthread/thread/thr_printf.c
@@ -0,0 +1,125 @@
+/*-
+ * Copyright (c) 2002 Jonathan Mini <mini@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/fcntl.h>
+#include <sys/uio.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#include <pthread.h>
+
+#include "thr_private.h"
+
+static void	pchar(int fd, char c);
+static void	pstr(int fd, const char *s);
+
+/*
+ * Write formatted output to the given file descriptor, in a thread-safe manner.
+ *
+ * Recognises the following conversions:
+ *	%c	-> char
+ *	%d	-> signed int (base 10)
+ *	%s	-> string
+ *	%u	-> unsigned int (base 10)
+ *	%x	-> unsigned int (base 16)
+ *	%p	-> unsigned int (base 16)
+ */
+void
+_thread_printf(int fd, const char *fmt, ...)
+{
+	static const char digits[16] = "0123456789abcdef";
+	va_list	 ap;
+	char buf[10];
+	char *s;
+	unsigned r, u;
+	int c, d;
+
+	va_start(ap, fmt);
+	while ((c = *fmt++)) {
+		if (c == '%') {
+			c = *fmt++;
+			switch (c) {
+			case 'c':
+				pchar(fd, va_arg(ap, int));
+				continue;
+			case 's':
+				pstr(fd, va_arg(ap, char *));
+				continue;
+			case 'd':
+			case 'u':
+			case 'p':
+			case 'x':
+				r = ((c == 'u') || (c == 'd')) ? 10 : 16;
+				if (c == 'd') {
+					d = va_arg(ap, int);
+					if (d < 0) {
+						pchar(fd, '-');
+						u = (unsigned)(d * -1);
+					} else
+						u = (unsigned)d;
+				} else
+					u = va_arg(ap, unsigned);
+				s = buf;
+				do {
+					*s++ = digits[u % r];
+				} while (u /= r);
+				while (--s >= buf)
+					pchar(fd, *s);
+				continue;
+			}
+		}
+		pchar(fd, c);
+	}
+	va_end(ap);
+}
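The integer arm above is the classic reversed-digit conversion: digits are generated least significant first into buf, then emitted back-to-front. The same idiom in isolation (print_unsigned is an illustrative helper, not part of thr_printf.c):

    #include <unistd.h>

    /*
     * Emit u in the given base (2..16) on fd, mirroring the
     * buf/do-while idiom used by _thread_printf above.
     * Illustrative helper, not part of the library.
     */
    static void
    print_unsigned(int fd, unsigned u, unsigned base)
    {
        static const char digits[16] = "0123456789abcdef";
        char buf[32];
        char *s = buf;

        do {
            *s++ = digits[u % base];    /* least significant first */
        } while (u /= base);
        while (--s >= buf)              /* print in reverse */
            write(fd, s, 1);
    }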
+
+/*
+ * Write a single character to the given file descriptor, thread-safely.
+ */
+static void
+pchar(int fd, char c)
+{
+
+	write(fd, &c, 1);
+}
+
+/*
+ * Write a string to the given file descriptor, in a thread-safe manner.
+ */
+static void
+pstr(int fd, const char *s)
+{
+
+	write(fd, s, strlen(s));
+}
+
diff --git a/lib/libpthread/thread/thr_priority_queue.c b/lib/libpthread/thread/thr_priority_queue.c
index 1c1b269e..7dcd752 100644
--- a/lib/libpthread/thread/thr_priority_queue.c
+++ b/lib/libpthread/thread/thr_priority_queue.c
@@ -68,9 +68,9 @@ static int _pq_active = 0;
 		if (((thrd)->flags & _PQ_IN_SCHEDQ) != 0)	\
 			PANIC(msg);				\
 } while (0)
-#define _PQ_ASSERT_PROTECTED(msg)					\
-	PTHREAD_ASSERT((_thread_kern_in_sched != 0) ||			\
-	    ((_get_curthread())->sig_defer_count > 0),			\
+#define _PQ_ASSERT_PROTECTED(msg)					\
+	PTHREAD_ASSERT((_thread_kern_kse_mailbox.km_curthread == NULL) || \
+	    ((_get_curthread())->sig_defer_count > 0),			\
 		msg);

 #else
diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h
index 2f49804..1388744 100644
--- a/lib/libpthread/thread/thr_private.h
+++ b/lib/libpthread/thread/thr_private.h
@@ -55,6 +55,7 @@
 #include <sys/types.h>
 #include <sys/time.h>
 #include <sys/cdefs.h>
+#include <sys/kse.h>
 #include <sched.h>
 #include <spinlock.h>
 #include <ucontext.h>
@@ -67,18 +68,8 @@

 /* Output debug messages like this: */
-#define stdout_debug(args...) do {		\
-	char buf[128];				\
-	snprintf(buf, sizeof(buf), ##args);	\
-	__sys_write(1, buf, strlen(buf));	\
-} while (0)
-#define stderr_debug(args...) do {		\
-	char buf[128];				\
-	snprintf(buf, sizeof(buf), ##args);	\
-	__sys_write(2, buf, strlen(buf));	\
-} while (0)
-
-
+#define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, args)
+#define stderr_debug(args...)	_thread_printf(STDERR_FILENO, args)
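The old macros formatted into a 128-byte stack buffer with snprintf(), a heavyweight call to make from user-level scheduler context; the replacements go straight to _thread_printf, which only ever issues write(2). A hedged usage sketch (the call sites and values printed are made up for illustration):

    #include "thr_private.h"    /* declares _thread_printf() and the macros */

    /* Hypothetical call sites; debug_example is not part of the library. */
    static void
    debug_example(struct pthread *td)
    {
        stdout_debug("thread %p entered state %d\n", td, td->state);
        stderr_debug("flags now %x\n", td->flags);
    }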

 /*
  * Priority queue manipulation macros (using pqe link):
@@ -511,7 +502,7 @@ struct pthread {
 	/*
 	 * Machine context, including signal state.
 	 */
-	ucontext_t	ctx;
+	struct kse_thr_mailbox	mailbox;

 	/*
 	 * Cancelability flags - the lower 2 bits are used by cancel
@@ -703,13 +694,6 @@ SCLASS TAILQ_HEAD(, pthread)	_thread_list
 ;
 #endif

-SCLASS int	_thread_kern_in_sched
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= 0;
-#else
-;
-#endif
-
 /* Time of day at last scheduling timer signal: */
 SCLASS struct timeval volatile	_sched_tod
 #ifdef GLOBAL_PTHREAD_PRIVATE
@@ -817,7 +801,7 @@ SCLASS pthread_switch_routine_t _sched_switch_hook
 /*
  * Declare the kernel scheduler jump buffer and stack:
  */
-SCLASS ucontext_t _thread_kern_sched_ctx;
+SCLASS struct kse_mailbox _thread_kern_kse_mailbox;

 SCLASS void *	_thread_kern_sched_stack
 #ifdef GLOBAL_PTHREAD_PRIVATE
@@ -890,15 +874,18 @@ void	_thread_cleanupspecific(void);
 void	_thread_dump_info(void);
 void	_thread_init(void);
 void	_thread_kern_sched(void);
-void	_thread_kern_scheduler(void);
+void	_thread_kern_scheduler(struct kse_mailbox *);
 void	_thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
 void	_thread_kern_sched_state_unlock(enum pthread_state state,
 	    spinlock_t *lock, char *fname, int lineno);
 void	_thread_kern_set_timeout(const struct timespec *);
 void	_thread_kern_sig_defer(void);
 void	_thread_kern_sig_undefer(void);
+void	_thread_printf(int fd, const char *, ...);
 void	_thread_start(void);
 void	_thread_seterrno(pthread_t, int);
+int	_thread_enter_uts(struct kse_thr_mailbox *tm, struct kse_mailbox *km);
+int	_thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **);
 pthread_addr_t _thread_gc(pthread_addr_t);
 void	_thread_enter_cancellation_point(void);
 void	_thread_leave_cancellation_point(void);
@@ -1012,4 +999,4 @@ ssize_t	__sys_write(int, const void *, size_t);

 __END_DECLS

-#endif  /* !_THR_PRIVATE_H */
+#endif  /* !_PTHREAD_PRIVATE_H */
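Of the two new entry points declared above, _thread_enter_uts(tm, km) saves the caller's context into tm and continues on the UTS stack described by km, while _thread_switch(tm, ptr) resumes the context saved in tm and stores tm through ptr, which is how km_curthread comes to name the running thread. A hedged sketch of a resume call, modeled on the scheduler hunk earlier (run_thread is an illustrative wrapper, not part of the library):

    #include <sys/kse.h>

    extern struct kse_mailbox _thread_kern_kse_mailbox;
    int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **);

    /*
     * Resume the user thread whose mailbox is tm.  Publishing tm
     * through km_curthread re-enables upcalls; the UTS regains
     * control at the mailbox's km_func on the next upcall.
     * Illustrative wrapper only.
     */
    static void
    run_thread(struct kse_thr_mailbox *tm)
    {
        _thread_switch(tm, &_thread_kern_kse_mailbox.km_curthread);
    }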