Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/imgact_elf.c | 8
-rw-r--r-- | sys/kern/init_sysent.c | 12
-rw-r--r-- | sys/kern/kern_clock.c | 8
-rw-r--r-- | sys/kern/kern_exec.c | 2
-rw-r--r-- | sys/kern/kern_fork.c | 2
-rw-r--r-- | sys/kern/kern_intr.c | 1
-rw-r--r-- | sys/kern/kern_kse.c | 1445
-rw-r--r-- | sys/kern/kern_proc.c | 4
-rw-r--r-- | sys/kern/kern_resource.c | 2
-rw-r--r-- | sys/kern/kern_sig.c | 157
-rw-r--r-- | sys/kern/kern_switch.c | 2
-rw-r--r-- | sys/kern/kern_synch.c | 14
-rw-r--r-- | sys/kern/kern_thread.c | 129
-rw-r--r-- | sys/kern/p1003_1b.c | 4
-rw-r--r-- | sys/kern/sched_4bsd.c | 2
-rw-r--r-- | sys/kern/sched_ule.c | 9
-rw-r--r-- | sys/kern/subr_sleepqueue.c | 12
-rw-r--r-- | sys/kern/subr_trap.c | 24
-rw-r--r-- | sys/kern/subr_witness.c | 1
-rw-r--r-- | sys/kern/sys_process.c | 15
-rw-r--r-- | sys/kern/syscalls.c | 12
-rw-r--r-- | sys/kern/syscalls.master | 17
-rw-r--r-- | sys/kern/systrace_args.c | 44
23 files changed, 34 insertions, 1892 deletions
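
This commit removes the KSE (Kernel Scheduled Entities) machinery: kern_kse.c is deleted outright, the #ifdef KSE blocks elsewhere in sys/kern are dropped, and the kse_exit, kse_wakeup, kse_create, kse_thr_interrupt, kse_release, and kse_switchin slots in the syscall table are rewired to nosys(). As a rough illustration of the user-visible effect, the following minimal userland sketch (not part of the commit) probes the former slot numbers taken from the init_sysent.c hunk below. It assumes the usual FreeBSD behaviour that nosys() posts SIGSYS before returning ENOSYS, so SIGSYS is ignored first.

#include <sys/syscall.h>

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* Former KSE syscall slots, per the init_sysent.c hunk below. */
	const int kse_slots[] = { 379, 380, 381, 382, 383, 440 };
	unsigned int i;

	/* nosys() posts SIGSYS before returning ENOSYS; ignore it. */
	signal(SIGSYS, SIG_IGN);
	for (i = 0; i < sizeof(kse_slots) / sizeof(kse_slots[0]); i++) {
		errno = 0;
		if (syscall(kse_slots[i]) == -1 && errno == ENOSYS)
			printf("syscall %d: ENOSYS (KSE removed)\n",
			    kse_slots[i]);
	}
	return (0);
}

On a kernel built from this revision each probe should fail with ENOSYS, whereas a pre-removal kernel would dispatch into the kse_* handlers.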
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c index ff1bcda..42f82c8 100644 --- a/sys/kern/imgact_elf.c +++ b/sys/kern/imgact_elf.c @@ -480,9 +480,6 @@ __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long base_addr = 0; int vfslocked, error, i, numsegs; - if (curthread->td_proc != p) - panic("elf_load_file - thread"); /* XXXKSE DIAGNOSTIC */ - tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK); nd = &tempdata->nd; attr = &tempdata->attr; @@ -498,7 +495,6 @@ __elfN(load_file)(struct proc *p, const char *file, u_long *addr, imgp->object = NULL; imgp->execlabel = NULL; - /* XXXKSE */ NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread); vfslocked = 0; @@ -999,7 +995,7 @@ __elfN(coredump)(td, vp, limit) (caddr_t)(uintptr_t)php->p_vaddr, php->p_filesz, offset, UIO_USERSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL, - curthread); /* XXXKSE */ + curthread); if (error != 0) break; offset += php->p_filesz; @@ -1147,7 +1143,7 @@ __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize) /* Write it to the core file. */ return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0, UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL, - td)); /* XXXKSE */ + td)); } #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32 diff --git a/sys/kern/init_sysent.c b/sys/kern/init_sysent.c index 37dd36b..5c34f2e 100644 --- a/sys/kern/init_sysent.c +++ b/sys/kern/init_sysent.c @@ -408,11 +408,11 @@ struct sysent sysent[] = { { AS(eaccess_args), (sy_call_t *)eaccess, AUE_EACCESS, NULL, 0, 0 }, /* 376 = eaccess */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 377 = afs_syscall */ { AS(nmount_args), (sy_call_t *)nmount, AUE_NMOUNT, NULL, 0, 0 }, /* 378 = nmount */ - { 0, (sy_call_t *)kse_exit, AUE_NULL, NULL, 0, 0 }, /* 379 = kse_exit */ - { AS(kse_wakeup_args), (sy_call_t *)kse_wakeup, AUE_NULL, NULL, 0, 0 }, /* 380 = kse_wakeup */ - { AS(kse_create_args), (sy_call_t *)kse_create, AUE_NULL, NULL, 0, 0 }, /* 381 = kse_create */ - { AS(kse_thr_interrupt_args), (sy_call_t *)kse_thr_interrupt, AUE_NULL, NULL, 0, 0 }, /* 382 = kse_thr_interrupt */ - { AS(kse_release_args), (sy_call_t *)kse_release, AUE_NULL, NULL, 0, 0 }, /* 383 = kse_release */ + { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 379 = kse_exit */ + { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 380 = kse_wakeup */ + { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 381 = kse_create */ + { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 382 = kse_thr_interrupt */ + { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 383 = kse_release */ { AS(__mac_get_proc_args), (sy_call_t *)__mac_get_proc, AUE_NULL, NULL, 0, 0 }, /* 384 = __mac_get_proc */ { AS(__mac_set_proc_args), (sy_call_t *)__mac_set_proc, AUE_NULL, NULL, 0, 0 }, /* 385 = __mac_set_proc */ { AS(__mac_get_fd_args), (sy_call_t *)__mac_get_fd, AUE_NULL, NULL, 0, 0 }, /* 386 = __mac_get_fd */ @@ -469,7 +469,7 @@ struct sysent sysent[] = { { AS(extattr_list_fd_args), (sy_call_t *)extattr_list_fd, AUE_EXTATTR_LIST_FD, NULL, 0, 0 }, /* 437 = extattr_list_fd */ { AS(extattr_list_file_args), (sy_call_t *)extattr_list_file, AUE_EXTATTR_LIST_FILE, NULL, 0, 0 }, /* 438 = extattr_list_file */ { AS(extattr_list_link_args), (sy_call_t *)extattr_list_link, AUE_EXTATTR_LIST_LINK, NULL, 0, 0 }, /* 439 = extattr_list_link */ - { AS(kse_switchin_args), (sy_call_t *)kse_switchin, AUE_NULL, NULL, 0, 0 }, /* 440 = kse_switchin */ + { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 440 = kse_switchin */ { AS(ksem_timedwait_args), (sy_call_t 
*)lkmressys, AUE_NULL, NULL, 0, 0 }, /* 441 = ksem_timedwait */ { AS(thr_suspend_args), (sy_call_t *)thr_suspend, AUE_NULL, NULL, 0, 0 }, /* 442 = thr_suspend */ { AS(thr_wake_args), (sy_call_t *)thr_wake, AUE_NULL, NULL, 0, 0 }, /* 443 = thr_wake */ diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c index fb0ca7e..c92d7de 100644 --- a/sys/kern/kern_clock.c +++ b/sys/kern/kern_clock.c @@ -477,10 +477,6 @@ statclock(int usermode) /* * Charge the time as appropriate. */ -#ifdef KSE - if (p->p_flag & P_SA) - thread_statclock(1); -#endif td->td_uticks++; if (p->p_nice > NZERO) cp_time[CP_NICE]++; @@ -504,10 +500,6 @@ statclock(int usermode) td->td_iticks++; cp_time[CP_INTR]++; } else { -#ifdef KSE - if (p->p_flag & P_SA) - thread_statclock(0); -#endif td->td_pticks++; td->td_sticks++; if (!TD_IS_IDLETHREAD(td)) diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c index 2f81fcd..a972cb9 100644 --- a/sys/kern/kern_exec.c +++ b/sys/kern/kern_exec.c @@ -1194,7 +1194,7 @@ exec_check_permissions(imgp) struct thread *td; int error; - td = curthread; /* XXXKSE */ + td = curthread; /* Get file attributes */ error = VOP_GETATTR(vp, attr, td->td_ucred, td); diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index e6f891d..e59a1f4 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -764,7 +764,7 @@ fork_exit(callout, arg, frame) p = td->td_proc; KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new")); - CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)", + CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)", td, td->td_sched, p->p_pid, td->td_name); sched_fork_exit(td); diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c index 17af1ee..d9b983a 100644 --- a/sys/kern/kern_intr.c +++ b/sys/kern/kern_intr.c @@ -905,7 +905,6 @@ swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, } return (intr_event_add_handler(ie, name, NULL, handler, arg, (pri * RQ_PPQ) + PI_SOFT, flags, cookiep)); - /* XXKSE.. think of a better way to get separate queues */ } /* diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c deleted file mode 100644 index 787aa49..0000000 --- a/sys/kern/kern_kse.c +++ /dev/null @@ -1,1445 +0,0 @@ -/*- - * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice(s), this list of conditions and the following disclaimer as - * the first lines of this file unmodified other than the possible - * addition of one or more copyright notices. - * 2. Redistributions in binary form must reproduce the above copyright - * notice(s), this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - */ - -#include <sys/cdefs.h> -__FBSDID("$FreeBSD$"); - -#include <sys/param.h> -#include <sys/systm.h> -#include <sys/kernel.h> -#include <sys/imgact.h> -#include <sys/lock.h> -#include <sys/mutex.h> -#include <sys/proc.h> -#include <sys/ptrace.h> -#include <sys/smp.h> -#include <sys/syscallsubr.h> -#include <sys/sysproto.h> -#include <sys/sched.h> -#include <sys/signalvar.h> -#include <sys/sleepqueue.h> -#include <sys/syslog.h> -#include <sys/kse.h> -#include <sys/ktr.h> -#include <vm/uma.h> - -#ifdef KSE -static uma_zone_t upcall_zone; - -/* DEBUG ONLY */ -extern int virtual_cpu; -extern int thread_debug; - -extern int max_threads_per_proc; -extern int max_groups_per_proc; -extern int max_threads_hits; -extern struct mtx kse_lock; - - -TAILQ_HEAD(, kse_upcall) zombie_upcalls = - TAILQ_HEAD_INITIALIZER(zombie_upcalls); - -static int thread_update_usr_ticks(struct thread *td); -static int thread_alloc_spare(struct thread *td); -static struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku); -static struct kse_upcall *upcall_alloc(void); - - -struct mtx kse_lock; -MTX_SYSINIT(kse_lock, &kse_lock, "kse lock", MTX_SPIN); - -struct kse_upcall * -upcall_alloc(void) -{ - struct kse_upcall *ku; - - ku = uma_zalloc(upcall_zone, M_WAITOK | M_ZERO); - return (ku); -} - -void -upcall_reap(void) -{ - TAILQ_HEAD(, kse_upcall) zupcalls; - struct kse_upcall *ku_item, *ku_tmp; - - TAILQ_INIT(&zupcalls); - mtx_lock_spin(&kse_lock); - if (!TAILQ_EMPTY(&zombie_upcalls)) { - TAILQ_CONCAT(&zupcalls, &zombie_upcalls, ku_link); - TAILQ_INIT(&zombie_upcalls); - } - mtx_unlock_spin(&kse_lock); - TAILQ_FOREACH_SAFE(ku_item, &zupcalls, ku_link, ku_tmp) - uma_zfree(upcall_zone, ku_item); -} - -void -upcall_remove(struct thread *td) -{ - - PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED); - THREAD_LOCK_ASSERT(td, MA_OWNED); - if (td->td_upcall != NULL) { - /* - * If we are not a bound thread then decrement the count of - * possible upcall sources - */ - if (td->td_pflags & TDP_SA) - td->td_proc->p_numupcalls--; - mtx_lock_spin(&kse_lock); - td->td_upcall->ku_owner = NULL; - TAILQ_REMOVE(&td->td_upcall->ku_proc->p_upcalls, td->td_upcall, - ku_link); - TAILQ_INSERT_HEAD(&zombie_upcalls, td->td_upcall, ku_link); - mtx_unlock_spin(&kse_lock); - td->td_upcall = NULL; - } -} -#endif - -#ifndef _SYS_SYSPROTO_H_ -struct kse_switchin_args { - struct kse_thr_mailbox *tmbx; - int flags; -}; -#endif - -#ifdef KSE -void -kse_unlink(struct thread *td) -{ - mtx_lock_spin(&kse_lock); - thread_unlink(td); - mtx_unlock_spin(&kse_lock); - upcall_remove(td); -} -#endif - -int -kse_switchin(struct thread *td, struct kse_switchin_args *uap) -{ -#ifdef KSE - struct kse_thr_mailbox tmbx; - struct kse_upcall *ku; - int error; - - thread_lock(td); - if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) { - thread_unlock(td); - return (EINVAL); - } - thread_unlock(td); - error = (uap->tmbx == NULL) ? 
EINVAL : 0; - if (!error) - error = copyin(uap->tmbx, &tmbx, sizeof(tmbx)); - if (!error && (uap->flags & KSE_SWITCHIN_SETTMBX)) - error = (suword(&ku->ku_mailbox->km_curthread, - (long)uap->tmbx) != 0 ? EINVAL : 0); - if (!error) - error = set_mcontext(td, &tmbx.tm_context.uc_mcontext); - if (!error) { - suword32(&uap->tmbx->tm_lwp, td->td_tid); - if (uap->flags & KSE_SWITCHIN_SETTMBX) { - td->td_mailbox = uap->tmbx; - td->td_pflags |= TDP_CAN_UNBIND; - } - PROC_LOCK(td->td_proc); - if (td->td_proc->p_flag & P_TRACED) { - _PHOLD(td->td_proc); - if (tmbx.tm_dflags & TMDF_SSTEP) - ptrace_single_step(td); - else - ptrace_clear_single_step(td); - if (tmbx.tm_dflags & TMDF_SUSPEND) { - thread_lock(td); - /* fuword can block, check again */ - if (td->td_upcall) - ku->ku_flags |= KUF_DOUPCALL; - thread_unlock(td); - } - _PRELE(td->td_proc); - } - PROC_UNLOCK(td->td_proc); - } - return ((error == 0) ? EJUSTRETURN : error); -#else /* !KSE */ - return (EOPNOTSUPP); -#endif -} - -/* -struct kse_thr_interrupt_args { - struct kse_thr_mailbox * tmbx; - int cmd; - long data; -}; -*/ -int -kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) -{ -#ifdef KSE - struct kse_execve_args args; - struct image_args iargs; - struct proc *p; - struct thread *td2; - struct kse_upcall *ku; - struct kse_thr_mailbox *tmbx; - uint32_t flags; - int error; - - p = td->td_proc; - - PROC_LOCK(p); - if (!(p->p_flag & P_SA)) { - PROC_UNLOCK(p); - return (EINVAL); - } - PROC_UNLOCK(p); - - switch (uap->cmd) { - case KSE_INTR_SENDSIG: - if (uap->data < 0 || uap->data > _SIG_MAXSIG) - return (EINVAL); - case KSE_INTR_INTERRUPT: - case KSE_INTR_RESTART: - PROC_LOCK(p); - PROC_SLOCK(p); - FOREACH_THREAD_IN_PROC(p, td2) { - if (td2->td_mailbox == uap->tmbx) - break; - } - if (td2 == NULL) { - PROC_SUNLOCK(p); - PROC_UNLOCK(p); - return (ESRCH); - } - thread_lock(td2); - PROC_SUNLOCK(p); - if (uap->cmd == KSE_INTR_SENDSIG) { - if (uap->data > 0) { - td2->td_flags &= ~TDF_INTERRUPT; - thread_unlock(td2); - tdsignal(p, td2, (int)uap->data, NULL); - } else { - thread_unlock(td2); - } - } else { - td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING; - if (TD_CAN_UNBIND(td2)) - td2->td_upcall->ku_flags |= KUF_DOUPCALL; - if (uap->cmd == KSE_INTR_INTERRUPT) - td2->td_intrval = EINTR; - else - td2->td_intrval = ERESTART; - if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) - sleepq_abort(td2, td2->td_intrval); - thread_unlock(td2); - } - PROC_UNLOCK(p); - break; - case KSE_INTR_SIGEXIT: - if (uap->data < 1 || uap->data > _SIG_MAXSIG) - return (EINVAL); - PROC_LOCK(p); - sigexit(td, (int)uap->data); - break; - - case KSE_INTR_DBSUSPEND: - /* this sub-function is only for bound thread */ - if (td->td_pflags & TDP_SA) - return (EINVAL); - thread_lock(td); - ku = td->td_upcall; - thread_unlock(td); - tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread); - if (tmbx == NULL || tmbx == (void *)-1) - return (EINVAL); - flags = 0; - PROC_LOCK(p); - while ((p->p_flag & P_TRACED) && !(p->p_flag & P_SINGLE_EXIT)) { - flags = fuword32(&tmbx->tm_dflags); - if (!(flags & TMDF_SUSPEND)) - break; - PROC_SLOCK(p); - thread_stopped(p); - PROC_UNLOCK(p); - thread_lock(td); - thread_suspend_one(td); - PROC_SUNLOCK(p); - mi_switch(SW_VOL, NULL); - thread_unlock(td); - PROC_LOCK(p); - } - PROC_UNLOCK(p); - return (0); - - case KSE_INTR_EXECVE: - error = copyin((void *)uap->data, &args, sizeof(args)); - if (error) - return (error); - error = exec_copyin_args(&iargs, args.path, UIO_USERSPACE, - args.argv, args.envp); - if (error == 0) - 
error = kern_execve(td, &iargs, NULL); - if (error == 0) { - PROC_LOCK(p); - SIGSETOR(td->td_siglist, args.sigpend); - PROC_UNLOCK(p); - kern_sigprocmask(td, SIG_SETMASK, &args.sigmask, NULL, - 0); - } - return (error); - - default: - return (EINVAL); - } - return (0); -#else /* !KSE */ - return (EOPNOTSUPP); -#endif -} - -/* -struct kse_exit_args { - register_t dummy; -}; -*/ -int -kse_exit(struct thread *td, struct kse_exit_args *uap) -{ -#ifdef KSE - struct proc *p; - struct kse_upcall *ku, *ku2; - int error, count; - - p = td->td_proc; - /* - * Ensure that this is only called from the UTS - */ - thread_lock(td); - if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) { - thread_unlock(td); - return (EINVAL); - } - thread_unlock(td); - - /* - * Calculate the existing non-exiting upcalls in this process. - * If we are the last upcall but there are still other threads, - * then do not exit. We need the other threads to be able to - * complete whatever they are doing. - * XXX This relies on the userland knowing what to do if we return. - * It may be a better choice to convert ourselves into a kse_release - * ( or similar) and wait in the kernel to be needed. - * XXX Where are those other threads? I suppose they are waiting in - * the kernel. We should wait for them all at the user boundary after - * turning into an exit. - */ - count = 0; - PROC_LOCK(p); - PROC_SLOCK(p); - FOREACH_UPCALL_IN_PROC(p, ku2) { - if ((ku2->ku_flags & KUF_EXITING) == 0) - count++; - } - if (count == 1 && (p->p_numthreads > 1)) { - PROC_SUNLOCK(p); - PROC_UNLOCK(p); - return (EDEADLK); - } - ku->ku_flags |= KUF_EXITING; - PROC_SUNLOCK(p); - PROC_UNLOCK(p); - - /* - * Mark the UTS mailbox as having been finished with. - * If that fails then just go for a segfault. - * XXX need to check it that can be deliverred without a mailbox. - */ - error = suword32(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE); - if (!(td->td_pflags & TDP_SA)) - if (suword32(&td->td_mailbox->tm_lwp, 0)) - error = EFAULT; - PROC_LOCK(p); - if (error) - psignal(p, SIGSEGV); - sigqueue_flush(&td->td_sigqueue); - PROC_SLOCK(p); - thread_lock(td); - upcall_remove(td); - thread_unlock(td); - if (p->p_numthreads != 1) { - thread_stopped(p); - thread_exit(); - /* NOTREACHED */ - } - /* - * This is the last thread. Just return to the user. - * Effectively we have left threading mode.. - * The only real thing left to do is ensure that the - * scheduler sets out concurrency back to 1 as that may be a - * resource leak otherwise. - * This is an A[PB]I issue.. what SHOULD we do? - * One possibility is to return to the user. It may not cope well. - * The other possibility would be to let the process exit. - */ - thread_unthread(td); - PROC_SUNLOCK(p); - PROC_UNLOCK(p); -#if 0 - return (0); -#else - printf("kse_exit: called on last thread. Calling exit1()"); - exit1(td, 0); -#endif -#else /* !KSE */ - return (EOPNOTSUPP); -#endif -} - -/* - * Either becomes an upcall or waits for an awakening event and - * then becomes an upcall. Only error cases return. - */ -/* -struct kse_release_args { - struct timespec *timeout; -}; -*/ -int -kse_release(struct thread *td, struct kse_release_args *uap) -{ -#ifdef KSE - struct proc *p; - struct kse_upcall *ku; - struct timespec timeout; - struct timeval tv; - sigset_t sigset; - int error; - - p = td->td_proc; - thread_lock(td); - if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) { - thread_unlock(td); - printf("kse_release: called outside of threading. 
exiting"); - exit1(td, 0); - } - thread_unlock(td); - if (uap->timeout != NULL) { - if ((error = copyin(uap->timeout, &timeout, sizeof(timeout)))) - return (error); - TIMESPEC_TO_TIMEVAL(&tv, &timeout); - } - if (td->td_pflags & TDP_SA) - td->td_pflags |= TDP_UPCALLING; - else { - ku->ku_mflags = fuword32(&ku->ku_mailbox->km_flags); - if (ku->ku_mflags == -1) { - PROC_LOCK(p); - sigexit(td, SIGSEGV); - } - } - PROC_LOCK(p); - if (ku->ku_mflags & KMF_WAITSIGEVENT) { - /* UTS wants to wait for signal event */ - if (!(p->p_flag & P_SIGEVENT) && - !(ku->ku_flags & KUF_DOUPCALL)) { - td->td_kflags |= TDK_KSERELSIG; - error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH, - "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0)); - td->td_kflags &= ~(TDK_KSERELSIG | TDK_WAKEUP); - } - p->p_flag &= ~P_SIGEVENT; - sigset = p->p_siglist; - PROC_UNLOCK(p); - error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught, - sizeof(sigset)); - } else { - if ((ku->ku_flags & KUF_DOUPCALL) == 0 && - ((ku->ku_mflags & KMF_NOCOMPLETED) || - (p->p_completed == NULL))) { - p->p_upsleeps++; - td->td_kflags |= TDK_KSEREL; - error = msleep(&p->p_completed, &p->p_mtx, - PPAUSE|PCATCH, "kserel", - (uap->timeout ? tvtohz(&tv) : 0)); - td->td_kflags &= ~(TDK_KSEREL | TDK_WAKEUP); - p->p_upsleeps--; - } - PROC_UNLOCK(p); - } - if (ku->ku_flags & KUF_DOUPCALL) { - PROC_SLOCK(p); - ku->ku_flags &= ~KUF_DOUPCALL; - PROC_SUNLOCK(p); - } - return (0); -#else /* !KSE */ - return (EOPNOTSUPP); -#endif -} - -/* struct kse_wakeup_args { - struct kse_mailbox *mbx; -}; */ -int -kse_wakeup(struct thread *td, struct kse_wakeup_args *uap) -{ -#ifdef KSE - struct proc *p; - struct kse_upcall *ku; - struct thread *td2; - - p = td->td_proc; - td2 = NULL; - ku = NULL; - /* KSE-enabled processes only, please. */ - PROC_LOCK(p); - if (!(p->p_flag & P_SA)) { - PROC_UNLOCK(p); - return (EINVAL); - } - PROC_SLOCK(p); - if (uap->mbx) { - FOREACH_UPCALL_IN_PROC(p, ku) { - if (ku->ku_mailbox == uap->mbx) - break; - } - } else { - if (p->p_upsleeps) { - PROC_SUNLOCK(p); - wakeup(&p->p_completed); - PROC_UNLOCK(p); - return (0); - } - ku = TAILQ_FIRST(&p->p_upcalls); - } - if (ku == NULL) { - PROC_SUNLOCK(p); - PROC_UNLOCK(p); - return (ESRCH); - } - mtx_lock_spin(&kse_lock); - if ((td2 = ku->ku_owner) == NULL) { - mtx_unlock_spin(&kse_lock); - PROC_SUNLOCK(p); - PROC_UNLOCK(p); - panic("%s: no owner", __func__); - } else if (td2->td_kflags & (TDK_KSEREL | TDK_KSERELSIG)) { - mtx_unlock_spin(&kse_lock); - if (!(td2->td_kflags & TDK_WAKEUP)) { - td2->td_kflags |= TDK_WAKEUP; - if (td2->td_kflags & TDK_KSEREL) - sleepq_remove(td2, &p->p_completed); - else - sleepq_remove(td2, &p->p_siglist); - } - } else { - ku->ku_flags |= KUF_DOUPCALL; - mtx_unlock_spin(&kse_lock); - } - PROC_SUNLOCK(p); - PROC_UNLOCK(p); - return (0); -#else /* !KSE */ - return (EOPNOTSUPP); -#endif -} - -/* - * newgroup == 0: first call: use current KSE, don't schedule an upcall - * All other situations, do allocate max new KSEs and schedule an upcall. - * - * XXX should be changed so that 'first' behaviour lasts for as long - * as you have not made a thread in this proc. i.e. as long as we do not have - * a mailbox.. 
- */ -/* struct kse_create_args { - struct kse_mailbox *mbx; - int newgroup; -}; */ -int -kse_create(struct thread *td, struct kse_create_args *uap) -{ -#ifdef KSE - struct proc *p; - struct kse_mailbox mbx; - struct kse_upcall *newku; - int err, ncpus, sa = 0, first = 0; - struct thread *newtd; - - p = td->td_proc; - - /* - * Processes using the other threading model can't - * suddenly start calling this one - * XXX maybe... - */ - PROC_LOCK(p); - if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) { - PROC_UNLOCK(p); - return (EINVAL); - } - if (!(p->p_flag & P_SA)) { - first = 1; - p->p_flag |= P_SA|P_HADTHREADS; - } - PROC_UNLOCK(p); - - if ((err = copyin(uap->mbx, &mbx, sizeof(mbx)))) - return (err); - - ncpus = mp_ncpus; - if (virtual_cpu != 0) - ncpus = virtual_cpu; - /* - * If the new UTS mailbox says that this - * will be a BOUND lwp, then it had better - * have its thread mailbox already there. - */ - if ((mbx.km_flags & KMF_BOUND) || uap->newgroup) { - /* It's a bound thread (1:1) */ - if (mbx.km_curthread == NULL) - return (EINVAL); - ncpus = 1; - if (!(uap->newgroup || first)) - return (EINVAL); - } else { - /* It's an upcall capable thread */ - sa = TDP_SA; - PROC_LOCK(p); - /* - * Limit it to NCPU upcall contexts per proc in any case. - * numupcalls will soon be numkse or something - * as it will represent the number of - * non-bound upcalls available. (i.e. ones that can - * actually call up). - */ - if (p->p_numupcalls >= ncpus) { - PROC_UNLOCK(p); - return (EPROCLIM); - } - p->p_numupcalls++; - PROC_UNLOCK(p); - } - - /* - * For the first call this may not have been set. - * Of course nor may it actually be needed. - * thread_schedule_upcall() will look for it. - */ - if (td->td_standin == NULL) { - if (!thread_alloc_spare(td)) - return (ENOMEM); - } - - /* - * Even bound LWPs get a mailbox and an upcall to hold it. - * XXX This should change. - */ - newku = upcall_alloc(); - newku->ku_mailbox = uap->mbx; - newku->ku_func = mbx.km_func; - bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t)); - - PROC_LOCK(p); - PROC_SLOCK(p); - /* - * If we are the first time, and a normal thread, - * then transfer all the signals back to the 'process'. - * SA threading will make a special thread to handle them. - */ - if (first) { - sigqueue_move_set(&td->td_sigqueue, &p->p_sigqueue, - &td->td_sigqueue.sq_signals); - SIGFILLSET(td->td_sigmask); - SIG_CANTMASK(td->td_sigmask); - } - - /* - * Make the new upcall available to the process. - * It may or may not use it, but it's available. - */ - TAILQ_INSERT_TAIL(&p->p_upcalls, newku, ku_link); - newku->ku_proc = p; - PROC_UNLOCK(p); - if (mbx.km_quantum) -/* XXX should this be in the thread? */ - p->p_upquantum = max(1, mbx.km_quantum / tick); - - /* - * Each upcall structure has an owner thread, find which - * one owns it. - */ - thread_lock(td); - mtx_lock_spin(&kse_lock); - if (uap->newgroup) { - /* - * The newgroup parameter now means - * "bound, non SA, system scope" - * It is only used for the interrupt thread at the - * moment I think.. (or system scope threads dopey). - * We'll rename it later. - */ - newtd = thread_schedule_upcall(td, newku); - } else { - /* - * If the current thread hasn't an upcall structure, - * just assign the upcall to it. - * It'll just return. - */ - if (td->td_upcall == NULL) { - newku->ku_owner = td; - td->td_upcall = newku; - newtd = td; - } else { - /* - * Create a new upcall thread to own it. 
- */ - newtd = thread_schedule_upcall(td, newku); - } - } - mtx_unlock_spin(&kse_lock); - thread_unlock(td); - PROC_SUNLOCK(p); - - /* - * Let the UTS instance know its LWPID. - * It doesn't really care. But the debugger will. - * XXX warning.. remember that this moves. - */ - suword32(&newku->ku_mailbox->km_lwp, newtd->td_tid); - - /* - * In the same manner, if the UTS has a current user thread, - * then it is also running on this LWP so set it as well. - * The library could do that of course.. but why not.. - * XXX I'm not sure this can ever happen but ... - * XXX does the UTS ever set this in the mailbox before calling this? - */ - if (mbx.km_curthread) - suword32(&mbx.km_curthread->tm_lwp, newtd->td_tid); - - if (sa) { - newtd->td_pflags |= TDP_SA; - /* - * If we are starting a new thread, kick it off. - */ - if (newtd != td) { - thread_lock(newtd); - sched_add(newtd, SRQ_BORING); - thread_unlock(newtd); - } - } else { - newtd->td_pflags &= ~TDP_SA; - - /* - * Since a library will use the mailbox pointer to - * identify even a bound thread, and the mailbox pointer - * will never be allowed to change after this syscall - * for a bound thread, set it here so the library can - * find the thread after the syscall returns. - */ - newtd->td_mailbox = mbx.km_curthread; - - if (newtd != td) { - /* - * If we did create a new thread then - * make sure it goes to the right place - * when it starts up, and make sure that it runs - * at full speed when it gets there. - * thread_schedule_upcall() copies all cpu state - * to the new thread, so we should clear single step - * flag here. - */ - cpu_set_upcall_kse(newtd, newku->ku_func, - newku->ku_mailbox, &newku->ku_stack); - PROC_LOCK(p); - if (p->p_flag & P_TRACED) { - _PHOLD(p); - ptrace_clear_single_step(newtd); - _PRELE(p); - } - PROC_UNLOCK(p); - thread_lock(newtd); - sched_add(newtd, SRQ_BORING); - thread_unlock(newtd); - } - } - return (0); -#else /* !KSE */ - return (EOPNOTSUPP); -#endif -} - -#ifdef KSE -/* - * Initialize global thread allocation resources. - */ -void -kseinit(void) -{ - - upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall), - NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); -} - -/* - * Store the thread context in the UTS's mailbox. - * then add the mailbox at the head of a list we are building in user space. - * The list is anchored in the proc structure. - */ -int -thread_export_context(struct thread *td, int willexit) -{ - struct proc *p; - uintptr_t mbx; - void *addr; - int error = 0, sig; - mcontext_t mc; - - p = td->td_proc; - - /* - * Post sync signal, or process SIGKILL and SIGSTOP. - * For sync signal, it is only possible when the signal is not - * caught by userland or process is being debugged. - */ - PROC_LOCK(p); - if (td->td_flags & TDF_NEEDSIGCHK) { - thread_lock(td); - td->td_flags &= ~TDF_NEEDSIGCHK; - thread_unlock(td); - mtx_lock(&p->p_sigacts->ps_mtx); - while ((sig = cursig(td)) != 0) - postsig(sig); - mtx_unlock(&p->p_sigacts->ps_mtx); - } - if (willexit) - SIGFILLSET(td->td_sigmask); - PROC_UNLOCK(p); - - /* Export the user/machine context. 
*/ - get_mcontext(td, &mc, 0); - addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext); - error = copyout(&mc, addr, sizeof(mcontext_t)); - if (error) - goto bad; - - addr = (caddr_t)(&td->td_mailbox->tm_lwp); - if (suword32(addr, 0)) { - error = EFAULT; - goto bad; - } - - /* Get address in latest mbox of list pointer */ - addr = (void *)(&td->td_mailbox->tm_next); - /* - * Put the saved address of the previous first - * entry into this one - */ - for (;;) { - mbx = (uintptr_t)p->p_completed; - if (suword(addr, mbx)) { - error = EFAULT; - goto bad; - } - PROC_LOCK(p); - if (mbx == (uintptr_t)p->p_completed) { - thread_lock(td); - p->p_completed = td->td_mailbox; - /* - * The thread context may be taken away by - * other upcall threads when we unlock - * process lock. it's no longer valid to - * use it again in any other places. - */ - td->td_mailbox = NULL; - thread_unlock(td); - PROC_UNLOCK(p); - break; - } - PROC_UNLOCK(p); - } - td->td_usticks = 0; - return (0); - -bad: - PROC_LOCK(p); - sigexit(td, SIGILL); - return (error); -} - -/* - * Take the list of completed mailboxes for this Process and put them on this - * upcall's mailbox as it's the next one going up. - */ -static int -thread_link_mboxes(struct proc *p, struct kse_upcall *ku) -{ - void *addr; - uintptr_t mbx; - - addr = (void *)(&ku->ku_mailbox->km_completed); - for (;;) { - mbx = (uintptr_t)p->p_completed; - if (suword(addr, mbx)) { - PROC_LOCK(p); - psignal(p, SIGSEGV); - PROC_UNLOCK(p); - return (EFAULT); - } - PROC_LOCK(p); - if (mbx == (uintptr_t)p->p_completed) { - p->p_completed = NULL; - PROC_UNLOCK(p); - break; - } - PROC_UNLOCK(p); - } - return (0); -} - -/* - * This function should be called at statclock interrupt time - */ -int -thread_statclock(int user) -{ - struct thread *td = curthread; - - if (!(td->td_pflags & TDP_SA)) - return (0); - if (user) { - /* Current always do via ast() */ - thread_lock(td); - td->td_flags |= TDF_ASTPENDING; - thread_unlock(td); - td->td_uuticks++; - } else if (td->td_mailbox != NULL) - td->td_usticks++; - return (0); -} - -/* - * Export state clock ticks for userland - */ -static int -thread_update_usr_ticks(struct thread *td) -{ - struct proc *p = td->td_proc; - caddr_t addr; - u_int uticks; - - thread_lock(td); - if (td->td_mailbox == NULL) { - thread_unlock(td); - return (-1); - } - thread_unlock(td); - - if ((uticks = td->td_uuticks) != 0) { - td->td_uuticks = 0; - addr = (caddr_t)&td->td_mailbox->tm_uticks; - if (suword32(addr, uticks+fuword32(addr))) - goto error; - } - if ((uticks = td->td_usticks) != 0) { - td->td_usticks = 0; - addr = (caddr_t)&td->td_mailbox->tm_sticks; - if (suword32(addr, uticks+fuword32(addr))) - goto error; - } - return (0); - -error: - PROC_LOCK(p); - psignal(p, SIGSEGV); - PROC_UNLOCK(p); - return (-2); -} - -/* - * This function is intended to be used to initialize a spare thread - * for upcall. Initialize thread's large data area outside the thread lock - * for thread_schedule_upcall(). The crhold is also here to get it out - * from the schedlock as it has a mutex op itself. - * XXX BUG.. we need to get the cr ref after the thread has - * checked and chenged its own, not 6 months before... 
- */ -int -thread_alloc_spare(struct thread *td) -{ - struct thread *spare; - - if (td->td_standin) - return (1); - spare = thread_alloc(); - if (spare == NULL) - return (0); - td->td_standin = spare; - bzero(&spare->td_startzero, - __rangeof(struct thread, td_startzero, td_endzero)); - spare->td_proc = td->td_proc; - spare->td_ucred = crhold(td->td_ucred); - spare->td_flags = TDF_INMEM; - return (1); -} - -/* - * Create a thread and schedule it for upcall on the KSE given. - * Use our thread's standin so that we don't have to allocate one. - */ -struct thread * -thread_schedule_upcall(struct thread *td, struct kse_upcall *ku) -{ - struct thread *td2; - - THREAD_LOCK_ASSERT(td, MA_OWNED); - mtx_assert(&kse_lock, MA_OWNED); - /* - * Schedule an upcall thread on specified kse_upcall, - * the kse_upcall must be free. - * td must have a spare thread. - */ - KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__)); - if ((td2 = td->td_standin) != NULL) { - td->td_standin = NULL; - } else { - panic("no reserve thread when scheduling an upcall"); - return (NULL); - } - CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)", - td2, td->td_proc->p_pid, td->td_name); - /* - * Bzero already done in thread_alloc_spare() because we can't - * do the crhold here because we are in schedlock already. - */ - bcopy(&td->td_startcopy, &td2->td_startcopy, - __rangeof(struct thread, td_startcopy, td_endcopy)); - sched_fork_thread(td, td2); - thread_link(td2, ku->ku_proc); - bcopy(ku->ku_proc->p_comm, td2->td_name, sizeof(td2->td_name)); - /* inherit parts of blocked thread's context as a good template */ - cpu_set_upcall(td2, td); - /* Let the new thread become owner of the upcall */ - ku->ku_owner = td2; - td2->td_upcall = ku; - td2->td_pflags = TDP_SA|TDP_UPCALLING; - td2->td_state = TDS_CAN_RUN; - td2->td_inhibitors = 0; - SIGFILLSET(td2->td_sigmask); - SIG_CANTMASK(td2->td_sigmask); - return (td2); /* bogus.. should be a void function */ -} - -/* - * It is only used when thread generated a trap and process is being - * debugged. - */ -void -thread_signal_add(struct thread *td, ksiginfo_t *ksi) -{ - struct proc *p; - struct sigacts *ps; - int error; - - p = td->td_proc; - PROC_LOCK_ASSERT(p, MA_OWNED); - ps = p->p_sigacts; - mtx_assert(&ps->ps_mtx, MA_OWNED); - - mtx_unlock(&ps->ps_mtx); - SIGADDSET(td->td_sigmask, ksi->ksi_signo); - PROC_UNLOCK(p); - error = copyout(&ksi->ksi_info, &td->td_mailbox->tm_syncsig, - sizeof(siginfo_t)); - if (error) { - PROC_LOCK(p); - sigexit(td, SIGSEGV); - } - PROC_LOCK(p); - mtx_lock(&ps->ps_mtx); -} -#include "opt_sched.h" -struct thread * -thread_switchout(struct thread *td, int flags, struct thread *nextthread) -{ - struct kse_upcall *ku; - struct thread *td2; - - THREAD_LOCK_ASSERT(td, MA_OWNED); - - /* - * If the outgoing thread is in threaded group and has never - * scheduled an upcall, decide whether this is a short - * or long term event and thus whether or not to schedule - * an upcall. - * If it is a short term event, just suspend it in - * a way that takes its KSE with it. - * Select the events for which we want to schedule upcalls. - * For now it's just sleep or if thread is suspended but - * process wide suspending flag is not set (debugger - * suspends thread). - * XXXKSE eventually almost any inhibition could do. 
- */ - if (TD_CAN_UNBIND(td) && (td->td_standin) && - (TD_ON_SLEEPQ(td) || (TD_IS_SUSPENDED(td) && - !P_SHOULDSTOP(td->td_proc)))) { - /* - * Release ownership of upcall, and schedule an upcall - * thread, this new upcall thread becomes the owner of - * the upcall structure. It will be ahead of us in the - * run queue, so as we are stopping, it should either - * start up immediatly, or at least before us if - * we release our slot. - */ - mtx_lock_spin(&kse_lock); - ku = td->td_upcall; - ku->ku_owner = NULL; - td->td_upcall = NULL; - td->td_pflags &= ~TDP_CAN_UNBIND; - td2 = thread_schedule_upcall(td, ku); - mtx_unlock_spin(&kse_lock); - if (flags & SW_INVOL || nextthread) { - thread_lock(td2); - sched_add(td2, SRQ_YIELDING); - thread_unlock(td2); - } else { - /* Keep up with reality.. we have one extra thread - * in the picture.. and it's 'running'. - */ - return td2; - } - } - return (nextthread); -} - -/* - * Setup done on the thread when it enters the kernel. - */ -void -thread_user_enter(struct thread *td) -{ - struct proc *p = td->td_proc; - struct kse_upcall *ku; - struct kse_thr_mailbox *tmbx; - uint32_t flags; - - /* - * First check that we shouldn't just abort. we - * can suspend it here or just exit. - */ - if (__predict_false(P_SHOULDSTOP(p))) { - PROC_LOCK(p); - thread_suspend_check(0); - PROC_UNLOCK(p); - } - - if (!(td->td_pflags & TDP_SA)) - return; - - /* - * If we are doing a syscall in a KSE environment, - * note where our mailbox is. - */ - - thread_lock(td); - ku = td->td_upcall; - thread_unlock(td); - - KASSERT(ku != NULL, ("no upcall owned")); - KASSERT(ku->ku_owner == td, ("wrong owner")); - KASSERT(!TD_CAN_UNBIND(td), ("can unbind")); - - if (td->td_standin == NULL) { - if (!thread_alloc_spare(td)) { - PROC_LOCK(p); - if (kern_logsigexit) - log(LOG_INFO, - "pid %d (%s), uid %d: thread_alloc_spare failed\n", - p->p_pid, p->p_comm, - td->td_ucred ? td->td_ucred->cr_uid : -1); - sigexit(td, SIGSEGV); /* XXX ? */ - /* panic("thread_user_enter: thread_alloc_spare failed"); */ - } - } - ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags); - tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread); - if ((tmbx == NULL) || (tmbx == (void *)-1L) || - (ku->ku_mflags & KMF_NOUPCALL)) { - td->td_mailbox = NULL; - } else { - flags = fuword32(&tmbx->tm_flags); - /* - * On some architectures, TP register points to thread - * mailbox but not points to kse mailbox, and userland - * can not atomically clear km_curthread, but can - * use TP register, and set TMF_NOUPCALL in thread - * flag to indicate a critical region. - */ - if (flags & TMF_NOUPCALL) { - td->td_mailbox = NULL; - } else { - td->td_mailbox = tmbx; - td->td_pflags |= TDP_CAN_UNBIND; - PROC_LOCK(p); - if (__predict_false(p->p_flag & P_TRACED)) { - flags = fuword32(&tmbx->tm_dflags); - if (flags & TMDF_SUSPEND) { - thread_lock(td); - /* fuword can block, check again */ - if (td->td_upcall) - ku->ku_flags |= KUF_DOUPCALL; - thread_unlock(td); - } - } - PROC_UNLOCK(p); - } - } -} - -/* - * The extra work we go through if we are a threaded process when we - * return to userland. - * - * If we are a KSE process and returning to user mode, check for - * extra work to do before we return (e.g. for more syscalls - * to complete first). If we were in a critical section, we should - * just return to let it finish. Same if we were in the UTS (in - * which case the mailbox's context's busy indicator will be set). - * The only traps we suport will have set the mailbox. - * We will clear it here. 
- */ -int -thread_userret(struct thread *td, struct trapframe *frame) -{ - struct kse_upcall *ku; - struct proc *p; - struct timespec ts; - int error = 0, uts_crit; - - /* Nothing to do with bound thread */ - if (!(td->td_pflags & TDP_SA)) - return (0); - - /* - * Update stat clock count for userland - */ - if (td->td_mailbox != NULL) { - thread_update_usr_ticks(td); - uts_crit = 0; - } else { - uts_crit = 1; - } - - p = td->td_proc; - thread_lock(td); - ku = td->td_upcall; - - /* - * Optimisation: - * This thread has not started any upcall. - * If there is no work to report other than ourself, - * then it can return direct to userland. - */ - if (TD_CAN_UNBIND(td)) { - thread_unlock(td); - td->td_pflags &= ~TDP_CAN_UNBIND; - if ((td->td_flags & TDF_NEEDSIGCHK) == 0 && - (p->p_completed == NULL) && - (ku->ku_flags & KUF_DOUPCALL) == 0 && - (p->p_upquantum && ticks < p->p_nextupcall)) { - nanotime(&ts); - error = copyout(&ts, - (caddr_t)&ku->ku_mailbox->km_timeofday, - sizeof(ts)); - td->td_mailbox = 0; - ku->ku_mflags = 0; - if (error) - goto out; - return (0); - } - thread_export_context(td, 0); - /* - * There is something to report, and we own an upcall - * structure, we can go to userland. - * Turn ourself into an upcall thread. - */ - td->td_pflags |= TDP_UPCALLING; - } else if (td->td_mailbox && (ku == NULL)) { - thread_unlock(td); - thread_export_context(td, 1); - PROC_LOCK(p); - if (p->p_upsleeps) - wakeup(&p->p_completed); - WITNESS_WARN(WARN_PANIC, &p->p_mtx.lock_object, - "thread exiting in userret"); - sigqueue_flush(&td->td_sigqueue); - PROC_SLOCK(p); - thread_stopped(p); - thread_exit(); - /* NOTREACHED */ - } else - thread_unlock(td); - - KASSERT(ku != NULL, ("upcall is NULL")); - KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind")); - - PROC_LOCK(p); - PROC_SLOCK(p); - if (p->p_numthreads > max_threads_per_proc) { - max_threads_hits++; - while (p->p_numthreads > max_threads_per_proc) { - if (p->p_numupcalls >= max_threads_per_proc) - break; - PROC_SUNLOCK(p); - if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH, - "maxthreads", hz/10) != EWOULDBLOCK) { - PROC_SLOCK(p); - break; - } else - PROC_SLOCK(p); - } - } - PROC_SUNLOCK(p); - PROC_UNLOCK(p); - - if (td->td_pflags & TDP_UPCALLING) { - uts_crit = 0; - p->p_nextupcall = ticks + p->p_upquantum; - /* - * There is no more work to do and we are going to ride - * this thread up to userland as an upcall. - * Do the last parts of the setup needed for the upcall. - */ - CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)", - td, td->td_proc->p_pid, td->td_name); - - td->td_pflags &= ~TDP_UPCALLING; - if (ku->ku_flags & KUF_DOUPCALL) { - PROC_SLOCK(p); - ku->ku_flags &= ~KUF_DOUPCALL; - PROC_SUNLOCK(p); - } - /* - * Set user context to the UTS - */ - if (!(ku->ku_mflags & KMF_NOUPCALL)) { - cpu_set_upcall_kse(td, ku->ku_func, ku->ku_mailbox, - &ku->ku_stack); - PROC_LOCK(p); - if (p->p_flag & P_TRACED) { - _PHOLD(p); - ptrace_clear_single_step(td); - _PRELE(p); - } - PROC_UNLOCK(p); - error = suword32(&ku->ku_mailbox->km_lwp, - td->td_tid); - if (error) - goto out; - error = suword(&ku->ku_mailbox->km_curthread, 0); - if (error) - goto out; - } - - /* - * Unhook the list of completed threads. - * anything that completes after this gets to - * come in next time. - * Put the list of completed thread mailboxes on - * this KSE's mailbox. 
- */ - if (!(ku->ku_mflags & KMF_NOCOMPLETED) && - (error = thread_link_mboxes(p, ku)) != 0) - goto out; - } - if (!uts_crit) { - nanotime(&ts); - error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts)); - } - -out: - if (error) { - /* - * Things are going to be so screwed we should just kill - * the process. - * how do we do that? - */ - PROC_LOCK(p); - psignal(p, SIGSEGV); - PROC_UNLOCK(p); - } else { - /* - * Optimisation: - * Ensure that we have a spare thread available, - * for when we re-enter the kernel. - */ - if (td->td_standin == NULL) - thread_alloc_spare(td); /* XXX care of failure ? */ - } - - ku->ku_mflags = 0; - td->td_mailbox = NULL; - td->td_usticks = 0; - return (error); /* go sync */ -} - -/* - * called after ptrace resumed a process, force all - * virtual CPUs to schedule upcall for SA process, - * because debugger may have changed something in userland, - * we should notice UTS as soon as possible. - */ -void -thread_continued(struct proc *p) -{ - struct kse_upcall *ku; - struct thread *td; - - PROC_LOCK_ASSERT(p, MA_OWNED); - KASSERT(P_SHOULDSTOP(p), ("process not stopped")); - - if (!(p->p_flag & P_SA)) - return; - - if (p->p_flag & P_TRACED) { - td = TAILQ_FIRST(&p->p_threads); - if (td && (td->td_pflags & TDP_SA)) { - FOREACH_UPCALL_IN_PROC(p, ku) { - PROC_SLOCK(p); - ku->ku_flags |= KUF_DOUPCALL; - PROC_SUNLOCK(p); - wakeup(&p->p_completed); - } - } - } -} -#endif diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index c398a8d..5ca1e60 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -645,7 +645,7 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp) kp->ki_structsize = sizeof(*kp); kp->ki_paddr = p; PROC_LOCK_ASSERT(p, MA_OWNED); - kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */ + kp->ki_addr =/* p->p_addr; */0; /* XXX */ kp->ki_args = p->p_args; kp->ki_textvp = p->p_textvp; #ifdef KTRACE @@ -794,7 +794,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread) bzero(kp->ki_lockname, sizeof(kp->ki_lockname)); } - if (p->p_state == PRS_NORMAL) { /* XXXKSE very approximate */ + if (p->p_state == PRS_NORMAL) { /* approximate. */ if (TD_ON_RUNQ(td) || TD_CAN_RUN(td) || TD_IS_RUNNING(td)) { diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c index 5f8ea8f..b2b1daf 100644 --- a/sys/kern/kern_resource.c +++ b/sys/kern/kern_resource.c @@ -407,8 +407,6 @@ rtprio(td, uap) * or if one is, report the highest priority * in the process. There isn't much more you can do as * there is only room to return a single priority. - * XXXKSE: maybe need a new interface to report - * priorities of multiple system scope threads. * Note: specifying our own pid is not the same * as leaving it zero. 
*/ diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index 5be17ea..0e1c493 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -49,7 +49,6 @@ __FBSDID("$FreeBSD$"); #include <sys/event.h> #include <sys/fcntl.h> #include <sys/kernel.h> -#include <sys/kse.h> #include <sys/ktr.h> #include <sys/ktrace.h> #include <sys/lock.h> @@ -94,9 +93,6 @@ static int filt_sigattach(struct knote *kn); static void filt_sigdetach(struct knote *kn); static int filt_signal(struct knote *kn, long hint); static struct thread *sigtd(struct proc *p, int sig, int prop); -#ifdef KSE -static int do_tdsignal(struct proc *, struct thread *, int, ksiginfo_t *); -#endif static void sigqueue_start(void); static uma_zone_t ksiginfo_zone = NULL; @@ -566,11 +562,7 @@ void signotify(struct thread *td) { struct proc *p; -#ifdef KSE - sigset_t set, saved; -#else sigset_t set; -#endif p = td->td_proc; @@ -581,10 +573,6 @@ signotify(struct thread *td) * previously masked by all threads to our sigqueue. */ set = p->p_sigqueue.sq_signals; -#ifdef KSE - if (p->p_flag & P_SA) - saved = p->p_sigqueue.sq_signals; -#endif SIGSETNAND(set, td->td_sigmask); if (! SIGISEMPTY(set)) sigqueue_move_set(&p->p_sigqueue, &td->td_sigqueue, &set); @@ -593,15 +581,6 @@ signotify(struct thread *td) td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING; thread_unlock(td); } -#ifdef KSE - if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) { - if (!SIGSETEQ(saved, p->p_sigqueue.sq_signals)) { - /* pending set changed */ - p->p_flag |= P_SIGEVENT; - wakeup(&p->p_siglist); - } - } -#endif } int @@ -754,13 +733,6 @@ kern_sigaction(td, sig, act, oact, flags) if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || (sigprop(sig) & SA_IGNORE && ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) { -#ifdef KSE - if ((p->p_flag & P_SA) && - SIGISMEMBER(p->p_sigqueue.sq_signals, sig)) { - p->p_flag |= P_SIGEVENT; - wakeup(&p->p_siglist); - } -#endif /* never to be seen again */ PROC_SLOCK(p); sigqueue_delete_proc(p, sig); @@ -1200,12 +1172,6 @@ restart: continue; if (!SIGISMEMBER(td->td_sigqueue.sq_signals, i)) { if (SIGISMEMBER(p->p_sigqueue.sq_signals, i)) { -#ifdef KSE - if (p->p_flag & P_SA) { - p->p_flag |= P_SIGEVENT; - wakeup(&p->p_siglist); - } -#endif sigqueue_move(&p->p_sigqueue, &td->td_sigqueue, i); } else @@ -1842,9 +1808,6 @@ trapsignal(struct thread *td, ksiginfo_t *ksi) { struct sigacts *ps; struct proc *p; -#ifdef KSE - int error; -#endif int sig; int code; @@ -1853,27 +1816,7 @@ trapsignal(struct thread *td, ksiginfo_t *ksi) code = ksi->ksi_code; KASSERT(_SIG_VALID(sig), ("invalid signal")); -#ifdef KSE - if (td->td_pflags & TDP_SA) { - if (td->td_mailbox == NULL) - thread_user_enter(td); - PROC_LOCK(p); - SIGDELSET(td->td_sigmask, sig); - thread_lock(td); - /* - * Force scheduling an upcall, so UTS has chance to - * process the signal before thread runs again in - * userland. 
- */ - if (td->td_upcall) - td->td_upcall->ku_flags |= KUF_DOUPCALL; - thread_unlock(td); - } else { - PROC_LOCK(p); - } -#else PROC_LOCK(p); -#endif ps = p->p_sigacts; mtx_lock(&ps->ps_mtx); if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) && @@ -1884,34 +1827,8 @@ trapsignal(struct thread *td, ksiginfo_t *ksi) ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)], &td->td_sigmask, code); #endif -#ifdef KSE - if (!(td->td_pflags & TDP_SA)) - (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], - ksi, &td->td_sigmask); -#else (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], ksi, &td->td_sigmask); -#endif -#ifdef KSE - else if (td->td_mailbox == NULL) { - mtx_unlock(&ps->ps_mtx); - /* UTS caused a sync signal */ - p->p_code = code; /* XXX for core dump/debugger */ - p->p_sig = sig; /* XXX to verify code */ - sigexit(td, sig); - } else { - mtx_unlock(&ps->ps_mtx); - SIGADDSET(td->td_sigmask, sig); - PROC_UNLOCK(p); - error = copyout(&ksi->ksi_info, &td->td_mailbox->tm_syncsig, - sizeof(siginfo_t)); - PROC_LOCK(p); - /* UTS memory corrupted */ - if (error) - sigexit(td, SIGSEGV); - mtx_lock(&ps->ps_mtx); - } -#endif SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]); if (!SIGISMEMBER(ps->ps_signodefer, sig)) SIGADDSET(td->td_sigmask, sig); @@ -2024,27 +1941,6 @@ psignal_event(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi) int tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) { -#ifdef KSE - sigset_t saved; - int ret; - - if (p->p_flag & P_SA) - saved = p->p_sigqueue.sq_signals; - ret = do_tdsignal(p, td, sig, ksi); - if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) { - if (!SIGSETEQ(saved, p->p_sigqueue.sq_signals)) { - /* pending set changed */ - p->p_flag |= P_SIGEVENT; - wakeup(&p->p_siglist); - } - } - return (ret); -} - -static int -do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) -{ -#endif sig_t action; sigqueue_t *sigqueue; int prop; @@ -2055,17 +1951,9 @@ do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) PROC_LOCK_ASSERT(p, MA_OWNED); if (!_SIG_VALID(sig)) -#ifdef KSE - panic("do_tdsignal(): invalid signal %d", sig); -#else panic("tdsignal(): invalid signal %d", sig); -#endif -#ifdef KSE - KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("do_tdsignal: ksi on queue")); -#else KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("tdsignal: ksi on queue")); -#endif /* * IEEE Std 1003.1-2001: return success when killing a zombie. @@ -2232,18 +2120,6 @@ do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) goto out; } if (action == SIG_CATCH) { -#ifdef KSE - /* - * The process wants to catch it so it needs - * to run at least one thread, but which one? - * It would seem that the answer would be to - * run an upcall in the next KSE to run, and - * deliver the signal that way. In a NON KSE - * process, we need to make sure that the - * single thread is runnable asap. - * XXXKSE for now however, make them all run. - */ -#endif /* * The process wants to catch it so it needs * to run at least one thread, but which one? 
@@ -2540,10 +2416,6 @@ issignal(td) */ if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) { sigqueue_delete(&td->td_sigqueue, sig); -#ifdef KSE - if (td->td_pflags & TDP_SA) - SIGADDSET(td->td_sigmask, sig); -#endif continue; } if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) { @@ -2554,11 +2426,6 @@ issignal(td) newsig = ptracestop(td, sig); mtx_lock(&ps->ps_mtx); -#ifdef KSE - if (td->td_pflags & TDP_SA) - SIGADDSET(td->td_sigmask, sig); - -#endif if (sig != newsig) { ksiginfo_t ksi; /* @@ -2582,10 +2449,6 @@ issignal(td) * signal is being masked, look for other signals. */ SIGADDSET(td->td_sigqueue.sq_signals, sig); -#ifdef KSE - if (td->td_pflags & TDP_SA) - SIGDELSET(td->td_sigmask, sig); -#endif if (SIGISMEMBER(td->td_sigmask, sig)) continue; signotify(td); @@ -2739,11 +2602,7 @@ postsig(sig) mtx_lock(&ps->ps_mtx); } -#ifdef KSE - if (!(td->td_pflags & TDP_SA) && action == SIG_DFL) { -#else if (action == SIG_DFL) { -#endif /* * Default action, where the default is to kill * the process. (Other cases were ignored above.) @@ -2752,15 +2611,6 @@ postsig(sig) sigexit(td, sig); /* NOTREACHED */ } else { -#ifdef KSE - if (td->td_pflags & TDP_SA) { - if (sig == SIGKILL) { - mtx_unlock(&ps->ps_mtx); - sigexit(td, sig); - } - } - -#endif /* * If we get here, the signal must be caught. */ @@ -2803,14 +2653,7 @@ postsig(sig) p->p_code = 0; p->p_sig = 0; } -#ifdef KSE - if (td->td_pflags & TDP_SA) - thread_signal_add(curthread, &ksi); - else - (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask); -#else (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask); -#endif } } diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c index cdc5b8a..360a993 100644 --- a/sys/kern/kern_switch.c +++ b/sys/kern/kern_switch.c @@ -521,7 +521,7 @@ runq_choose_from(struct runq *rq, u_char idx) ts = TAILQ_FIRST(rqh); KASSERT(ts != NULL, ("runq_choose: no proc on busy queue")); CTR4(KTR_RUNQ, - "runq_choose_from: pri=%d kse=%p idx=%d rqh=%p", + "runq_choose_from: pri=%d td_sched=%p idx=%d rqh=%p", pri, ts, ts->ts_rqindex, rqh); return (ts); } diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index f876147..71764e5 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -412,7 +412,7 @@ mi_switch(int flags, struct thread *newtd) td->td_generation++; /* bump preempt-detect counter */ PCPU_INC(cnt.v_swtch); PCPU_SET(switchticks, ticks); - CTR4(KTR_PROC, "mi_switch: old thread %ld (kse %p, pid %ld, %s)", + CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)", td->td_tid, td->td_sched, p->p_pid, td->td_name); #if (KTR_COMPILE & KTR_SCHED) != 0 if (TD_IS_IDLETHREAD(td)) @@ -429,19 +429,11 @@ mi_switch(int flags, struct thread *newtd) td, td->td_name, td->td_priority, td->td_inhibitors, td->td_wmesg, td->td_lockname); #endif - /* - * We call thread_switchout after the KTR_SCHED prints above so kse - * selecting a new thread to run does not show up as a preemption. - */ -#ifdef KSE - if ((flags & SW_VOL) && (td->td_proc->p_flag & P_SA)) - newtd = thread_switchout(td, flags, newtd); -#endif sched_switch(td, newtd, flags); CTR3(KTR_SCHED, "mi_switch: running %p(%s) prio %d", td, td->td_name, td->td_priority); - CTR4(KTR_PROC, "mi_switch: new thread %ld (kse %p, pid %ld, %s)", + CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)", td->td_tid, td->td_sched, p->p_pid, td->td_name); /* @@ -500,8 +492,6 @@ setrunnable(struct thread *td) /* * Compute a tenex style load average of a quantity on * 1, 5 and 15 minute intervals. 
- * XXXKSE Needs complete rewrite when correct info is available. - * Completely Bogus.. only works with 1:1 (but compiles ok now :-) */ static void loadav(void *arg) diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index db6d71a..ec63f89 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -68,43 +68,12 @@ int max_threads_hits; SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD, &max_threads_hits, 0, ""); -#ifdef KSE -int virtual_cpu; - -#endif TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); static struct mtx zombie_lock; MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN); static void thread_zombie(struct thread *); -#ifdef KSE -static int -sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS) -{ - int error, new_val; - int def_val; - - def_val = mp_ncpus; - if (virtual_cpu == 0) - new_val = def_val; - else - new_val = virtual_cpu; - error = sysctl_handle_int(oidp, &new_val, 0, req); - if (error != 0 || req->newptr == NULL) - return (error); - if (new_val < 0) - return (EINVAL); - virtual_cpu = new_val; - return (0); -} - -/* DEBUG ONLY */ -SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW, - 0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I", - "debug virtual cpus"); -#endif - struct mtx tid_lock; static struct unrhdr *tid_unrhdr; @@ -230,9 +199,6 @@ void proc_linkup(struct proc *p, struct thread *td) { -#ifdef KSE - TAILQ_INIT(&p->p_upcalls); /* upcall list */ -#endif sigqueue_init(&p->p_sigqueue, p); p->p_ksi = ksiginfo_alloc(1); if (p->p_ksi != NULL) { @@ -258,9 +224,6 @@ threadinit(void) thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), thread_ctor, thread_dtor, thread_init, thread_fini, 16 - 1, 0); -#ifdef KSE - kseinit(); /* set up kse specific stuff e.g. upcall zone*/ -#endif } /* @@ -286,7 +249,7 @@ thread_stash(struct thread *td) } /* - * Reap zombie kse resource. + * Reap zombie resources. */ void thread_reap(void) @@ -311,9 +274,6 @@ thread_reap(void) td_first = td_next; } } -#ifdef KSE - upcall_reap(); -#endif } /* @@ -343,12 +303,7 @@ thread_alloc(void) void thread_free(struct thread *td) { -#ifdef KSE - if (td->td_cpuset != NULL) - cpuset_rel(td->td_cpuset); -#else cpuset_rel(td->td_cpuset); -#endif td->td_cpuset = NULL; cpu_thread_free(td); if (td->td_altkstack != 0) @@ -365,29 +320,7 @@ thread_free(struct thread *td) * Because we can't free a thread while we're operating under its context, * push the current thread into our CPU's deadthread holder. This means * we needn't worry about someone else grabbing our context before we - * do a cpu_throw(). This may not be needed now as we are under schedlock. - * Maybe we can just do a thread_stash() as thr_exit1 does. - */ -/* XXX - * libthr expects its thread exit to return for the last - * thread, meaning that the program is back to non-threaded - * mode I guess. Because we do this (cpu_throw) unconditionally - * here, they have their own version of it. (thr_exit1()) - * that doesn't do it all if this was the last thread. - * It is also called from thread_suspend_check(). - * Of course in the end, they end up coming here through exit1 - * anyhow.. After fixing 'thr' to play by the rules we should be able - * to merge these two functions together. - * - * called from: - * exit1() - * kse_exit() - * thr_exit() - * ifdef KSE - * thread_user_enter() - * thread_userret() - * endif - * thread_suspend_check() + * do a cpu_throw(). 
 */
 void
 thread_exit(void)
@@ -413,17 +346,6 @@ thread_exit(void)
 	AUDIT_SYSCALL_EXIT(0, td);
 #endif
 
-#ifdef KSE
-	if (td->td_standin != NULL) {
-		/*
-		 * Note that we don't need to free the cred here as it
-		 * is done in thread_reap().
-		 */
-		thread_zombie(td->td_standin);
-		td->td_standin = NULL;
-	}
-#endif
-
 	umtx_thread_exit(td);
 
 	/*
@@ -453,11 +375,7 @@ thread_exit(void)
 	if (p->p_flag & P_HADTHREADS) {
 		if (p->p_numthreads > 1) {
 			thread_lock(td);
-#ifdef KSE
-			kse_unlink(td);
-#else
 			thread_unlink(td);
-#endif
 			thread_unlock(td);
 			td2 = FIRST_THREAD_IN_PROC(p);
 			sched_exit_thread(td2, td);
@@ -480,16 +398,6 @@ thread_exit(void)
 		} else {
 			/*
 			 * The last thread is exiting.. but not through exit()
-			 * what should we do?
-			 * Theoretically this can't happen
-			 * exit1() - clears threading flags before coming here
-			 * kse_exit() - treats last thread specially
-			 * thr_exit() - treats last thread specially
-			 * ifdef KSE
-			 * thread_user_enter() - only if more exist
-			 * thread_userret() - only if more exist
-			 * endif
-			 * thread_suspend_check() - only if more exist
			 */
 			panic ("thread_exit: Last thread exiting on its own");
 		}
@@ -518,16 +426,6 @@ thread_wait(struct proc *p)
 	mtx_assert(&Giant, MA_NOTOWNED);
 	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
 	td = FIRST_THREAD_IN_PROC(p);
-#ifdef KSE
-	if (td->td_standin != NULL) {
-		if (td->td_standin->td_ucred != NULL) {
-			crfree(td->td_standin->td_ucred);
-			td->td_standin->td_ucred = NULL;
-		}
-		thread_free(td->td_standin);
-		td->td_standin = NULL;
-	}
-#endif
 	/* Lock the last thread so we spin until it exits cpu_throw(). */
 	thread_lock(td);
 	thread_unlock(td);
@@ -545,13 +443,6 @@ thread_wait(struct proc *p)
  * Link a thread to a process.
  * set up anything that needs to be initialized for it to
  * be used by the process.
- *
- * Note that we do not link to the proc's ucred here.
- * The thread is linked as if running but no KSE assigned.
- * Called from:
- *  proc_linkup()
- *  thread_schedule_upcall()
- *  thr_create()
 */
 void
 thread_link(struct thread *td, struct proc *p)
@@ -577,9 +468,6 @@
 
 /*
 * Convert a process with one thread to an unthreaded process.
- * Called from:
- *	thread_single(exit)  (called from execve and exit)
- *	kse_exit() XXX may need cleaning up wrt KSE stuff
 */
 void
 thread_unthread(struct thread *td)
@@ -587,20 +475,7 @@
 	struct proc *p = td->td_proc;
 
 	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
-#ifdef KSE
-	thread_lock(td);
-	upcall_remove(td);
-	thread_unlock(td);
-	p->p_flag &= ~(P_SA|P_HADTHREADS);
-	td->td_mailbox = NULL;
-	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
-	if (td->td_standin != NULL) {
-		thread_zombie(td->td_standin);
-		td->td_standin = NULL;
-	}
-#else
 	p->p_flag &= ~P_HADTHREADS;
-#endif
 }
 
 /*
diff --git a/sys/kern/p1003_1b.c b/sys/kern/p1003_1b.c
index 5fae20d..6d05972 100644
--- a/sys/kern/p1003_1b.c
+++ b/sys/kern/p1003_1b.c
@@ -156,7 +156,7 @@ sched_getparam(struct thread *td, struct sched_getparam_args *uap)
 		if (targetp == NULL) {
 			return (ESRCH);
 		}
-		targettd = FIRST_THREAD_IN_PROC(targetp); /* XXXKSE */
+		targettd = FIRST_THREAD_IN_PROC(targetp);
 	}
 
 	e = p_cansee(td, targetp);
@@ -223,7 +223,7 @@ sched_getscheduler(struct thread *td, struct sched_getscheduler_args *uap)
 			e = ESRCH;
 			goto done2;
 		}
-		targettd = FIRST_THREAD_IN_PROC(targetp); /* XXXKSE */
+		targettd = FIRST_THREAD_IN_PROC(targetp);
 	}
 
 	e = p_cansee(td, targetp);
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 0232e09..17ae0be 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -383,8 +383,6 @@ schedcpu(void)
 			/*
 			 * ts_pctcpu is only for ps and ttyinfo().
-			 * Do it per td_sched, and add them up at the end?
-			 * XXXKSE
 			 */
 			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
 			/*
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index ebdc5e0..563d0aa 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -2013,15 +2013,6 @@ sched_exit_thread(struct thread *td, struct thread *child)
 
 	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
 	    child, child->td_name, child->td_priority);
-#ifdef KSE
-	/*
-	 * KSE forks and exits so often that this penalty causes short-lived
-	 * threads to always be non-interactive.  This causes mozilla to
-	 * crawl under load.
-	 */
-	if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
-		return;
-#endif
 	/*
 	 * Give the child's runtime to the parent without returning the
 	 * sleep time as a penalty to the parent.  This causes shells that
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index dca91ff..175cc60 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -410,13 +410,8 @@ sleepq_catch_signals(void *wchan, int pri)
 	PROC_UNLOCK(p);
 	thread_lock(td);
 	if (ret == 0) {
-		if (!(td->td_flags & TDF_INTERRUPT)) {
-			sleepq_switch(wchan, pri);
-			return (0);
-		}
-		/* KSE threads tried unblocking us. */
-		ret = td->td_intrval;
-		MPASS(ret == EINTR || ret == ERESTART || ret == EWOULDBLOCK);
+		sleepq_switch(wchan, pri);
+		return (0);
 	}
 	/*
 	 * There were pending signals and this thread is still
@@ -540,9 +535,6 @@ sleepq_check_signals(void)
 		return (td->td_intrval);
 	}
 
-	if (td->td_flags & TDF_INTERRUPT)
-		return (td->td_intrval);
-
 	return (0);
 }
 
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index d8168af..1701fba 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -119,15 +119,6 @@ userret(struct thread *td, struct trapframe *frame)
 		thread_suspend_check(0);	/* Can suspend or kill */
 		PROC_UNLOCK(p);
 	}
-
-#ifdef KSE
-	/*
-	 * Do special thread processing, e.g. upcall tweaking and such.
-	 */
-	if (p->p_flag & P_SA)
-		thread_userret(td, frame);
-#endif
-
 	/*
 	 * Charge system time if profiling.
 	 */
@@ -135,7 +126,6 @@ userret(struct thread *td, struct trapframe *frame)
 		addupc_task(td, TRAPF_PC(frame), td->td_pticks * psratio);
 	}
-
 
 	/*
 	 * Let the scheduler adjust our priority etc.
 	 */
@@ -173,11 +163,6 @@ ast(struct trapframe *framep)
 	td->td_frame = framep;
 	td->td_pticks = 0;
 
-#ifdef KSE
-	if ((p->p_flag & P_SA) && (td->td_mailbox == NULL))
-		thread_user_enter(td);
-#endif
-
 	/*
 	 * This updates the td_flag's for the checks below in one
 	 * "atomic" operation with turning off the astpending flag.
@@ -188,18 +173,11 @@ ast(struct trapframe *framep)
 	thread_lock(td);
 	flags = td->td_flags;
 	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
-	    TDF_NEEDRESCHED | TDF_INTERRUPT | TDF_ALRMPEND | TDF_PROFPEND |
+	    TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND |
 	    TDF_MACPEND);
 	thread_unlock(td);
 	PCPU_INC(cnt.v_trap);
 
-	/*
-	 * XXXKSE While the fact that we owe a user profiling
-	 * tick is stored per thread in this code, the statistics
-	 * themselves are still stored per process.
-	 * This should probably change, by which I mean that
-	 * possibly the location of both might change.
-	 */
 	if (td->td_ucred != p->p_ucred)
 		cred_update_thread(td);
 	if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 9e2ac7d..b213766 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -450,7 +450,6 @@ static struct witness_order_list_entry order_lists[] = {
 #endif
 	{ "clk", &lock_class_mtx_spin },
 	{ "mprof lock", &lock_class_mtx_spin },
-	{ "kse lock", &lock_class_mtx_spin },
 	{ "zombie lock", &lock_class_mtx_spin },
 	{ "ALD Queue", &lock_class_mtx_spin },
 #ifdef __ia64__
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 432138c..77ddee5 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -803,11 +803,6 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
 		 * you should use PT_SUSPEND to suspend it before
 		 * continuing process.
 		 */
-#ifdef KSE
-		PROC_SUNLOCK(p);
-		thread_continued(p);
-		PROC_SLOCK(p);
-#endif
 		p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
 		thread_unsuspend(p);
 		PROC_SUNLOCK(p);
@@ -943,17 +938,7 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
 			pl->pl_event = PL_EVENT_SIGNAL;
 		else
 			pl->pl_event = 0;
-#ifdef KSE
-		if (td2->td_pflags & TDP_SA) {
-			pl->pl_flags = PL_FLAG_SA;
-			if (td2->td_upcall && !TD_CAN_UNBIND(td2))
-				pl->pl_flags |= PL_FLAG_BOUND;
-		} else {
-			pl->pl_flags = 0;
-		}
-#else
 		pl->pl_flags = 0;
-#endif
 		pl->pl_sigmask = td2->td_sigmask;
 		pl->pl_siglist = td2->td_siglist;
 		break;
diff --git a/sys/kern/syscalls.c b/sys/kern/syscalls.c
index ac584a3..4503be4 100644
--- a/sys/kern/syscalls.c
+++ b/sys/kern/syscalls.c
@@ -386,11 +386,11 @@ const char *syscallnames[] = {
 	"eaccess",			/* 376 = eaccess */
 	"#377",				/* 377 = afs_syscall */
 	"nmount",			/* 378 = nmount */
-	"kse_exit",			/* 379 = kse_exit */
-	"kse_wakeup",			/* 380 = kse_wakeup */
-	"kse_create",			/* 381 = kse_create */
-	"kse_thr_interrupt",		/* 382 = kse_thr_interrupt */
-	"kse_release",			/* 383 = kse_release */
+	"#379",				/* 379 = kse_exit */
+	"#380",				/* 380 = kse_wakeup */
+	"#381",				/* 381 = kse_create */
+	"#382",				/* 382 = kse_thr_interrupt */
+	"#383",				/* 383 = kse_release */
 	"__mac_get_proc",		/* 384 = __mac_get_proc */
 	"__mac_set_proc",		/* 385 = __mac_set_proc */
 	"__mac_get_fd",			/* 386 = __mac_get_fd */
@@ -447,7 +447,7 @@ const char *syscallnames[] = {
 	"extattr_list_fd",		/* 437 = extattr_list_fd */
 	"extattr_list_file",		/* 438 = extattr_list_file */
 	"extattr_list_link",		/* 439 = extattr_list_link */
-	"kse_switchin",			/* 440 = kse_switchin */
+	"#440",				/* 440 = kse_switchin */
 	"ksem_timedwait",		/* 441 = ksem_timedwait */
 	"thr_suspend",			/* 442 = thr_suspend */
 	"thr_wake",			/* 443 = thr_wake */
diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master
index c31df8d..59ef46e 100644
--- a/sys/kern/syscalls.master
+++ b/sys/kern/syscalls.master
@@ -664,14 +664,11 @@
 377	AUE_NULL	UNIMPL	afs_syscall
 378	AUE_NMOUNT	STD	{ int nmount(struct iovec *iovp, \
 				    unsigned int iovcnt, int flags); }
-379	AUE_NULL	STD	{ int kse_exit(void); }
-380	AUE_NULL	STD	{ int kse_wakeup(struct kse_mailbox *mbx); }
-381	AUE_NULL	STD	{ int kse_create(struct kse_mailbox *mbx, \
-				    int newgroup); }
-382	AUE_NULL	STD	{ int kse_thr_interrupt( \
-				    struct kse_thr_mailbox *tmbx, int cmd, \
-				    long data); }
-383	AUE_NULL	STD	{ int kse_release(struct timespec *timeout); }
+379	AUE_NULL	UNIMPL	kse_exit
+380	AUE_NULL	UNIMPL	kse_wakeup
+381	AUE_NULL	UNIMPL	kse_create
+382	AUE_NULL	UNIMPL	kse_thr_interrupt
+383	AUE_NULL	UNIMPL	kse_release
 384	AUE_NULL	STD	{ int __mac_get_proc(struct mac *mac_p); }
 385	AUE_NULL	STD	{ int __mac_set_proc(struct mac *mac_p); }
 386	AUE_NULL	STD	{ int __mac_get_fd(int fd, \
@@ -772,9 +769,7 @@
 439	AUE_EXTATTR_LIST_LINK	STD	{ ssize_t extattr_list_link( \
 				    const char *path, int attrnamespace, \
 				    void *data, size_t nbytes); }
-440	AUE_NULL	STD	{ int kse_switchin( \
-				    struct kse_thr_mailbox *tmbx, \
-				    int flags); }
+440	AUE_NULL	UNIMPL	kse_switchin
 441	AUE_NULL	NOSTD	{ int ksem_timedwait(semid_t id, \
 				    const struct timespec *abstime); }
 442	AUE_NULL	STD	{ int thr_suspend( \
diff --git a/sys/kern/systrace_args.c b/sys/kern/systrace_args.c
index 9b5b706..8ae6875 100644
--- a/sys/kern/systrace_args.c
+++ b/sys/kern/systrace_args.c
@@ -2067,42 +2067,6 @@ systrace_args(int sysnum, void *params, u_int64_t *uarg, int *n_args)
 		*n_args = 3;
 		break;
 	}
-	/* kse_exit */
-	case 379: {
-		*n_args = 0;
-		break;
-	}
-	/* kse_wakeup */
-	case 380: {
-		struct kse_wakeup_args *p = params;
-		uarg[0] = (intptr_t) p->mbx; /* struct kse_mailbox * */
-		*n_args = 1;
-		break;
-	}
-	/* kse_create */
-	case 381: {
-		struct kse_create_args *p = params;
-		uarg[0] = (intptr_t) p->mbx; /* struct kse_mailbox * */
-		iarg[1] = p->newgroup; /* int */
-		*n_args = 2;
-		break;
-	}
-	/* kse_thr_interrupt */
-	case 382: {
-		struct kse_thr_interrupt_args *p = params;
-		uarg[0] = (intptr_t) p->tmbx; /* struct kse_thr_mailbox * */
-		iarg[1] = p->cmd; /* int */
-		iarg[2] = p->data; /* long */
-		*n_args = 3;
-		break;
-	}
-	/* kse_release */
-	case 383: {
-		struct kse_release_args *p = params;
-		uarg[0] = (intptr_t) p->timeout; /* struct timespec * */
-		*n_args = 1;
-		break;
-	}
 	/* __mac_get_proc */
 	case 384: {
 		struct __mac_get_proc_args *p = params;
@@ -2534,14 +2498,6 @@ systrace_args(int sysnum, void *params, u_int64_t *uarg, int *n_args)
 		*n_args = 4;
 		break;
 	}
-	/* kse_switchin */
-	case 440: {
-		struct kse_switchin_args *p = params;
-		uarg[0] = (intptr_t) p->tmbx; /* struct kse_thr_mailbox * */
-		iarg[1] = p->flags; /* int */
-		*n_args = 2;
-		break;
-	}
 	/* ksem_timedwait */
 	case 441: {
 		struct ksem_timedwait_args *p = params;