author     davidxu <davidxu@FreeBSD.org>   2002-10-30 02:28:41 +0000
committer  davidxu <davidxu@FreeBSD.org>   2002-10-30 02:28:41 +0000
commit     7531bd3c2ff53e246074da26bccd32325d78ff40 (patch)
tree       025f0097da64c2bd8954b19f178a9e91c5932b96 /sys/kern
parent     869699f61c0699c54295b52a7d4d69ea9d6e0453 (diff)
download   FreeBSD-src-7531bd3c2ff53e246074da26bccd32325d78ff40.zip
           FreeBSD-src-7531bd3c2ff53e246074da26bccd32325d78ff40.tar.gz
Add an actual implementation of kse_thr_interrupt()
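For context: kse_thr_interrupt() belongs to the KSE (Kernel Scheduled Entities) M:N threading interface. A userland threading library passes in the mailbox of one of its threads, and the kernel marks the matching thread as interrupted, aborting any interruptible sleep it is blocked in so that the sleep fails with EINTR. The sketch below is a hypothetical illustration of how a library might call the syscall; it assumes the single-argument prototype of this era from <sys/kse.h>, and struct lib_thread and lib_interrupt_thread() are invented names, not part of the commit.

/*
 * Hypothetical caller of kse_thr_interrupt(); the library-side
 * types and helper are invented for illustration.
 */
#include <sys/types.h>
#include <sys/kse.h>
#include <errno.h>

struct lib_thread {
    struct kse_thr_mailbox tmbx;    /* mailbox shared with the kernel */
    /* ... library scheduling state ... */
};

static int
lib_interrupt_thread(struct lib_thread *lt)
{
    /*
     * The kernel scans our process for the thread whose td_mailbox
     * equals this user address, sets TDF_INTERRUPT on it, and aborts
     * an interruptible sleep; the victim's msleep() returns EINTR.
     * ESRCH means no kernel thread is currently bound to the mailbox.
     */
    if (kse_thr_interrupt(&lt->tmbx) != 0 && errno != ESRCH)
        return (-1);
    return (0);
}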
Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/kern_kse.c      22
-rw-r--r--   sys/kern/kern_synch.c    14
-rw-r--r--   sys/kern/kern_thread.c   22
3 files changed, 54 insertions, 4 deletions
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index eb6db57..540cf37 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -270,8 +270,26 @@ proc_linkup(struct proc *p, struct ksegrp *kg,
 int
 kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
 {
+    struct proc *p;
+    struct thread *td2;

-    return(ENOSYS);
+    p = td->td_proc;
+    mtx_lock_spin(&sched_lock);
+    FOREACH_THREAD_IN_PROC(p, td2) {
+        if (td2->td_mailbox == uap->tmbx) {
+            td2->td_flags |= TDF_INTERRUPT;
+            if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
+                if (td2->td_flags & TDF_CVWAITQ)
+                    cv_abort(td2);
+                else
+                    abortsleep(td2);
+            }
+            mtx_unlock_spin(&sched_lock);
+            return 0;
+        }
+    }
+    mtx_unlock_spin(&sched_lock);
+    return(ESRCH);
 }

 int
@@ -1390,7 +1408,9 @@ thread_userret(struct thread *td, struct trapframe *frame)
      * it would be nice if this all happened only on the first time
      * through. (the scan for extra work etc.)
      */
+    mtx_lock_spin(&sched_lock);
     td->td_flags &= ~TDF_UPCALLING;
+    mtx_unlock_spin(&sched_lock);
 #if 0
     error = suword((caddr_t)ke->ke_mailbox +
         offsetof(struct kse_mailbox, km_curthread), 0);
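Two details in this file are worth calling out. The lookup walks every thread in the process under sched_lock, so the cost is linear in the thread count, and the lock is released on both the found and not-found paths before returning. The small thread_userret() hunk is a correctness fix rather than churn: td_flags is a plain flags word protected by sched_lock, so an unlocked read-modify-write such as clearing TDF_UPCALLING can race with kse_thr_interrupt() setting TDF_INTERRUPT from another CPU and silently drop that bit. A minimal userland sketch of the lost-update hazard, with a pthread mutex standing in for sched_lock and invented flag values:

#include <pthread.h>

#define TDF_UPCALLING 0x01            /* stand-ins for the kernel's flags */
#define TDF_INTERRUPT 0x02

static int             td_flags = TDF_UPCALLING;
static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Racy: compiles to load, modify, store.  If another CPU ORs in
 * TDF_INTERRUPT between the load and the store, the stale value
 * written back erases that bit.
 */
static void
clear_upcalling_racy(void)
{
    td_flags &= ~TDF_UPCALLING;
}

/*
 * Safe: the lock serializes every read-modify-write of td_flags,
 * which is what the patch does in thread_userret().
 */
static void
clear_upcalling_locked(void)
{
    pthread_mutex_lock(&sched_lock);
    td_flags &= ~TDF_UPCALLING;
    pthread_mutex_unlock(&sched_lock);
}

/* What kse_thr_interrupt() does to the flags word, in spirit. */
static void
set_interrupt(void)
{
    pthread_mutex_lock(&sched_lock);
    td_flags |= TDF_INTERRUPT;
    pthread_mutex_unlock(&sched_lock);
}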
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index abbd7b5..ab5427f 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -157,10 +157,15 @@ msleep(ident, mtx, priority, wmesg, timo)
     if (p->p_flag & P_KSES) {
         /*
          * Just don't bother if we are exiting
-         * and not the exiting thread.
+         * and not the exiting thread or thread was marked as
+         * interrupted.
          */
-        if ((p->p_flag & P_WEXIT) && catch && (p->p_singlethread != td))
+        if (catch &&
+            (((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) ||
+            (td->td_flags & TDF_INTERRUPT))) {
+            td->td_flags &= ~TDF_INTERRUPT;
             return (EINTR);
+        }
         mtx_lock_spin(&sched_lock);
         if ((td->td_flags & (TDF_UNBOUND|TDF_INMSLEEP)) ==
             TDF_UNBOUND) {
@@ -277,6 +282,11 @@ msleep(ident, mtx, priority, wmesg, timo)
         p->p_stats->p_ru.ru_nivcsw++;
         mi_switch();
         td->td_flags &= ~TDF_TIMOFAIL;
+    }
+    if ((td->td_flags & TDF_INTERRUPT) && (priority & PCATCH) &&
+        (rval == 0)) {
+        td->td_flags &= ~TDF_INTERRUPT;
+        rval = EINTR;
     }
     mtx_unlock_spin(&sched_lock);
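The net effect in msleep() is that TDF_INTERRUPT acts like a pending signal for KSE threads: it is checked before sleeping, so an already-interrupted thread never blocks, and again after waking, so a sleep aborted by cv_abort()/abortsleep() surfaces as an error. In both places the flag only matters when the caller passed PCATCH, and it is consumed as it is reported. Below is a condensed sketch of that control flow; everything else in msleep() is elided and all names here are stand-ins, not kernel API.

#include <errno.h>

#define PCATCH        0x100    /* invented values for the sketch */
#define TDF_INTERRUPT 0x002

struct thread_sk {
    int flags;
};

/* Stand-in for the actual sleep/wakeup machinery. */
static int
wait_for_wakeup(struct thread_sk *td)
{
    (void)td;
    return (0);        /* pretend the sleep completed normally */
}

static int
msleep_sk(struct thread_sk *td, int priority)
{
    int rval;

    /* Pre-sleep: consume a pending interrupt instead of blocking. */
    if ((priority & PCATCH) && (td->flags & TDF_INTERRUPT)) {
        td->flags &= ~TDF_INTERRUPT;
        return (EINTR);
    }

    rval = wait_for_wakeup(td);

    /* Post-sleep: report an aborted sleep as EINTR, but only for
     * interruptible sleeps and only if nothing else failed first. */
    if ((td->flags & TDF_INTERRUPT) && (priority & PCATCH) && rval == 0) {
        td->flags &= ~TDF_INTERRUPT;
        rval = EINTR;
    }
    return (rval);
}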
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index eb6db57..540cf37 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -270,8 +270,26 @@ proc_linkup(struct proc *p, struct ksegrp *kg,
 int
 kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
 {
+    struct proc *p;
+    struct thread *td2;

-    return(ENOSYS);
+    p = td->td_proc;
+    mtx_lock_spin(&sched_lock);
+    FOREACH_THREAD_IN_PROC(p, td2) {
+        if (td2->td_mailbox == uap->tmbx) {
+            td2->td_flags |= TDF_INTERRUPT;
+            if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
+                if (td2->td_flags & TDF_CVWAITQ)
+                    cv_abort(td2);
+                else
+                    abortsleep(td2);
+            }
+            mtx_unlock_spin(&sched_lock);
+            return 0;
+        }
+    }
+    mtx_unlock_spin(&sched_lock);
+    return(ESRCH);
 }

 int
@@ -1390,7 +1408,9 @@ thread_userret(struct thread *td, struct trapframe *frame)
      * it would be nice if this all happened only on the first time
      * through. (the scan for extra work etc.)
      */
+    mtx_lock_spin(&sched_lock);
     td->td_flags &= ~TDF_UPCALLING;
+    mtx_unlock_spin(&sched_lock);
 #if 0
     error = suword((caddr_t)ke->ke_mailbox +
         offsetof(struct kse_mailbox, km_curthread), 0);