author	jeff <jeff@FreeBSD.org>	2007-06-05 00:00:57 +0000
committer	jeff <jeff@FreeBSD.org>	2007-06-05 00:00:57 +0000
commit	91d150179059555ef497f4b5b5a560fdb24e472f (patch)
tree	6727b982fa0d93b8aafab313bdc797aee9e314d2	/sys/kern/subr_trap.c
parent	8297f778b9d0a595a99ca58d332ab4111b636019 (diff)
Commit 14/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
  synchronization.
- Use the per-process spinlock rather than the sched_lock for per-process
  scheduling synchronization.

Tested by:	kris, current@
Tested on:	i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with:	kris, attilio, kmacy, jhb, julian, bde (small parts each)
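The change is easiest to see as a pattern: where code used to take the single
global sched_lock spin mutex around a per-thread flag update, it now takes the
lock belonging to that thread. Below is a minimal sketch of the new per-thread
idiom, assuming the post-decomposition 7-CURRENT APIs visible in this diff;
the helper name set_ast() is hypothetical and not part of the commit.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

/*
 * Hypothetical helper: flag a thread so it runs ast() on its way back
 * to user mode.  For contrast, the pre-commit form serialized this on
 * the global spin lock:
 *
 *	mtx_lock_spin(&sched_lock);
 *	td->td_flags |= TDF_ASTPENDING;
 *	mtx_unlock_spin(&sched_lock);
 */
static void
set_ast(struct thread *td)
{
	thread_lock(td);	/* lock protecting td's scheduling state */
	td->td_flags |= TDF_ASTPENDING;
	thread_unlock(td);
}

Because thread_lock() resolves to the lock of whatever container currently
holds the thread, contention is spread across run queues and sleep queues
instead of being machine-wide.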
Diffstat (limited to 'sys/kern/subr_trap.c')
-rw-r--r--	sys/kern/subr_trap.c	19
1 file changed, 11 insertions, 8 deletions
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index f839ace..6fc92cc 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -82,11 +82,11 @@ userret(struct thread *td, struct trapframe *frame)
#ifdef DIAGNOSTIC
/* Check that we called signotify() enough. */
PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
+ thread_lock(td);
if (SIGPENDING(td) && ((td->td_flags & TDF_NEEDSIGCHK) == 0 ||
(td->td_flags & TDF_ASTPENDING) == 0))
printf("failed to set signal flags properly for ast()\n");
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td);
PROC_UNLOCK(p);
#endif
@@ -163,7 +163,7 @@ ast(struct trapframe *framep)
KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_assert(&sched_lock, MA_NOTOWNED);
+ THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
td->td_frame = framep;
td->td_pticks = 0;
@@ -179,8 +179,7 @@ ast(struct trapframe *framep)
* AST's saved in sflag, the astpending flag will be set and
* ast() will be called again.
*/
- mtx_lock_spin(&sched_lock);
- flags = td->td_flags;
+ PROC_SLOCK(p);
sflag = p->p_sflag;
if (p->p_sflag & (PS_ALRMPEND | PS_PROFPEND))
p->p_sflag &= ~(PS_ALRMPEND | PS_PROFPEND);
@@ -188,9 +187,12 @@ ast(struct trapframe *framep)
if (p->p_sflag & PS_MACPEND)
p->p_sflag &= ~PS_MACPEND;
#endif
+ thread_lock(td);
+ PROC_SUNLOCK(p);
+ flags = td->td_flags;
td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
TDF_NEEDRESCHED | TDF_INTERRUPT);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td);
PCPU_INC(cnt.v_trap);
/*
@@ -239,10 +241,11 @@ ast(struct trapframe *framep)
if (KTRPOINT(td, KTR_CSW))
ktrcsw(1, 1);
#endif
- mtx_lock_spin(&sched_lock);
+ thread_lock(td);
sched_prio(td, td->td_user_pri);
+ SCHED_STAT_INC(switch_needresched);
mi_switch(SW_INVOL, NULL);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(0, 1);
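The third hunk above is the subtle one: p_sflag is snapshotted and cleared
under the per-process spinlock, and the thread lock is acquired before that
spinlock is released, so td_flags is harvested without fully dropping
synchronization in between. A minimal sketch of that ordering, using only the
locks and flag names visible in the diff (the helper name and out-parameters
are hypothetical):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

/*
 * Hypothetical helper mirroring the flag harvest in ast(): collect and
 * clear the pending-AST bits from both the process and the thread.
 */
static void
harvest_ast_flags(struct thread *td, int *flagsp, int *sflagp)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK(p);		/* per-process spinlock guards p_sflag */
	*sflagp = p->p_sflag;
	if (p->p_sflag & (PS_ALRMPEND | PS_PROFPEND))
		p->p_sflag &= ~(PS_ALRMPEND | PS_PROFPEND);
	thread_lock(td);	/* take the thread lock first ... */
	PROC_SUNLOCK(p);	/* ... then drop the process spinlock */
	*flagsp = td->td_flags;	/* td_flags is guarded by the thread lock */
	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
	    TDF_NEEDRESCHED | TDF_INTERRUPT);
	thread_unlock(td);
}

Note the lock order, process spinlock before thread lock, matches what the
diff itself establishes; reversing it elsewhere would risk deadlock against
this path.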