author     jeff <jeff@FreeBSD.org>    2007-06-05 00:00:57 +0000
committer  jeff <jeff@FreeBSD.org>    2007-06-05 00:00:57 +0000
commit     91d150179059555ef497f4b5b5a560fdb24e472f (patch)
tree       6727b982fa0d93b8aafab313bdc797aee9e314d2 /sys/vm
parent     8297f778b9d0a595a99ca58d332ab4111b636019 (diff)
Commit 14/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
  synchronization.
- Use the per-process spinlock rather than the sched_lock for per-process
  scheduling synchronization.

Tested by:      kris, current@
Tested on:      i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
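In broad strokes, the change replaces the single global sched_lock spin mutex
with two finer-grained locks. A minimal sketch of the resulting pattern,
modeled on the faultin() hunk below (the function name here is illustrative
only; the locking macros are the ones this commit series introduces):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

/*
 * Illustrative only: mark a process resident and wake its threads,
 * taking the per-process spin lock for p_sflag and the per-thread
 * lock for each thread's scheduling state.
 */
static void
mark_inmem_and_wake(struct proc *p)
{
	struct thread *td;

	PROC_SLOCK(p);			/* guards p->p_sflag */
	p->p_sflag |= PS_INMEM;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);	/* guards td's run state */
		if (TD_CAN_RUN(td))
			setrunnable(td);
		thread_unlock(td);
	}
	PROC_SUNLOCK(p);
}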
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_glue.c     | 65
-rw-r--r--  sys/vm/vm_meter.c    | 12
-rw-r--r--  sys/vm/vm_pageout.c  | 19
-rw-r--r--  sys/vm/vm_zeroidle.c |  8
4 files changed, 63 insertions(+), 41 deletions(-)
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index cb2a657..3a08855 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -619,24 +619,26 @@ faultin(p)
* busy swapping it in.
*/
++p->p_lock;
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
p->p_sflag |= PS_SWAPPINGIN;
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
PROC_UNLOCK(p);
FOREACH_THREAD_IN_PROC(p, td)
vm_thread_swapin(td);
PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
p->p_sflag &= ~PS_SWAPPINGIN;
p->p_sflag |= PS_INMEM;
FOREACH_THREAD_IN_PROC(p, td) {
+ thread_lock(td);
TD_CLR_SWAPPED(td);
if (TD_CAN_RUN(td))
setrunnable(td);
+ thread_unlock(td);
}
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
wakeup(&p->p_sflag);
@@ -672,9 +674,9 @@ scheduler(dummy)
loop:
if (vm_page_count_min()) {
VM_WAIT;
- mtx_lock_spin(&sched_lock);
+ thread_lock(&thread0);
proc0_rescan = 0;
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(&thread0);
goto loop;
}
@@ -685,13 +687,14 @@ loop:
if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
continue;
}
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
FOREACH_THREAD_IN_PROC(p, td) {
/*
* An otherwise runnable thread of a process
* swapped out has only the TDI_SWAPPED bit set.
*
*/
+ thread_lock(td);
if (td->td_inhibitors == TDI_SWAPPED) {
pri = p->p_swtime + td->td_slptime;
if ((p->p_sflag & PS_SWAPINREQ) == 0) {
@@ -709,8 +712,9 @@ loop:
ppri = pri;
}
}
+ thread_unlock(td);
}
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
}
sx_sunlock(&allproc_lock);
@@ -718,13 +722,13 @@ loop:
* Nothing to do, back to sleep.
*/
if ((p = pp) == NULL) {
- mtx_lock_spin(&sched_lock);
+ thread_lock(&thread0);
if (!proc0_rescan) {
TD_SET_IWAIT(&thread0);
mi_switch(SW_VOL, NULL);
}
proc0_rescan = 0;
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(&thread0);
goto loop;
}
PROC_LOCK(p);
@@ -736,15 +740,15 @@ loop:
*/
if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
PROC_UNLOCK(p);
- mtx_lock_spin(&sched_lock);
+ thread_lock(&thread0);
proc0_rescan = 0;
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(&thread0);
goto loop;
}
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
p->p_sflag &= ~PS_SWAPINREQ;
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
/*
* We would like to bring someone in. (only if there is space).
@@ -752,10 +756,12 @@ loop:
*/
faultin(p);
PROC_UNLOCK(p);
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
p->p_swtime = 0;
+ PROC_SUNLOCK(p);
+ thread_lock(&thread0);
proc0_rescan = 0;
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(&thread0);
goto loop;
}
@@ -763,7 +769,8 @@ void kick_proc0(void)
{
struct thread *td = &thread0;
-
+ /* XXX This will probably cause a LOR in some cases */
+ thread_lock(td);
if (TD_AWAITING_INTR(td)) {
CTR2(KTR_INTR, "%s: sched_add %d", __func__, 0);
TD_CLR_IWAIT(td);
@@ -773,6 +780,7 @@ void kick_proc0(void)
CTR2(KTR_INTR, "%s: state %d",
__func__, td->td_state);
}
+ thread_unlock(td);
}
@@ -821,12 +829,12 @@ retry:
* creation. It may have no
* address space or lock yet.
*/
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
if (p->p_state == PRS_NEW) {
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
continue;
}
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
/*
* An aio daemon switches its
@@ -876,7 +884,7 @@ retry:
break;
case PRS_NORMAL:
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
/*
* do not swapout a realtime process
* Check all the thread groups..
@@ -929,7 +937,7 @@ retry:
(minslptime > swap_idle_threshold2))) {
swapout(p);
didswap++;
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
PROC_UNLOCK(p);
vm_map_unlock(&vm->vm_map);
vmspace_free(vm);
@@ -937,7 +945,7 @@ retry:
goto retry;
}
nextproc:
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
}
nextproc2:
PROC_UNLOCK(p);
@@ -962,7 +970,7 @@ swapout(p)
struct thread *td;
PROC_LOCK_ASSERT(p, MA_OWNED);
- mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
+ mtx_assert(&p->p_slock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
printf("swapping out %d\n", p->p_pid);
#endif
@@ -996,15 +1004,18 @@ swapout(p)
p->p_sflag &= ~PS_INMEM;
p->p_sflag |= PS_SWAPPINGOUT;
PROC_UNLOCK(p);
- FOREACH_THREAD_IN_PROC(p, td)
+ FOREACH_THREAD_IN_PROC(p, td) {
+ thread_lock(td);
TD_SET_SWAPPED(td);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td);
+ }
+ PROC_SUNLOCK(p);
FOREACH_THREAD_IN_PROC(p, td)
vm_thread_swapout(td);
PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
p->p_sflag &= ~PS_SWAPPINGOUT;
p->p_swtime = 0;
}
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index d4b51e7..4d70155 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -131,17 +131,21 @@ vmtotal(SYSCTL_HANDLER_ARGS)
FOREACH_PROC_IN_SYSTEM(p) {
if (p->p_flag & P_SYSTEM)
continue;
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
switch (p->p_state) {
case PRS_NEW:
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
continue;
break;
default:
FOREACH_THREAD_IN_PROC(p, td) {
/* Need new statistics XXX */
+ thread_lock(td);
switch (td->td_state) {
case TDS_INHIBITED:
+ /*
+ * XXX stats no longer synchronized.
+ */
if (TD_ON_LOCK(td) ||
(td->td_inhibitors ==
TDI_SWAPPED)) {
@@ -162,13 +166,15 @@ vmtotal(SYSCTL_HANDLER_ARGS)
case TDS_RUNQ:
case TDS_RUNNING:
total.t_rq++;
+ thread_unlock(td);
continue;
default:
break;
}
+ thread_unlock(td);
}
}
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
/*
* Note active objects.
*/
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index dcf69ef..99630ce 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1246,22 +1246,24 @@ unlock_and_continue:
* If the process is in a non-running type state,
* don't touch it. Check all the threads individually.
*/
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
breakout = 0;
FOREACH_THREAD_IN_PROC(p, td) {
+ thread_lock(td);
if (!TD_ON_RUNQ(td) &&
!TD_IS_RUNNING(td) &&
!TD_IS_SLEEPING(td)) {
+ thread_unlock(td);
breakout = 1;
break;
}
+ thread_unlock(td);
}
+ PROC_SUNLOCK(p);
if (breakout) {
- mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
continue;
}
- mtx_unlock_spin(&sched_lock);
/*
* get the process size
*/
@@ -1287,9 +1289,9 @@ unlock_and_continue:
sx_sunlock(&allproc_lock);
if (bigproc != NULL) {
killproc(bigproc, "out of swap space");
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(bigproc);
sched_nice(bigproc, PRIO_MIN);
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(bigproc);
PROC_UNLOCK(bigproc);
wakeup(&cnt.v_free_count);
}
@@ -1599,17 +1601,20 @@ vm_daemon()
* if the process is in a non-running type state,
* don't touch it.
*/
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
breakout = 0;
FOREACH_THREAD_IN_PROC(p, td) {
+ thread_lock(td);
if (!TD_ON_RUNQ(td) &&
!TD_IS_RUNNING(td) &&
!TD_IS_SLEEPING(td)) {
+ thread_unlock(td);
breakout = 1;
break;
}
+ thread_unlock(td);
}
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
if (breakout) {
PROC_UNLOCK(p);
continue;
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 5af84e0..b21d01f 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -145,9 +145,9 @@ vm_pagezero(void __unused *arg)
vm_page_zero_idle();
#ifndef PREEMPTION
if (sched_runnable()) {
- mtx_lock_spin(&sched_lock);
+ thread_lock(curthread);
mi_switch(SW_VOL, NULL);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(curthread);
}
#endif
} else {
@@ -176,11 +176,11 @@ pagezero_start(void __unused *arg)
PROC_LOCK(pagezero_proc);
pagezero_proc->p_flag |= P_NOLOAD;
PROC_UNLOCK(pagezero_proc);
- mtx_lock_spin(&sched_lock);
td = FIRST_THREAD_IN_PROC(pagezero_proc);
+ thread_lock(td);
sched_class(td, PRI_IDLE);
sched_prio(td, PRI_MAX_IDLE);
sched_add(td, SRQ_BORING);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td);
}
SYSINIT(pagezero, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, pagezero_start, NULL)
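For context, the scheduler() and kick_proc0() hunks in vm_glue.c above guard
the swapper's idle-wait handshake with thread0's own thread lock instead of
sched_lock. A hypothetical sketch of that handshake, reconstructed from the
hunks shown (both functions are simplified, and the SRQ_INTR flag on the wake
side is an assumption, since the sched_add() call sits outside the visible
diff context):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>

static int proc0_rescan;	/* set by the kicker if thread0 is running */

/* Simplified wait side, as in scheduler()'s "nothing to do" path. */
static void
swapper_wait(void)
{
	thread_lock(&thread0);
	if (!proc0_rescan) {
		TD_SET_IWAIT(&thread0);		/* park until kicked */
		mi_switch(SW_VOL, NULL);	/* thread lock held across */
	}
	proc0_rescan = 0;
	thread_unlock(&thread0);
}

/* Simplified wake side, as in kick_proc0(). */
static void
swapper_kick(void)
{
	struct thread *td = &thread0;

	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);	/* requeue thread0 */
	} else
		proc0_rescan = 1;		/* already running; rescan */
	thread_unlock(td);
}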