author    jeff <jeff@FreeBSD.org>    2007-06-05 00:00:57 +0000
committer jeff <jeff@FreeBSD.org>    2007-06-05 00:00:57 +0000
commit    91d150179059555ef497f4b5b5a560fdb24e472f (patch)
tree      6727b982fa0d93b8aafab313bdc797aee9e314d2 /sys/i386
parent    8297f778b9d0a595a99ca58d332ab4111b636019 (diff)
download  FreeBSD-src-91d150179059555ef497f4b5b5a560fdb24e472f.zip
          FreeBSD-src-91d150179059555ef497f4b5b5a560fdb24e472f.tar.gz
Commit 14/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
  synchronization.
- Use the per-process spinlock rather than the sched_lock for per-process
  scheduling synchronization.

Tested by:      kris, current@
Tested on:      i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
Diffstat (limited to 'sys/i386')
-rw-r--r--  sys/i386/i386/machdep.c         8
-rw-r--r--  sys/i386/isa/npx.c              4
-rw-r--r--  sys/i386/linux/linux_machdep.c  12
3 files changed, 12 insertions, 12 deletions
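Before the per-file hunks, here is a minimal sketch of the conversion pattern the change applies, assuming the post-decomposition thread_lock()/thread_unlock() interface from sys/proc.h. The set_ast_pending() helper is hypothetical and exists only to show the before/after shape; the real hunks below are the authoritative changes.

/*
 * Hypothetical helper illustrating the conversion: the old code
 * serialized per-thread flag updates on the global sched_lock spin
 * mutex, while the new code takes only the lock covering the thread
 * being modified.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

static void
set_ast_pending(struct thread *td)
{
	/*
	 * Old pattern (pre-decomposition):
	 *
	 *	mtx_lock_spin(&sched_lock);
	 *	td->td_flags |= TDF_ASTPENDING;
	 *	mtx_unlock_spin(&sched_lock);
	 */

	/* New pattern: lock only the thread whose state changes. */
	thread_lock(td);
	td->td_flags |= TDF_ASTPENDING;
	thread_unlock(td);
}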
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index f9398df..15dc664 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -1058,9 +1058,9 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
#ifdef SMP
/* Schedule ourselves on the indicated cpu. */
- mtx_lock_spin(&sched_lock);
+ thread_lock(curthread);
sched_bind(curthread, cpu_id);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(curthread);
#endif
/* Calibrate by measuring a short delay. */
@@ -1071,9 +1071,9 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
intr_restore(reg);
#ifdef SMP
- mtx_lock_spin(&sched_lock);
+ thread_lock(curthread);
sched_unbind(curthread);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(curthread);
#endif
/*
diff --git a/sys/i386/isa/npx.c b/sys/i386/isa/npx.c
index e9ba8b0..c5d381e 100644
--- a/sys/i386/isa/npx.c
+++ b/sys/i386/isa/npx.c
@@ -230,9 +230,9 @@ npx_intr(dummy)
td = PCPU_GET(fpcurthread);
if (td != NULL) {
td->td_pcb->pcb_flags |= PCB_NPXTRAP;
- mtx_lock_spin(&sched_lock);
+ thread_lock(td);
td->td_flags |= TDF_ASTPENDING;
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td);
}
return (FILTER_HANDLED);
}
diff --git a/sys/i386/linux/linux_machdep.c b/sys/i386/linux/linux_machdep.c
index 5f916ed..75ccd3e 100644
--- a/sys/i386/linux/linux_machdep.c
+++ b/sys/i386/linux/linux_machdep.c
@@ -325,10 +325,10 @@ linux_fork(struct thread *td, struct linux_fork_args *args)
/*
* Make this runnable after we are finished with it.
*/
- mtx_lock_spin(&sched_lock);
+ thread_lock(td2);
TD_SET_CAN_RUN(td2);
sched_add(td2, SRQ_BORING);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td2);
return (0);
}
@@ -368,10 +368,10 @@ linux_vfork(struct thread *td, struct linux_vfork_args *args)
/*
* Make this runnable after we are finished with it.
*/
- mtx_lock_spin(&sched_lock);
+ thread_lock(td2);
TD_SET_CAN_RUN(td2);
sched_add(td2, SRQ_BORING);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td2);
/* wait for the children to exit, ie. emulate vfork */
PROC_LOCK(p2);
@@ -569,10 +569,10 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
/*
* Make this runnable after we are finished with it.
*/
- mtx_lock_spin(&sched_lock);
+ thread_lock(td2);
TD_SET_CAN_RUN(td2);
sched_add(td2, SRQ_BORING);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td2);
td->td_retval[0] = p2->p_pid;
td->td_retval[1] = 0;