diff options
author | jeff <jeff@FreeBSD.org> | 2007-06-05 00:00:57 +0000 |
---|---|---|
committer | jeff <jeff@FreeBSD.org> | 2007-06-05 00:00:57 +0000 |
commit | 91d150179059555ef497f4b5b5a560fdb24e472f (patch) | |
tree | 6727b982fa0d93b8aafab313bdc797aee9e314d2 /sys/kern/sys_process.c | |
parent | 8297f778b9d0a595a99ca58d332ab4111b636019 (diff) | |
download | FreeBSD-src-91d150179059555ef497f4b5b5a560fdb24e472f.zip FreeBSD-src-91d150179059555ef497f4b5b5a560fdb24e472f.tar.gz |
Commit 14/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
synchronization.
- Use the per-process spinlock rather than the sched_lock for per-process
scheduling synchronization.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
Diffstat (limited to 'sys/kern/sys_process.c')
-rw-r--r-- | sys/kern/sys_process.c | 33 |
1 files changed, 18 insertions, 15 deletions
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c index 785d45d..0ac9706 100644 --- a/sys/kern/sys_process.c +++ b/sys/kern/sys_process.c @@ -527,12 +527,12 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data) sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { PROC_LOCK(p); - mtx_lock_spin(&sched_lock); + PROC_SLOCK(p); FOREACH_THREAD_IN_PROC(p, td2) { if (td2->td_tid == pid) break; } - mtx_unlock_spin(&sched_lock); + PROC_SUNLOCK(p); if (td2 != NULL) break; /* proc lock held */ PROC_UNLOCK(p); @@ -701,15 +701,15 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data) break; case PT_SUSPEND: - mtx_lock_spin(&sched_lock); + thread_lock(td2); td2->td_flags |= TDF_DBSUSPEND; - mtx_unlock_spin(&sched_lock); + thread_unlock(td2); break; case PT_RESUME: - mtx_lock_spin(&sched_lock); + thread_lock(td2); td2->td_flags &= ~TDF_DBSUSPEND; - mtx_unlock_spin(&sched_lock); + thread_unlock(td2); break; case PT_STEP: @@ -780,32 +780,35 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data) proctree_locked = 0; } /* deliver or queue signal */ - mtx_lock_spin(&sched_lock); + thread_lock(td2); td2->td_flags &= ~TDF_XSIG; - mtx_unlock_spin(&sched_lock); + thread_unlock(td2); td2->td_xsig = data; p->p_xstat = data; p->p_xthread = NULL; if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) { - mtx_lock_spin(&sched_lock); + PROC_SLOCK(p); if (req == PT_DETACH) { struct thread *td3; - FOREACH_THREAD_IN_PROC(p, td3) + FOREACH_THREAD_IN_PROC(p, td3) { + thread_lock(td3); td3->td_flags &= ~TDF_DBSUSPEND; + thread_unlock(td3); + } } /* * unsuspend all threads, to not let a thread run, * you should use PT_SUSPEND to suspend it before * continuing process. 
*/ - mtx_unlock_spin(&sched_lock); #ifdef KSE + PROC_SUNLOCK(p); thread_continued(p); + PROC_SLOCK(p); #endif p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED); - mtx_lock_spin(&sched_lock); thread_unsuspend(p); - mtx_unlock_spin(&sched_lock); + PROC_SUNLOCK(p); } if (data) @@ -968,13 +971,13 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data) buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK); tmp = 0; PROC_LOCK(p); - mtx_lock_spin(&sched_lock); + PROC_SLOCK(p); FOREACH_THREAD_IN_PROC(p, td2) { if (tmp >= num) break; buf[tmp++] = td2->td_tid; } - mtx_unlock_spin(&sched_lock); + PROC_SUNLOCK(p); PROC_UNLOCK(p); error = copyout(buf, addr, tmp * sizeof(lwpid_t)); free(buf, M_TEMP); |