summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_lockf.c
diff options
context:
space:
mode:
authorjeff <jeff@FreeBSD.org>2007-06-05 00:00:57 +0000
committerjeff <jeff@FreeBSD.org>2007-06-05 00:00:57 +0000
commit91d150179059555ef497f4b5b5a560fdb24e472f (patch)
tree6727b982fa0d93b8aafab313bdc797aee9e314d2 /sys/kern/kern_lockf.c
parent8297f778b9d0a595a99ca58d332ab4111b636019 (diff)
downloadFreeBSD-src-91d150179059555ef497f4b5b5a560fdb24e472f.zip
FreeBSD-src-91d150179059555ef497f4b5b5a560fdb24e472f.tar.gz
Commit 14/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling synchronization. - Use the per-process spinlock rather than the sched_lock for per-process scheduling synchronization. Tested by: kris, current@ Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc. Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
Diffstat (limited to 'sys/kern/kern_lockf.c')
-rw-r--r--sys/kern/kern_lockf.c24
1 file changed, 16 insertions, 8 deletions
diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c
index 483f953..aaedc11 100644
--- a/sys/kern/kern_lockf.c
+++ b/sys/kern/kern_lockf.c
@@ -266,16 +266,19 @@ lf_setlock(lock)
*/
if ((lock->lf_flags & F_POSIX) &&
(block->lf_flags & F_POSIX)) {
- register struct proc *wproc;
+ struct proc *wproc;
+ struct proc *nproc;
struct thread *td;
- register struct lockf *waitblock;
+ struct lockf *waitblock;
int i = 0;
/* The block is waiting on something */
- /* XXXKSE this is not complete under threads */
wproc = (struct proc *)block->lf_id;
- mtx_lock_spin(&sched_lock);
+restart:
+ nproc = NULL;
+ PROC_SLOCK(wproc);
FOREACH_THREAD_IN_PROC(wproc, td) {
+ thread_lock(td);
while (td->td_wchan &&
(td->td_wmesg == lockstr) &&
(i++ < maxlockdepth)) {
@@ -284,15 +287,20 @@ lf_setlock(lock)
waitblock = waitblock->lf_next;
if ((waitblock->lf_flags & F_POSIX) == 0)
break;
- wproc = (struct proc *)waitblock->lf_id;
- if (wproc == (struct proc *)lock->lf_id) {
- mtx_unlock_spin(&sched_lock);
+ nproc = (struct proc *)waitblock->lf_id;
+ if (nproc == (struct proc *)lock->lf_id) {
+ PROC_SUNLOCK(wproc);
+ thread_unlock(td);
free(lock, M_LOCKF);
return (EDEADLK);
}
}
+ thread_unlock(td);
}
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(wproc);
+ wproc = nproc;
+ if (wproc)
+ goto restart;
}
/*
* For flock type locks, we must first remove
OpenPOWER on IntegriCloud