path: root/sys/kern/kern_time.c
author    jeff <jeff@FreeBSD.org>    2007-06-05 00:00:57 +0000
committer jeff <jeff@FreeBSD.org>    2007-06-05 00:00:57 +0000
commit    91d150179059555ef497f4b5b5a560fdb24e472f (patch)
tree      6727b982fa0d93b8aafab313bdc797aee9e314d2 /sys/kern/kern_time.c
parent    8297f778b9d0a595a99ca58d332ab4111b636019 (diff)
Commit 14/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
  synchronization.
- Use the per-process spinlock rather than the sched_lock for per-process
  scheduling synchronization.

Tested by:      kris, current@
Tested on:      i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
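As a rough illustration of the per-process half of this change, the sketch below contrasts the old global sched_lock pattern with the new per-process spin lock. The helper example_setitimer_locked() and its exact field access are illustrative assumptions rather than code from the tree; PROC_SLOCK()/PROC_SUNLOCK() and mtx_lock_spin()/mtx_unlock_spin() are the real FreeBSD primitives involved.

/*
 * Minimal sketch, not the committed code: example_setitimer_locked() is a
 * hypothetical helper used only to show the locking pattern adopted here.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/time.h>

static void
example_setitimer_locked(struct proc *p, u_int which, struct itimerval *aitv)
{
	/*
	 * Old pattern, pre-decomposition: the global scheduler spin lock
	 * protected p_stats->p_timer[]:
	 *
	 *	mtx_lock_spin(&sched_lock);
	 *	p->p_stats->p_timer[which] = *aitv;
	 *	mtx_unlock_spin(&sched_lock);
	 */

	/* New pattern: the per-process spin lock covers the same state. */
	PROC_SLOCK(p);
	p->p_stats->p_timer[which] = *aitv;
	PROC_SUNLOCK(p);
}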
Diffstat (limited to 'sys/kern/kern_time.c')
-rw-r--r--  sys/kern/kern_time.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index c434b93..8634c8a 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -552,9 +552,9 @@ kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
timevalsub(&aitv->it_value, &ctv);
}
} else {
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
*aitv = p->p_stats->p_timer[which];
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
}
return (0);
}
@@ -623,10 +623,10 @@ kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
timevalsub(&oitv->it_value, &ctv);
}
} else {
- mtx_lock_spin(&sched_lock);
+ PROC_SLOCK(p);
*oitv = p->p_stats->p_timer[which];
p->p_stats->p_timer[which] = *aitv;
- mtx_unlock_spin(&sched_lock);
+ PROC_SUNLOCK(p);
}
return (0);
}
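The per-thread half of the decomposition (the first bullet of the commit message) does not appear in this file's diff. A hedged sketch of that pattern follows: example_mark_resched() is a hypothetical name, and td_flags/TDF_NEEDRESCHED stand in for any per-thread scheduling field serialized by the thread lock.

/*
 * Minimal sketch of the per-thread case, assuming a hypothetical helper
 * example_mark_resched(); thread_lock()/thread_unlock() replace the old
 * mtx_lock_spin(&sched_lock)/mtx_unlock_spin(&sched_lock) pair for
 * per-thread scheduling state.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

static void
example_mark_resched(struct thread *td)
{
	thread_lock(td);			/* per-thread scheduling lock */
	td->td_flags |= TDF_NEEDRESCHED;	/* example per-thread field */
	thread_unlock(td);
}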