summary | refs | log | tree | commit | diff | stats
path: root/sys/kern/sched_4bsd.c
diff options
context:
space:
mode:
author: jhb <jhb@FreeBSD.org> 2003-04-22 20:50:38 +0000
committer: jhb <jhb@FreeBSD.org> 2003-04-22 20:50:38 +0000
commit: 41837c0a1432e00773a9f7e1c844133dc3f62fc2 (patch)
tree: 7e1643dec04960e48deaf7881539cca7b70f183e /sys/kern/sched_4bsd.c
parent: ced60d737ac0e99b71a066dfb899c47260e50d34 (diff)
download: FreeBSD-src-41837c0a1432e00773a9f7e1c844133dc3f62fc2.zip
download: FreeBSD-src-41837c0a1432e00773a9f7e1c844133dc3f62fc2.tar.gz
- Assert that the proc lock and sched_lock are held in sched_nice().
- For the 4BSD scheduler, this means that all callers of the static function resetpriority() now always hold sched_lock, so don't lock sched_lock explicitly in that function.
Diffstat (limited to 'sys/kern/sched_4bsd.c')
-rw-r--r--  sys/kern/sched_4bsd.c  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 219405f..ee20384 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -378,7 +378,6 @@ resetpriority(struct ksegrp *kg)
register unsigned int newpriority;
struct thread *td;
- mtx_lock_spin(&sched_lock);
if (kg->kg_pri_class == PRI_TIMESHARE) {
newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
@@ -389,7 +388,6 @@ resetpriority(struct ksegrp *kg)
FOREACH_THREAD_IN_GROUP(kg, td) {
maybe_resched(td); /* XXXKSE silly */
}
- mtx_unlock_spin(&sched_lock);
}
/* ARGSUSED */
@@ -514,6 +512,9 @@ sched_fork_thread(struct thread *td, struct thread *child)
void
sched_nice(struct ksegrp *kg, int nice)
{
+
+ PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
+ mtx_assert(&sched_lock, MA_OWNED);
kg->kg_nice = nice;
resetpriority(kg);
}
OpenPOWER on IntegriCloud