path: root/sys/kern/ksched.c
author	jhb <jhb@FreeBSD.org>	2001-08-10 22:53:32 +0000
committer	jhb <jhb@FreeBSD.org>	2001-08-10 22:53:32 +0000
commit	4a89454dcd75ebc44e557012c2d007934836f9de (patch)
tree	1798843f61bbf42ad4e659497c23572b272969ca /sys/kern/ksched.c
parent	63014c2530236dbd3818166d675b28e0e61b427e (diff)
download	FreeBSD-src-4a89454dcd75ebc44e557012c2d007934836f9de.zip
	FreeBSD-src-4a89454dcd75ebc44e557012c2d007934836f9de.tar.gz
- Close races with signals and other AST's being triggered while we are in
  the process of exiting the kernel. The ast() function now loops as long
  as the PS_ASTPENDING or PS_NEEDRESCHED flags are set. It returns with
  preemption disabled so that any further AST's that arrive via an
  interrupt will be delayed until the low-level MD code returns to user
  mode.
- Use u_int's to store the tick counts for profiling purposes so that we
  do not need sched_lock just to read p_sticks. This also closes a
  problem where the call to addupc_task() could screw up the arithmetic
  due to non-atomic reads of p_sticks.
- Axe need_proftick(), aston(), astoff(), astpending(), need_resched(),
  clear_resched(), and resched_wanted() in favor of direct bit operations
  on p_sflag.
- Fix up locking with sched_lock some. In addupc_intr(), use sched_lock
  to ensure pr_addr and pr_ticks are updated atomically with setting
  PS_OWEUPC. In ast() we clear pr_ticks atomically with clearing
  PS_OWEUPC. We also do not grab the lock just to test a flag.
- Simplify the handling of Giant in ast() slightly.

Reviewed by:	bde (mostly)
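As a rough illustration of the loop shape the first item describes (a
simplified sketch only, not the actual sys/kern/subr_trap.c code from this
commit; the profiling and signal handling bodies are elided):

void
ast(void)
{
	struct proc *p = curproc;
	u_int sflag;

	/*
	 * Retest the flags on every pass: an interrupt may raise
	 * PS_ASTPENDING or PS_NEEDRESCHED while the previous request
	 * is still being serviced.
	 */
	while (p->p_sflag & (PS_ASTPENDING | PS_NEEDRESCHED)) {
		mtx_lock_spin(&sched_lock);
		/* Snapshot and consume the pending AST atomically. */
		sflag = p->p_sflag;
		p->p_sflag &= ~PS_ASTPENDING;
		mtx_unlock_spin(&sched_lock);

		if (sflag & PS_OWEUPC) {
			/* charge owed profiling ticks (addupc_task()) */
		}
		if (sflag & PS_NEEDRESCHED) {
			mtx_lock_spin(&sched_lock);
			p->p_sflag &= ~PS_NEEDRESCHED;
			mi_switch();	/* returns with sched_lock held */
			mtx_unlock_spin(&sched_lock);
		}
	}
	/*
	 * The real function returns with preemption disabled, so an
	 * AST raised by a late interrupt is deferred until the
	 * low-level MD code has returned to user mode.
	 */
}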
Diffstat (limited to 'sys/kern/ksched.c')
-rw-r--r--	sys/kern/ksched.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c
index c7f746a..c7b6dd3 100644
--- a/sys/kern/ksched.c
+++ b/sys/kern/ksched.c
@@ -176,7 +176,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
mtx_lock_spin(&sched_lock);
rtp_to_pri(&rtp, &p->p_pri);
- need_resched(p);
+ p->p_sflag |= PS_NEEDRESCHED;
mtx_unlock_spin(&sched_lock);
}
else
@@ -198,7 +198,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
* on the scheduling code: You must leave the
* scheduling info alone.
*/
- need_resched(p);
+ p->p_sflag |= PS_NEEDRESCHED;
mtx_unlock_spin(&sched_lock);
}
break;
@@ -217,7 +217,7 @@ int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct proc *p)
int ksched_yield(register_t *ret, struct ksched *ksched)
{
mtx_lock_spin(&sched_lock);
- need_resched(curproc);
+ curproc->p_sflag |= PS_NEEDRESCHED;
mtx_unlock_spin(&sched_lock);
return 0;
}
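The replacements above are mechanical: each call site already holds
sched_lock, so the retired macro becomes an explicit bit set on p_sflag.
For context, need_resched() presumably expanded to little more than the
following; this is a guess at the pre-commit definition, not taken from
the tree:

#define need_resched(p)	((p)->p_sflag |= PS_NEEDRESCHED)	/* hypothetical expansion */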