summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_intr.c
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2004-07-02 20:21:44 +0000
committerjhb <jhb@FreeBSD.org>2004-07-02 20:21:44 +0000
commit696704716d52a895094da20b7e1a0f763b069e12 (patch)
tree2a5d6a91ba98f5b9e075eecc1a9ca724b8a9110a /sys/kern/kern_intr.c
parent1f506bc6fab7cc97cb923d4af1174f9c732221dd (diff)
downloadFreeBSD-src-696704716d52a895094da20b7e1a0f763b069e12.zip
FreeBSD-src-696704716d52a895094da20b7e1a0f763b069e12.tar.gz
Implement preemption of kernel threads natively in the scheduler rather
than as one-off hacks in various other parts of the kernel: - Add a function maybe_preempt() that is called from sched_add() to determine if a thread about to be added to a run queue should be preempted to directly. If it is not safe to preempt or if the new thread does not have a high enough priority, then the function returns false and sched_add() adds the thread to the run queue. If the thread should be preempted to but the current thread is in a nested critical section, then the flag TDF_OWEPREEMPT is set and the thread is added to the run queue. Otherwise, mi_switch() is called immediately and the thread is never added to the run queue since it is switched to directly. When exiting an outermost critical section, if TDF_OWEPREEMPT is set, then clear it and call mi_switch() to perform the deferred preemption. - Remove explicit preemption from ithread_schedule() as calling setrunqueue() now does all the correct work. This also removes the do_switch argument from ithread_schedule(). - Do not use the manual preemption code in mtx_unlock if the architecture supports native preemption. - Don't call mi_switch() in a loop during shutdown to give ithreads a chance to run if the architecture supports native preemption since the ithreads will just preempt DELAY(). - Don't call mi_switch() from the page zeroing idle thread for architectures that support native preemption as it is unnecessary. - Native preemption is enabled on the same archs that supported ithread preemption, namely alpha, i386, and amd64. This change should largely be a NOP for the default case as committed except that we will do fewer context switches in a few cases and will avoid the run queues completely when preempting. Approved by: scottl (with his re@ hat)
Diffstat (limited to 'sys/kern/kern_intr.c')
-rw-r--r--sys/kern/kern_intr.c19
1 file changed, 3 insertions, 16 deletions
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index d11e9d2..99283ff 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -365,7 +365,7 @@ ok:
}
int
-ithread_schedule(struct ithd *ithread, int do_switch)
+ithread_schedule(struct ithd *ithread)
{
struct int_entropy entropy;
struct thread *td;
@@ -399,10 +399,7 @@ ithread_schedule(struct ithd *ithread, int do_switch)
/*
* Set it_need to tell the thread to keep running if it is already
* running. Then, grab sched_lock and see if we actually need to
- * put this thread on the runqueue. If so and the do_switch flag is
- * true and it is safe to switch, then switch to the ithread
- * immediately. Otherwise, set the needresched flag to guarantee
- * that this ithread will run before any userland processes.
+ * put this thread on the runqueue.
*/
ithread->it_need = 1;
mtx_lock_spin(&sched_lock);
@@ -410,16 +407,6 @@ ithread_schedule(struct ithd *ithread, int do_switch)
CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
TD_CLR_IWAIT(td);
setrunqueue(td);
- if (do_switch &&
- (ctd->td_critnest == 1) ) {
- KASSERT((TD_IS_RUNNING(ctd)),
- ("ithread_schedule: Bad state for curthread."));
- if (ctd->td_flags & TDF_IDLETD)
- ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
- mi_switch(SW_INVOL, NULL);
- } else {
- curthread->td_flags |= TDF_NEEDRESCHED;
- }
} else {
CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",
__func__, p->p_pid, ithread->it_need, td->td_state);
@@ -480,7 +467,7 @@ swi_sched(void *cookie, int flags)
*/
atomic_store_rel_int(&ih->ih_need, 1);
if (!(flags & SWI_DELAY)) {
- error = ithread_schedule(it, !cold && !dumping);
+ error = ithread_schedule(it);
KASSERT(error == 0, ("stray software interrupt"));
}
}
OpenPOWER on IntegriCloud