path: root/sys/kern
author     peter <peter@FreeBSD.org>    2003-04-02 23:53:30 +0000
committer  peter <peter@FreeBSD.org>    2003-04-02 23:53:30 +0000
commit     46969da5f8b7d09f4fb1368d6be669af63110127 (patch)
tree       4ec7883ecb294f0c60d2aa5a5a0b58973fafc545 /sys/kern
parent     de8dcf74da6f877502c92f233c1c8020975a826f (diff)
Commit a partial lazy thread switch mechanism for i386. It isn't as
lazy as it could be and could do with some more cleanup. Currently
it's under options LAZY_SWITCH. What this does is avoid %cr3 reloads
for short context switches that do not involve another user process,
i.e. we can take an interrupt, switch to a kthread, and return to the
user without explicitly flushing the TLB. However, this isn't as
exciting as it could be: the interrupt overhead is still high and too
much still blocks on Giant.

There are some debug sysctls, for stats and for an on/off switch.

The main problem with doing this has been: "what if the process whose
address space we're borrowing exits while we're borrowing it?" In
that case we use an IPI to give it a kick when we're about to reclaim
the pmap.

It's not compiled in unless you add the LAZY_SWITCH option. I want to
fix a few more things and get some more feedback before turning it on
by default.

This is NOT a replacement for Bosko's lazy interrupt stuff. This was
meant more for the kthread case, while his was for interrupts. Mine
helps a little for interrupts, but his helps a lot more.

The stats are enabled with options SWTCH_OPTIM_STATS - this has been a
pseudo-option for years; I just added a bunch of stuff to it.

One non-trivial change was to select the new thread before calling
cpu_switch() in the first place. This lets us catch the silly case of
doing a cpu_switch() to the currently running thread, which happens
uncomfortably often. It also simplifies a bit of the asm code in
cpu_switch (we no longer have to call choosethread() in the middle).
This has been implemented on i386 and (thanks to jake) sparc64; the
others will come soon. This part is actually separate from the lazy
switch stuff.

Glanced at by:  jake, jhb
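
For orientation, here is a minimal C sketch of the two ideas the message
describes: picking the successor thread before cpu_switch() so that a
switch to the current thread can be skipped outright, and letting a
kernel thread borrow the outgoing process's page tables so that %cr3
(and with it the TLB) is left untouched. This is an illustrative sketch,
not the committed implementation; the helpers is_borrower() and
lazy_borrow_pmap() are invented names and appear nowhere in this commit.

    /*
     * Sketch only -- not the committed code.  is_borrower() and
     * lazy_borrow_pmap() are hypothetical helpers standing in for the
     * i386 pmap/swtch.S changes, which are outside this sys/kern view.
     */
    static void
    lazy_switch_sketch(struct thread *td)
    {
            struct thread *newtd;

            /* Choose the successor first so the no-op case is visible. */
            newtd = choosethread();
            if (td == newtd)
                    return;         /* "stupid switch": nothing to do */

            if (is_borrower(newtd)) {
                    /*
                     * The incoming thread (typically a kthread) has no
                     * user address space of its own: let it run on the
                     * outgoing process's pmap and skip the %cr3 reload,
                     * preserving the TLB.  If the lending process exits
                     * in the meantime, an IPI tells the borrower to drop
                     * the pmap before it is reclaimed.
                     */
                    lazy_borrow_pmap(newtd, td);
            }
            cpu_switch(td, newtd);  /* the actual register/stack switch */
    }

The committed form of the first idea is visible in the mi_switch() hunk
below (the td != newtd test, with the stupid_switch counter under
SWTCH_OPTIM_STATS); the %cr3 handling lives in the i386-specific files,
which this sys/kern diffstat does not cover.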
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_kse.c     |  6
-rw-r--r--  sys/kern/kern_switch.c  | 18
-rw-r--r--  sys/kern/kern_synch.c   | 26
-rw-r--r--  sys/kern/kern_thr.c     |  4
-rw-r--r--  sys/kern/kern_thread.c  |  6
-rw-r--r--  sys/kern/subr_witness.c |  6
6 files changed, 62 insertions(+), 4 deletions(-)
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 3699461..d3ceb09 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -1250,7 +1250,13 @@ thread_exit(void)
                 PROC_UNLOCK(p);
         }
         /* XXX Shouldn't cpu_throw() here. */
+        mtx_assert(&sched_lock, MA_OWNED);
+#if defined(__i386__) || defined(__sparc64__)
+        cpu_throw(td, choosethread());
+#else
         cpu_throw();
+#endif
+        panic("I'm a teapot!");
         /* NOTREACHED */
 }
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 8c39243..377ad42 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -98,6 +98,9 @@ reassigned to keep this true.
 #include <sys/proc.h>
 #include <sys/queue.h>
 #include <sys/sched.h>
+#if defined(SMP) && defined(__i386__)
+#include <sys/smp.h>
+#endif
 #include <machine/critical.h>
 
 CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
@@ -122,8 +125,21 @@ choosethread(void)
        struct thread *td;
        struct ksegrp *kg;
 
+#if defined(SMP) && defined(__i386__)
+       if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
+               /* Shutting down, run idlethread on AP's */
+               td = PCPU_GET(idlethread);
+               ke = td->td_kse;
+               CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
+               ke->ke_flags |= KEF_DIDRUN;
+               TD_SET_RUNNING(td);
+               return (td);
+       }
+#endif
+
 retry:
-       if ((ke = sched_choose())) {
+       ke = sched_choose();
+       if (ke) {
                td = ke->ke_thread;
                KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
                kg = ke->ke_ksegrp;
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index cf6591f..b582e20 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -41,6 +41,9 @@
 #include "opt_ddb.h"
 #include "opt_ktrace.h"
+#ifdef __i386__
+#include "opt_swtch.h"
+#endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -67,6 +70,9 @@
 #endif
 #include <machine/cpu.h>
+#ifdef SWTCH_OPTIM_STATS
+#include <machine/md_var.h>
+#endif
 
 static void sched_setup(void *dummy);
 SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)
@@ -449,12 +455,16 @@ void
 mi_switch(void)
 {
        struct bintime new_switchtime;
-       struct thread *td = curthread;  /* XXX */
-       struct proc *p = td->td_proc;   /* XXX */
+       struct thread *td;
+#if defined(__i386__) || defined(__sparc64__)
+       struct thread *newtd;
+#endif
+       struct proc *p;
        u_int sched_nest;
 
        mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
-
+       td = curthread;         /* XXX */
+       p = td->td_proc;        /* XXX */
        KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
 #ifdef INVARIANTS
        if (!TD_ON_LOCK(td) &&
@@ -506,7 +516,17 @@ mi_switch(void)
        thread_switchout(td);
        sched_switchout(td);
 
+#if defined(__i386__) || defined(__sparc64__)
+       newtd = choosethread();
+       if (td != newtd)
+               cpu_switch(td, newtd);  /* SHAZAM!! */
+#ifdef SWTCH_OPTIM_STATS
+       else
+               stupid_switch++;
+#endif
+#else
        cpu_switch();           /* SHAZAM!!*/
+#endif
 
        sched_lock.mtx_recurse = sched_nest;
        sched_lock.mtx_lock = (uintptr_t)td;
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index 31c638e..5f23f6d 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -106,7 +106,11 @@ thr_exit1(void)
        td->td_last_kse = NULL;
        thread_stash(td);
 
+#if defined(__i386__) || defined(__sparc64__)
+       cpu_throw(td, choosethread());
+#else
        cpu_throw();
+#endif
 }
 
 #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 3699461..d3ceb09 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -1250,7 +1250,13 @@ thread_exit(void)
                 PROC_UNLOCK(p);
         }
         /* XXX Shouldn't cpu_throw() here. */
+        mtx_assert(&sched_lock, MA_OWNED);
+#if defined(__i386__) || defined(__sparc64__)
+        cpu_throw(td, choosethread());
+#else
         cpu_throw();
+#endif
+        panic("I'm a teapot!");
         /* NOTREACHED */
 }
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 94c9390..3b7526a 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -84,6 +84,9 @@
 #include "opt_ddb.h"
 #include "opt_witness.h"
+#ifdef __i386__
+#include "opt_swtch.h"
+#endif
 
 #include <sys/param.h>
 #include <sys/bus.h>
@@ -295,6 +298,9 @@ static struct witness_order_list_entry order_lists[] = {
 #if defined(__i386__) && defined(APIC_IO)
        { "tlb", &lock_class_mtx_spin },
 #endif
+#if defined(__i386__) && defined(LAZY_SWITCH)
+       { "lazypmap", &lock_class_mtx_spin },
+#endif
 #ifdef __sparc64__
        { "ipi", &lock_class_mtx_spin },
 #endif