path: root/sys/i386/xen/pmap.c
author		attilio <attilio@FreeBSD.org>	2011-05-20 14:53:16 +0000
committer	attilio <attilio@FreeBSD.org>	2011-05-20 14:53:16 +0000
commit		ccbb37970b3270861b379906a11b534a14e9631d (patch)
tree		f02b89a241b050f61bf77ffc6f1f3d169b534d42	/sys/i386/xen/pmap.c
parent		0372174d48f975382712625e72bc9b47e95be3ce (diff)
download	FreeBSD-src-ccbb37970b3270861b379906a11b534a14e9631d.zip
		FreeBSD-src-ccbb37970b3270861b379906a11b534a14e9631d.tar.gz
Reintroduce the lazypmap infrastructure and convert it to using
cpuset_t.

Requested by:	alc
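
The heart of the conversion is that pm_active is now a cpuset_t rather than a
plain integer mask, so the lazy-fix loop in the diff below walks the set with
the CPU_*() macros (CPU_EMPTY, CPU_SETOF, CPU_CLR_ATOMIC, and friends) instead
of the old "mask &= ~(mask - 1)" bit arithmetic. As a rough illustration only
(a userland sketch using <sys/cpuset.h> on a FreeBSD host, not the committed
kernel code), the per-CPU iteration pattern looks like this:

	#include <sys/param.h>
	#include <sys/cpuset.h>

	#include <stdio.h>

	int
	main(void)
	{
		cpuset_t active, target;
		int cpu;

		CPU_ZERO(&active);
		CPU_SET(1, &active);	/* pretend CPUs 1 and 3 still use the pmap */
		CPU_SET(3, &active);

		while (!CPU_EMPTY(&active)) {
			/* Find the lowest-numbered CPU left in the set. */
			for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
				if (CPU_ISSET(cpu, &active))
					break;

			/* Build the singleton set a per-CPU shootdown would target. */
			CPU_ZERO(&target);
			CPU_SET(cpu, &target);
			printf("would send IPI_LAZYPMAP to CPU %d\n", cpu);

			/* The kernel code re-reads pm_active; here just retire the CPU. */
			CPU_CLR(cpu, &active);
		}
		return (0);
	}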
Diffstat (limited to 'sys/i386/xen/pmap.c')
-rw-r--r--	sys/i386/xen/pmap.c	99
1 files changed, 99 insertions, 0 deletions
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 74ace61..1149b6f 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -1683,6 +1683,104 @@ retry:
* Pmap allocation/deallocation routines.
***************************************************/
+#ifdef SMP
+/*
+ * Deal with a SMP shootdown of other users of the pmap that we are
+ * trying to dispose of. This can be a bit hairy.
+ */
+static cpuset_t *lazymask;
+static u_int lazyptd;
+static volatile u_int lazywait;
+
+void pmap_lazyfix_action(void);
+
+void
+pmap_lazyfix_action(void)
+{
+
+#ifdef COUNT_IPIS
+ (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
+#endif
+ if (rcr3() == lazyptd)
+ load_cr3(PCPU_GET(curpcb)->pcb_cr3);
+ CPU_CLR_ATOMIC(PCPU_GET(cpuid), lazymask);
+ atomic_store_rel_int(&lazywait, 1);
+}
+
+static void
+pmap_lazyfix_self(cpuset_t mymask)
+{
+
+ if (rcr3() == lazyptd)
+ load_cr3(PCPU_GET(curpcb)->pcb_cr3);
+ CPU_NAND_ATOMIC(lazymask, &mymask);
+}
+
+
+static void
+pmap_lazyfix(pmap_t pmap)
+{
+ cpuset_t mymask, mask;
+ u_int spins;
+ int lsb;
+
+ mask = pmap->pm_active;
+ while (!CPU_EMPTY(&mask)) {
+ spins = 50000000;
+
+ /* Find least significant set bit. */
+ lsb = cpusetobj_ffs(&mask);
+ lsb--;
+ CPU_SETOF(lsb, &mask);
+ mtx_lock_spin(&smp_ipi_mtx);
+#ifdef PAE
+ lazyptd = vtophys(pmap->pm_pdpt);
+#else
+ lazyptd = vtophys(pmap->pm_pdir);
+#endif
+ mymask = PCPU_GET(cpumask);
+ if (!CPU_CMP(&mask, &mymask)) {
+ lazymask = &pmap->pm_active;
+ pmap_lazyfix_self(mymask);
+ } else {
+ atomic_store_rel_int((u_int *)&lazymask,
+ (u_int)&pmap->pm_active);
+ atomic_store_rel_int(&lazywait, 0);
+ ipi_selected(mask, IPI_LAZYPMAP);
+ while (lazywait == 0) {
+ ia32_pause();
+ if (--spins == 0)
+ break;
+ }
+ }
+ mtx_unlock_spin(&smp_ipi_mtx);
+ if (spins == 0)
+ printf("pmap_lazyfix: spun for 50000000\n");
+ mask = pmap->pm_active;
+ }
+}
+
+#else /* SMP */
+
+/*
+ * Cleaning up on uniprocessor is easy. For various reasons, we're
+ * unlikely to have to even execute this code, including the fact
+ * that the cleanup is deferred until the parent does a wait(2), which
+ * means that another userland process has run.
+ */
+static void
+pmap_lazyfix(pmap_t pmap)
+{
+ u_int cr3;
+
+ cr3 = vtophys(pmap->pm_pdir);
+ if (cr3 == rcr3()) {
+ load_cr3(PCPU_GET(curpcb)->pcb_cr3);
+ CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active);
+ }
+}
+#endif /* SMP */
+
/*
* Release any resources held by the given physical map.
* Called when a pmap initialized by pmap_pinit is being released.
@@ -1708,6 +1806,7 @@ pmap_release(pmap_t pmap)
mtx_lock(&createdelete_lock);
#endif
+ pmap_lazyfix(pmap);
mtx_lock_spin(&allpmaps_lock);
LIST_REMOVE(pmap, pm_list);
mtx_unlock_spin(&allpmaps_lock);
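
For reference, the handshake between pmap_lazyfix() and pmap_lazyfix_action()
above is a flag-based spin: clear lazywait, send the IPI, then spin with a
bounded budget until the handler stores 1 with release semantics. A rough
userland analog (C11 atomics with a pthread standing in for the IPI target;
an illustration only, not kernel code) looks like:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint lazywait;

	static void *
	remote_action(void *arg)
	{

		(void)arg;
		/* Stand-in for pmap_lazyfix_action(): do the work, then signal. */
		atomic_store_explicit(&lazywait, 1, memory_order_release);
		return (NULL);
	}

	int
	main(void)
	{
		pthread_t t;
		unsigned int spins = 50000000;

		atomic_store_explicit(&lazywait, 0, memory_order_release);
		if (pthread_create(&t, NULL, remote_action, NULL) != 0)	/* "send the IPI" */
			return (1);

		/* Bounded spin, mirroring the ia32_pause() loop in pmap_lazyfix(). */
		while (atomic_load_explicit(&lazywait, memory_order_acquire) == 0) {
			if (--spins == 0) {
				printf("gave up after 50000000 spins\n");
				break;
			}
		}

		pthread_join(t, NULL);
		return (0);
	}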