diff options
author | attilio <attilio@FreeBSD.org> | 2011-05-20 14:53:16 +0000 |
---|---|---|
committer | attilio <attilio@FreeBSD.org> | 2011-05-20 14:53:16 +0000 |
commit | ccbb37970b3270861b379906a11b534a14e9631d (patch) | |
tree | f02b89a241b050f61bf77ffc6f1f3d169b534d42 /sys/i386/xen | |
parent | 0372174d48f975382712625e72bc9b47e95be3ce (diff) | |
download | FreeBSD-src-ccbb37970b3270861b379906a11b534a14e9631d.zip FreeBSD-src-ccbb37970b3270861b379906a11b534a14e9631d.tar.gz |
Reintroduce the lazypmap infrastructure and convert it to using
cpuset_t.
Requested by: alc
Diffstat (limited to 'sys/i386/xen')
-rw-r--r-- | sys/i386/xen/mp_machdep.c | 11 | ||||
-rw-r--r-- | sys/i386/xen/pmap.c | 99 |
2 files changed, 109 insertions, 1 deletion
diff --git a/sys/i386/xen/mp_machdep.c b/sys/i386/xen/mp_machdep.c index 1565859..2d05596 100644 --- a/sys/i386/xen/mp_machdep.c +++ b/sys/i386/xen/mp_machdep.c @@ -154,6 +154,7 @@ static cpuset_t hyperthreading_cpus_mask; extern void Xhypervisor_callback(void); extern void failsafe_callback(void); +extern void pmap_lazyfix_action(void); struct cpu_group * cpu_topo(void) @@ -341,16 +342,24 @@ iv_invlcache(uintptr_t a, uintptr_t b) atomic_add_int(&smp_tlb_wait, 1); } +static void +iv_lazypmap(uintptr_t a, uintptr_t b) +{ + pmap_lazyfix_action(); + atomic_add_int(&smp_tlb_wait, 1); +} + /* * These start from "IPI offset" APIC_IPI_INTS */ -static call_data_func_t *ipi_vectors[5] = +static call_data_func_t *ipi_vectors[6] = { iv_rendezvous, iv_invltlb, iv_invlpg, iv_invlrng, iv_invlcache, + iv_lazypmap, }; /* diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c index 74ace61..1149b6f 100644 --- a/sys/i386/xen/pmap.c +++ b/sys/i386/xen/pmap.c @@ -1683,6 +1683,104 @@ retry: * Pmap allocation/deallocation routines. ***************************************************/ +#ifdef SMP +/* + * Deal with a SMP shootdown of other users of the pmap that we are + * trying to dispose of. This can be a bit hairy. + */ +static cpuset_t *lazymask; +static u_int lazyptd; +static volatile u_int lazywait; + +void pmap_lazyfix_action(void); + +void +pmap_lazyfix_action(void) +{ + +#ifdef COUNT_IPIS + (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++; +#endif + if (rcr3() == lazyptd) + load_cr3(PCPU_GET(curpcb)->pcb_cr3); + CPU_CLR_ATOMIC(PCPU_GET(cpuid), lazymask); + atomic_store_rel_int(&lazywait, 1); +} + +static void +pmap_lazyfix_self(cpuset_t mymask) +{ + + if (rcr3() == lazyptd) + load_cr3(PCPU_GET(curpcb)->pcb_cr3); + CPU_NAND_ATOMIC(lazymask, &mymask); +} + + +static void +pmap_lazyfix(pmap_t pmap) +{ + cpuset_t mymask, mask; + u_int spins; + int lsb; + + mask = pmap->pm_active; + while (!CPU_EMPTY(&mask)) { + spins = 50000000; + + /* Find least significant set bit. 
*/ + lsb = cpusetobj_ffs(&mask); + lsb--; + CPU_SETOF(lsb, &mask); + mtx_lock_spin(&smp_ipi_mtx); +#ifdef PAE + lazyptd = vtophys(pmap->pm_pdpt); +#else + lazyptd = vtophys(pmap->pm_pdir); +#endif + mymask = PCPU_GET(cpumask); + if (mask == mymask) { + lazymask = &pmap->pm_active; + pmap_lazyfix_self(mymask); + } else { + atomic_store_rel_int((u_int *)&lazymask, + (u_int)&pmap->pm_active); + atomic_store_rel_int(&lazywait, 0); + ipi_selected(mask, IPI_LAZYPMAP); + while (lazywait == 0) { + ia32_pause(); + if (--spins == 0) + break; + } + } + mtx_unlock_spin(&smp_ipi_mtx); + if (spins == 0) + printf("pmap_lazyfix: spun for 50000000\n"); + mask = pmap->pm_active; + } +} + +#else /* SMP */ + +/* + * Cleaning up on uniprocessor is easy. For various reasons, we're + * unlikely to have to even execute this code, including the fact + * that the cleanup is deferred until the parent does a wait(2), which + * means that another userland process has run. + */ +static void +pmap_lazyfix(pmap_t pmap) +{ + u_int cr3; + + cr3 = vtophys(pmap->pm_pdir); + if (cr3 == rcr3()) { + load_cr3(PCPU_GET(curpcb)->pcb_cr3); + CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active); + } +} +#endif /* SMP */ + /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. @@ -1708,6 +1806,7 @@ pmap_release(pmap_t pmap) mtx_lock(&createdelete_lock); #endif + pmap_lazyfix(pmap); mtx_lock_spin(&allpmaps_lock); LIST_REMOVE(pmap, pm_list); mtx_unlock_spin(&allpmaps_lock); |