author     jhb <jhb@FreeBSD.org>    2002-02-06 04:30:26 +0000
committer  jhb <jhb@FreeBSD.org>    2002-02-06 04:30:26 +0000
commit     156f4c8aea9e9bcea4428c6da79367baa11f3588 (patch)
tree       2d0f06748a7a9932a6c9c09800474f8c9daa8d46 /sys
parent     d2767dc7230887ad0d270b0a72649dcf486932a8 (diff)
Fixes for alpha pmap on SMP machines:
- Create a private list of active pmaps rather than abusing the list of all
  processes when we need to look up pmaps. The process list needs an sx lock,
  and we cannot acquire sx locks in the middle of cpu_switch()
  (pmap_activate() can call pmap_get_asn() from cpu_switch()), so the new
  list is protected by a spin lock instead. The list is also shorter, since
  a pmap can be used by more than one process: previously we could (at least
  in theory) dink with a pmap more than once, but now each pmap is touched
  exactly once when all of them have to be updated.
- Wrap pmap_activate()'s code to get a new ASN in an explicit critical
  section so that we cannot be preempted when it is called during an exec().
- Replace splhigh() in pmap_growkernel() with a critical section to prevent
  preemption while we are adjusting the kernel page tables.
- Fix an abuse of PCPU_GET(), which does not return an lvalue (see the
  sketch after the diffstat below).
- Clean up the ASN handling slightly by introducing macros, instead of magic
  numbers, for the ASN and ASN-generation fields.

Reviewed by: dfr
Diffstat (limited to 'sys')
-rw-r--r--  sys/alpha/alpha/pmap.c    | 108
-rw-r--r--  sys/alpha/include/pmap.h  |   9
-rw-r--r--  sys/kern/subr_witness.c   |   1
3 files changed, 66 insertions(+), 52 deletions(-)
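Of the changes below, the PCPU_GET() fix is the easiest to see in isolation.
A condensed before/after sketch, drawn from the pmap_get_asn() hunk (the
ASNGEN_MASK macro is introduced in the pmap.h hunk):

	/* Before: uses PCPU_GET() as an lvalue, which it does not return. */
	PCPU_GET(next_asn) = 0;
	PCPU_GET(current_asngen)++;
	PCPU_GET(current_asngen) &= (1 << 24) - 1;

	/* After: read via PCPU_GET(), write back via PCPU_SET(). */
	PCPU_SET(next_asn, 0);
	PCPU_SET(current_asngen, (PCPU_GET(current_asngen) + 1) & ASNGEN_MASK);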
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index f0bde96..059a268 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -315,6 +315,8 @@ vm_offset_t kernel_vm_end;
*/
static int pmap_maxasn;
static pmap_t pmap_active[MAXCPU];
+static LIST_HEAD(,pmap) allpmaps;
+static struct mtx allpmaps_lock;
/*
* Data for the pv entry allocation mechanism
@@ -546,6 +548,12 @@ pmap_bootstrap(vm_offset_t ptaddr, u_int maxasn)
nklev2 = 1;
/*
+ * Initialize list of pmaps.
+ */
+ LIST_INIT(&allpmaps);
+ LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
+
+ /*
* Set up proc0's PCB such that the ptbr points to the right place
* and has the kernel pmap's (null) ASN.
*/
@@ -698,52 +706,48 @@ pmap_invalidate_all_action(void *arg)
static void
pmap_get_asn(pmap_t pmap)
{
- if (pmap->pm_asn[PCPU_GET(cpuid)].gen != PCPU_GET(current_asngen)) {
- if (PCPU_GET(next_asn) > pmap_maxasn) {
+
+ if (PCPU_GET(next_asn) > pmap_maxasn) {
+ /*
+ * Start a new ASN generation.
+ *
+ * Invalidate all per-process mappings and I-cache
+ */
+ PCPU_SET(next_asn, 0);
+ PCPU_SET(current_asngen, (PCPU_GET(current_asngen) + 1) &
+ ASNGEN_MASK);
+
+ if (PCPU_GET(current_asngen) == 0) {
/*
- * Start a new ASN generation.
- *
- * Invalidate all per-process mappings and I-cache
+ * Clear the pm_asn[].gen of all pmaps.
+ * This is safe since it is only called from
+ * pmap_activate after it has deactivated
+ * the old pmap and it only affects this cpu.
*/
- PCPU_GET(next_asn) = 0;
- PCPU_GET(current_asngen)++;
- PCPU_GET(current_asngen) &= (1 << 24) - 1;
-
- if (PCPU_GET(current_asngen) == 0) {
- /*
- * Clear the pm_asn[].gen of all pmaps.
- * This is safe since it is only called from
- * pmap_activate after it has deactivated
- * the old pmap and it only affects this cpu.
- */
- struct proc *p;
- pmap_t tpmap;
+ pmap_t tpmap;
#ifdef PMAP_DIAGNOSTIC
- printf("pmap_get_asn: generation rollover\n");
+ printf("pmap_get_asn: generation rollover\n");
#endif
- PCPU_GET(current_asngen) = 1;
- sx_slock(&allproc_lock);
- LIST_FOREACH(p, &allproc, p_list) {
- if (p->p_vmspace) {
- tpmap = vmspace_pmap(p->p_vmspace);
- tpmap->pm_asn[PCPU_GET(cpuid)].gen = 0;
- }
- }
- sx_sunlock(&allproc_lock);
+ PCPU_SET(current_asngen, 1);
+ mtx_lock_spin(&allpmaps_lock);
+ LIST_FOREACH(tpmap, &allpmaps, pm_list) {
+ tpmap->pm_asn[PCPU_GET(cpuid)].gen = 0;
}
-
- /*
- * Since we are about to start re-using ASNs, we must
- * clear out the TLB and the I-cache since they are tagged
- * with the ASN.
- */
- ALPHA_TBIAP();
- alpha_pal_imb(); /* XXX overkill? */
+ mtx_unlock_spin(&allpmaps_lock);
}
- pmap->pm_asn[PCPU_GET(cpuid)].asn = PCPU_GET(next_asn)++;
- pmap->pm_asn[PCPU_GET(cpuid)].gen = PCPU_GET(current_asngen);
+
+ /*
+ * Since we are about to start re-using ASNs, we must
+ * clear out the TLB and the I-cache since they are tagged
+ * with the ASN.
+ */
+ ALPHA_TBIAP();
+ alpha_pal_imb(); /* XXX overkill? */
}
+ pmap->pm_asn[PCPU_GET(cpuid)].asn = PCPU_GET(next_asn);
+ PCPU_SET(next_asn, PCPU_GET(next_asn) + 1);
+ pmap->pm_asn[PCPU_GET(cpuid)].gen = PCPU_GET(current_asngen);
}
/***************************************************
@@ -1337,6 +1341,8 @@ pmap_pinit0(pmap)
}
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+ mtx_init(&allpmaps_lock, "allpmaps", MTX_SPIN | MTX_QUIET);
+ LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
}
/*
@@ -1386,6 +1392,9 @@ pmap_pinit(pmap)
}
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+ mtx_lock_spin(&allpmaps_lock);
+ LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
+ mtx_unlock_spin(&allpmaps_lock);
}
/*
@@ -1641,6 +1650,9 @@ retry:
if (lev1pg && !pmap_release_free_page(pmap, lev1pg))
goto retry;
+ mtx_lock_spin(&allpmaps_lock);
+ LIST_REMOVE(pmap, pm_list);
+ mtx_unlock_spin(&allpmaps_lock);
}
/*
@@ -1650,16 +1662,13 @@ void
pmap_growkernel(vm_offset_t addr)
{
/* XXX come back to this */
- struct proc *p;
struct pmap *pmap;
- int s;
pt_entry_t* pte;
pt_entry_t newlev1, newlev2;
vm_offset_t pa;
vm_page_t nkpg;
- s = splhigh();
-
+ critical_enter();
if (kernel_vm_end == 0) {
kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
@@ -1704,14 +1713,11 @@ pmap_growkernel(vm_offset_t addr)
newlev1 = pmap_phys_to_pte(pa)
| PG_V | PG_ASM | PG_KRE | PG_KWE;
- sx_slock(&allproc_lock);
- LIST_FOREACH(p, &allproc, p_list) {
- if (p->p_vmspace) {
- pmap = vmspace_pmap(p->p_vmspace);
- *pmap_lev1pte(pmap, kernel_vm_end) = newlev1;
- }
+ mtx_lock_spin(&allpmaps_lock);
+ LIST_FOREACH(pmap, &allpmaps, pm_list) {
+ *pmap_lev1pte(pmap, kernel_vm_end) = newlev1;
}
- sx_sunlock(&allproc_lock);
+ mtx_unlock_spin(&allpmaps_lock);
*pte = newlev1;
pmap_invalidate_all(kernel_pmap);
}
@@ -1742,7 +1748,7 @@ pmap_growkernel(vm_offset_t addr)
kernel_vm_end = (kernel_vm_end + ALPHA_L2SIZE) & ~(ALPHA_L2SIZE - 1);
}
- splx(s);
+ critical_exit();
}
/*
@@ -3244,6 +3250,7 @@ pmap_activate(struct thread *td)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
+ critical_enter();
if (pmap_active[PCPU_GET(cpuid)] && pmap != pmap_active[PCPU_GET(cpuid)]) {
atomic_clear_32(&pmap_active[PCPU_GET(cpuid)]->pm_active,
PCPU_GET(cpumask));
@@ -3260,6 +3267,7 @@ pmap_activate(struct thread *td)
atomic_set_32(&pmap->pm_active, PCPU_GET(cpumask));
td->td_pcb->pcb_hw.apcb_asn = pmap->pm_asn[PCPU_GET(cpuid)].asn;
+ critical_exit();
if (td == curthread) {
alpha_pal_swpctx((u_long)td->td_md.md_pcbpaddr);
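Taken together, the pmap.c changes above reduce to two synchronization
idioms. A condensed sketch, with names as in the diff and the surrounding
logic elided:

	/* Walk the private pmap list under its spin lock. Unlike the old
	 * allproc sx lock, a spin lock may be taken from cpu_switch(). */
	mtx_lock_spin(&allpmaps_lock);
	LIST_FOREACH(pmap, &allpmaps, pm_list)
		*pmap_lev1pte(pmap, kernel_vm_end) = newlev1;
	mtx_unlock_spin(&allpmaps_lock);

	/* Block preemption (replacing splhigh()/splx()) while per-CPU ASN
	 * state or the kernel page tables are being adjusted. */
	critical_enter();
	/* ... update ASN state / kernel page tables ... */
	critical_exit();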
diff --git a/sys/alpha/include/pmap.h b/sys/alpha/include/pmap.h
index bd06d9e..1c258d9 100644
--- a/sys/alpha/include/pmap.h
+++ b/sys/alpha/include/pmap.h
@@ -165,6 +165,10 @@ struct md_page {
TAILQ_HEAD(,pv_entry) pv_list;
};
+#define ASN_BITS 8
+#define ASNGEN_BITS (32 - ASN_BITS)
+#define ASNGEN_MASK ((1 << ASNGEN_BITS) - 1)
+
struct pmap {
pt_entry_t *pm_lev1; /* KVA of lev0map */
vm_object_t pm_pteobj; /* Container for pte's */
@@ -172,11 +176,12 @@ struct pmap {
int pm_count; /* reference count */
u_int32_t pm_active; /* active cpus */
struct {
- u_int32_t asn:8; /* address space number */
- u_int32_t gen:24; /* generation number */
+ u_int32_t asn:ASN_BITS; /* address space number */
+ u_int32_t gen:ASNGEN_BITS; /* generation number */
} pm_asn[MAXCPU];
struct pmap_statistics pm_stats; /* pmap statistics */
struct vm_page *pm_ptphint; /* pmap ptp hint */
+ LIST_ENTRY(pmap) pm_list; /* list of all pmaps. */
};
#define pmap_resident_count(pmap) (pmap)->pm_stats.resident_count
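The macro values work out as follows: with ASN_BITS = 8, ASNGEN_BITS is
32 - 8 = 24 and ASNGEN_MASK is (1 << 24) - 1 = 0x00ffffff, exactly the magic
constant the old pmap_get_asn() used. The generation bump in the new code is
therefore a masked increment:

	/* Advance the per-CPU ASN generation, wrapping after 2^24. */
	PCPU_SET(current_asngen, (PCPU_GET(current_asngen) + 1) & ASNGEN_MASK);
	/* e.g. 0x00ffffff + 1 == 0x01000000, masked back to 0, which then
	 * takes the rollover path that clears every pmap's pm_asn[].gen. */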
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 37dc369..a78c789 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -215,6 +215,7 @@ static struct witness_order_list_entry order_lists[] = {
/*
* leaf locks
*/
+ { "allpmaps", &lock_class_mtx_spin },
{ "icu", &lock_class_mtx_spin },
#ifdef SMP
{ "smp rendezvous", &lock_class_mtx_spin },