summaryrefslogtreecommitdiffstats
path: root/sys/i386
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2014-07-06 17:42:38 +0000
committeralc <alc@FreeBSD.org>2014-07-06 17:42:38 +0000
commitd74e85dbb9aba51f1ffc63a0ebae250559a8fcd9 (patch)
treea294db7e9041f651c7a421baf7d2bc56d5447086 /sys/i386
parent3437bf817836057606a700b02d68b0ed46582823 (diff)
downloadFreeBSD-src-d74e85dbb9aba51f1ffc63a0ebae250559a8fcd9.zip
FreeBSD-src-d74e85dbb9aba51f1ffc63a0ebae250559a8fcd9.tar.gz
Introduce pmap_unwire(). It will replace pmap_change_wiring(). There are
several reasons for this change:

pmap_change_wiring() has never (in my memory) been used to set the wired
attribute on a virtual page. We have always used pmap_enter() to do that.
Moreover, it is not really safe to use pmap_change_wiring() to set the wired
attribute on a virtual page. The description of pmap_change_wiring() says
that it assumes the existence of a mapping in the pmap. However, non-wired
mappings may be reclaimed by the pmap at any time. (See pmap_collect().)
Many implementations of pmap_change_wiring() will crash if the mapping does
not exist.

pmap_unwire() accepts a range of virtual addresses, whereas
pmap_change_wiring() acts upon a single virtual page. Since we are typically
unwiring a range of virtual addresses, pmap_unwire() will be more efficient.
Moreover, pmap_unwire() allows us to unwire superpage mappings. Previously,
we were forced to demote the superpage mapping, because pmap_change_wiring()
only allowed us to express the unwiring of a single base page mapping at a
time. This added to the overhead of unwiring for large ranges of addresses,
including the implicit unwiring that occurs at process termination.

Implementations for arm and powerpc will follow.

Discussed with: jeff, marcel
Reviewed by: kib
Sponsored by: EMC / Isilon Storage Division
Diffstat (limited to 'sys/i386')
-rw-r--r--sys/i386/i386/pmap.c93
-rw-r--r--sys/i386/xen/pmap.c51
2 files changed, 144 insertions, 0 deletions
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 3085157..c0ad7df 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -4012,6 +4012,99 @@ out:
PMAP_UNLOCK(pmap);
}
+/*
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature,
+ * so there is no need to invalidate any TLB entries.
+ */
+void
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ vm_offset_t pdnxt;
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+ boolean_t pv_lists_locked;
+
+ /*
+ * The pv list lock (pvh_global_lock) is only taken up front when the
+ * pmap is not the current one; for the current pmap it is acquired
+ * lazily, and only if a superpage must be demoted below.
+ * NOTE(review): sched_pin() is presumably required by
+ * pmap_pte_quick() / pmap_demote_pde() — confirm against their
+ * locking contracts.
+ */
+ if (pmap_is_current(pmap))
+ pv_lists_locked = FALSE;
+ else {
+ pv_lists_locked = TRUE;
+resume:
+ rw_wlock(&pvh_global_lock);
+ sched_pin();
+ }
+ PMAP_LOCK(pmap);
+ for (; sva < eva; sva = pdnxt) {
+ /*
+ * Advance to the start of the next page-directory-sized
+ * region; guard against address wraparound at the top of
+ * the address space.
+ */
+ pdnxt = (sva + NBPDR) & ~PDRMASK;
+ if (pdnxt < sva)
+ pdnxt = eva;
+ pde = pmap_pde(pmap, sva);
+ if ((*pde & PG_V) == 0)
+ continue;
+ if ((*pde & PG_PS) != 0) {
+ /* Every valid mapping in the range must be wired. */
+ if ((*pde & PG_W) == 0)
+ panic("pmap_unwire: pde %#jx is missing PG_W",
+ (uintmax_t)*pde);
+
+ /*
+ * Are we unwiring the entire large page? If not,
+ * demote the mapping and fall through.
+ */
+ if (sva + NBPDR == pdnxt && eva >= pdnxt) {
+ /*
+ * Regardless of whether a pde (or pte) is 32
+ * or 64 bits in size, PG_W is among the least
+ * significant 32 bits.
+ */
+ atomic_clear_int((u_int *)pde, PG_W);
+ pmap->pm_stats.wired_count -= NBPDR /
+ PAGE_SIZE;
+ continue;
+ } else {
+ /*
+ * Demotion manipulates the pv lists, so the
+ * global pv lock must be held. If try-lock
+ * fails, drop the pmap lock and restart this
+ * iteration with both locks held, preserving
+ * the required lock order.
+ */
+ if (!pv_lists_locked) {
+ pv_lists_locked = TRUE;
+ if (!rw_try_wlock(&pvh_global_lock)) {
+ PMAP_UNLOCK(pmap);
+ /* Repeat sva. */
+ goto resume;
+ }
+ sched_pin();
+ }
+ if (!pmap_demote_pde(pmap, pde, sva))
+ panic("pmap_unwire: demotion failed");
+ }
+ }
+ /* Clamp the 4KB-page walk to the end of the range. */
+ if (pdnxt > eva)
+ pdnxt = eva;
+ for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
+ sva += PAGE_SIZE) {
+ if ((*pte & PG_V) == 0)
+ continue;
+ if ((*pte & PG_W) == 0)
+ panic("pmap_unwire: pte %#jx is missing PG_W",
+ (uintmax_t)*pte);
+
+ /*
+ * PG_W must be cleared atomically. Although the pmap
+ * lock synchronizes access to PG_W, another processor
+ * could be setting PG_M and/or PG_A concurrently.
+ *
+ * PG_W is among the least significant 32 bits.
+ */
+ atomic_clear_int((u_int *)pte, PG_W);
+ pmap->pm_stats.wired_count--;
+ }
+ }
+ /* Release the pv lock only if it was taken on one of the paths above. */
+ if (pv_lists_locked) {
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ }
+ PMAP_UNLOCK(pmap);
+}
/*
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index fdc64bc..54ee3a6 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -3199,6 +3199,57 @@ pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
rw_wunlock(&pvh_global_lock);
}
+/*
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature,
+ * so there is no need to invalidate any TLB entries.
+ */
+void
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ vm_offset_t pdnxt;
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+
+ CTR3(KTR_PMAP, "pmap_unwire: pmap=%p sva=0x%x eva=0x%x", pmap, sva,
+ eva);
+ /*
+ * Unlike the native i386 version, the locks are taken
+ * unconditionally here; there is no lazy-acquisition path because
+ * no superpage demotion can occur (see the PG_PS panic below).
+ */
+ rw_wlock(&pvh_global_lock);
+ sched_pin();
+ PMAP_LOCK(pmap);
+ for (; sva < eva; sva = pdnxt) {
+ /*
+ * Advance to the next page-directory boundary, guarding
+ * against wraparound at the top of the address space.
+ */
+ pdnxt = (sva + NBPDR) & ~PDRMASK;
+ if (pdnxt < sva)
+ pdnxt = eva;
+ pde = pmap_pde(pmap, sva);
+ if ((*pde & PG_V) == 0)
+ continue;
+ /* The Xen pmap does not create superpage mappings. */
+ if ((*pde & PG_PS) != 0)
+ panic("pmap_unwire: unexpected PG_PS in pde %#jx",
+ (uintmax_t)*pde);
+ if (pdnxt > eva)
+ pdnxt = eva;
+ for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
+ sva += PAGE_SIZE) {
+ if ((*pte & PG_V) == 0)
+ continue;
+ /* Every valid mapping in the range must be wired. */
+ if ((*pte & PG_W) == 0)
+ panic("pmap_unwire: pte %#jx is missing PG_W",
+ (uintmax_t)*pte);
+ /*
+ * PTE updates go through the hypervisor interface;
+ * the third argument defers the flush, which is
+ * performed once by PT_UPDATES_FLUSH() below.
+ */
+ PT_SET_VA_MA(pte, *pte & ~PG_W, FALSE);
+ pmap->pm_stats.wired_count--;
+ }
+ }
+ /*
+ * NOTE(review): PMAP1 appears to be the temporary mapping slot used
+ * by pmap_pte_quick(); it is torn down here before unpinning —
+ * confirm against the other users of PMAP1 in this file.
+ */
+ if (*PMAP1)
+ PT_CLEAR_VA(PMAP1, FALSE);
+ PT_UPDATES_FLUSH();
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+}
/*
OpenPOWER on IntegriCloud