author      alc <alc@FreeBSD.org>    2006-06-05 20:35:27 +0000
committer   alc <alc@FreeBSD.org>    2006-06-05 20:35:27 +0000
commit      ff4adb11fea6aec1b2e943f8d750e9b222b7c687 (patch)
tree        22cfa4dc7fcc450f872692f11ffb813adfd588ae /sys/sparc64
parent      2007942da5c954dc499909f31282b8b2f4f3b360 (diff)
Introduce the function pmap_enter_object().  It maps a sequence of resident
pages from the same object.  Use it in vm_map_pmap_enter() to reduce the
locking overhead of premapping objects.

Reviewed by:	tegge@
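
For context (not part of this commit), the sketch below illustrates, under the
locking rules visible in this diff, how a caller premapping part of an object
might use the new pmap_enter_object() instead of calling pmap_enter() once per
page.  The actual vm_map_pmap_enter() change lives in sys/vm and is not shown
here, so the caller-side function name, the lookup step, and the surrounding
locking are hypothetical.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/*
 * Hypothetical caller sketch: premap the resident pages backing the virtual
 * range [start, end) of 'obj' into 'pm'.  Only pmap_enter_object() and its
 * signature come from this commit; everything else here is illustrative.
 */
static void
premap_object_range(pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_object_t obj, vm_pindex_t pindex, vm_prot_t prot)
{
	vm_page_t m_start;

	VM_OBJECT_LOCK(obj);
	/* Resident page backing 'pindex', if any. */
	m_start = vm_page_lookup(obj, pindex);
	if (m_start != NULL) {
		/*
		 * pmap_enter_locked() asserts that the page queue lock is
		 * held, so the caller takes it once across the whole run
		 * rather than once per page, as a per-page pmap_enter()
		 * loop would.
		 */
		vm_page_lock_queues();
		pmap_enter_object(pm, start, end, m_start, prot);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(obj);
}

The saving is that the pmap lock (taken inside pmap_enter_object()) and the
page queue lock are each acquired once per run of resident pages rather than
once per mapped page.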
Diffstat (limited to 'sys/sparc64')
-rw-r--r--	sys/sparc64/sparc64/pmap.c	75
1 file changed, 64 insertions(+), 11 deletions(-)
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 4bf35b2..43dfbd5 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -147,6 +147,16 @@ struct pmap kernel_pmap_store;
*/
static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size);
+/*
+ * Map the given physical page at the specified virtual address in the
+ * target pmap with the protection requested. If specified the page
+ * will be wired down.
+ *
+ * The page queues and pmap must be locked.
+ */
+static void pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, boolean_t wired);
+
extern int tl1_immu_miss_patch_1[];
extern int tl1_immu_miss_patch_2[];
extern int tl1_dmmu_miss_patch_1[];
@@ -1259,11 +1269,32 @@ void
pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
boolean_t wired)
{
+
+ vm_page_lock_queues();
+ PMAP_LOCK(pm);
+ pmap_enter_locked(pm, va, m, prot, wired);
+ vm_page_unlock_queues();
+ PMAP_UNLOCK(pm);
+}
+
+/*
+ * Map the given physical page at the specified virtual address in the
+ * target pmap with the protection requested. If specified the page
+ * will be wired down.
+ *
+ * The page queues and pmap must be locked.
+ */
+static void
+pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ boolean_t wired)
+{
struct tte *tp;
vm_paddr_t pa;
u_long data;
int i;
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ PMAP_LOCK_ASSERT(pm, MA_OWNED);
PMAP_STATS_INC(pmap_nenter);
pa = VM_PAGE_TO_PHYS(m);
@@ -1284,9 +1315,6 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
"pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
pm->pm_context[PCPU_GET(cpuid)], m, va, pa, prot, wired);
- vm_page_lock_queues();
- PMAP_LOCK(pm);
-
/*
* If there is an existing mapping, and the physical address has not
* changed, must be protection or wiring change.
@@ -1383,7 +1411,35 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
tsb_tte_enter(pm, m, va, TS_8K, data);
}
- vm_page_unlock_queues();
+}
+
+/*
+ * Maps a sequence of resident pages belonging to the same object.
+ * The sequence begins with the given page m_start. This page is
+ * mapped at the given virtual address start. Each subsequent page is
+ * mapped at a virtual address that is offset from start by the same
+ * amount as the page is offset from m_start within the object. The
+ * last page in the sequence is the page with the largest offset from
+ * m_start that can be mapped at a virtual address less than the given
+ * virtual address end. Not every virtual page between start and end
+ * is mapped; only those for which a resident page exists with the
+ * corresponding offset from m_start are mapped.
+ */
+void
+pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
+ vm_page_t m_start, vm_prot_t prot)
+{
+ vm_page_t m;
+ vm_pindex_t diff, psize;
+
+ psize = atop(end - start);
+ m = m_start;
+ PMAP_LOCK(pm);
+ while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
+ pmap_enter_locked(pm, start + ptoa(diff), m, prot &
+ (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+ m = TAILQ_NEXT(m, listq);
+ }
PMAP_UNLOCK(pm);
}
@@ -1392,13 +1448,10 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
vm_page_t mpte)
{
- vm_page_busy(m);
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(m->object);
- pmap_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
- VM_OBJECT_LOCK(m->object);
- vm_page_lock_queues();
- vm_page_wakeup(m);
+ PMAP_LOCK(pm);
+ pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
+ FALSE);
+ PMAP_UNLOCK(pm);
return (NULL);
}