summary | refs | log | tree | commit | diff | stats
path: root/sys/ia64
diff options
context:
space:
mode:
author      alc <alc@FreeBSD.org>    2004-07-19 05:39:49 +0000
committer   alc <alc@FreeBSD.org>    2004-07-19 05:39:49 +0000
commit      15d7825a5f77eea2647e578771fa0533edc0863d (patch)
tree        40d950a1231402b832f21f630288f0db08b4086f /sys/ia64
parent      f05f0023939de87de66a4906915cbab31d5f9dbe (diff)
download    FreeBSD-src-15d7825a5f77eea2647e578771fa0533edc0863d.zip
            FreeBSD-src-15d7825a5f77eea2647e578771fa0533edc0863d.tar.gz
Add partial pmap locking.
Tested by: marcel@
Diffstat (limited to 'sys/ia64')
-rw-r--r--    sys/ia64/ia64/pmap.c        13
-rw-r--r--    sys/ia64/include/pmap.h     14
2 files changed, 27 insertions, 0 deletions
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index f1f8149..95abc4e 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -445,6 +445,7 @@ pmap_bootstrap()
/*
* Initialize the kernel pmap (which is statically allocated).
*/
+ PMAP_LOCK_INIT(kernel_pmap);
for (i = 0; i < 5; i++)
kernel_pmap->pm_rid[i] = 0;
kernel_pmap->pm_active = 1;
@@ -698,6 +699,7 @@ pmap_pinit(struct pmap *pmap)
{
int i;
+ PMAP_LOCK_INIT(pmap);
for (i = 0; i < 5; i++)
pmap->pm_rid[i] = pmap_allocate_rid();
pmap->pm_active = 0;
@@ -722,6 +724,7 @@ pmap_release(pmap_t pmap)
for (i = 0; i < 5; i++)
if (pmap->pm_rid[i])
pmap_free_rid(pmap->pm_rid[i]);
+ PMAP_LOCK_DESTROY(pmap);
}
/*
@@ -1339,6 +1342,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
return;
vm_page_lock_queues();
+ PMAP_LOCK(pmap);
oldpmap = pmap_install(pmap);
/*
@@ -1374,6 +1378,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
out:
pmap_install(oldpmap);
+ PMAP_UNLOCK(pmap);
vm_page_unlock_queues();
}
@@ -1414,6 +1419,7 @@ pmap_remove_all(vm_page_t m)
pmap_t pmap = pv->pv_pmap;
vm_offset_t va = pv->pv_va;
+ PMAP_LOCK(pmap);
oldpmap = pmap_install(pmap);
pte = pmap_find_vhpt(va);
KASSERT(pte != NULL, ("pte"));
@@ -1422,6 +1428,7 @@ pmap_remove_all(vm_page_t m)
pmap_remove_pte(pmap, pte, va, pv, 1);
pmap_invalidate_page(pmap, va);
pmap_install(oldpmap);
+ PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
@@ -1458,6 +1465,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
panic("pmap_protect: unaligned addresses");
vm_page_lock_queues();
+ PMAP_LOCK(pmap);
oldpmap = pmap_install(pmap);
while (sva < eva) {
/*
@@ -1491,6 +1499,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
sva += PAGE_SIZE;
}
pmap_install(oldpmap);
+ PMAP_UNLOCK(pmap);
vm_page_unlock_queues();
}
@@ -1704,6 +1713,7 @@ pmap_change_wiring(pmap, va, wired)
if (pmap == NULL)
return;
+ PMAP_LOCK(pmap);
oldpmap = pmap_install(pmap);
pte = pmap_find_vhpt(va);
@@ -1720,6 +1730,7 @@ pmap_change_wiring(pmap, va, wired)
pmap_pte_set_w(pte, wired);
pmap_install(oldpmap);
+ PMAP_UNLOCK(pmap);
}
@@ -1854,6 +1865,7 @@ pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
#endif
vm_page_lock_queues();
+ PMAP_LOCK(pmap);
for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
pv;
pv = npv) {
@@ -1874,6 +1886,7 @@ pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
}
pmap_invalidate_all(pmap);
+ PMAP_UNLOCK(pmap);
vm_page_unlock_queues();
}
diff --git a/sys/ia64/include/pmap.h b/sys/ia64/include/pmap.h
index af2610e..dbb0b2c 100644
--- a/sys/ia64/include/pmap.h
+++ b/sys/ia64/include/pmap.h
@@ -46,6 +46,8 @@
#define _MACHINE_PMAP_H_
#include <sys/queue.h>
+#include <sys/_lock.h>
+#include <sys/_mutex.h>
#include <machine/pte.h>
#ifdef _KERNEL
@@ -70,6 +72,7 @@ struct md_page {
};
struct pmap {
+ struct mtx pm_mtx;
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
u_int32_t pm_rid[5]; /* base RID for pmap */
int pm_active; /* active flag */
@@ -81,6 +84,17 @@ typedef struct pmap *pmap_t;
#ifdef _KERNEL
extern struct pmap kernel_pmap_store;
#define kernel_pmap (&kernel_pmap_store)
+
+#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
+#define PMAP_LOCK_ASSERT(pmap, type) \
+ mtx_assert(&(pmap)->pm_mtx, (type))
+#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
+#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \
+ NULL, MTX_DEF)
+#define PMAP_LOCKED(pmap) mtx_owned(&(pmap)->pm_mtx)
+#define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
+#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
+#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
#endif
/*
OpenPOWER on IntegriCloud