-rw-r--r--  sys/sparc64/include/pmap.h   8
-rw-r--r--  sys/sparc64/sparc64/pmap.c  99
-rw-r--r--  sys/sparc64/sparc64/tsb.c    3
3 files changed, 63 insertions, 47 deletions
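This commit moves the sparc64 pmap's machine-dependent (MD) page metadata out from under the global vm_page_queue_mtx and onto a dedicated rwlock, md_page_rwlock, accessed through the new MDPAGE_* macros: read-only walks of a page's TTE list take the lock shared, while paths that modify mappings or cacheability state take it exclusive. The sketch below is illustrative only and is not part of the commit; it shows how the new macros sit on top of the standard FreeBSD rw(9) primitives, with hypothetical helper functions standing in for the real pmap routines.

/*
 * Illustrative sketch only -- not part of the commit.  The macro
 * definitions mirror the ones added to pmap.h; the helper functions
 * are hypothetical stand-ins for the real pmap code paths.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

static struct rwlock md_page_rwlock;

#define	MDPAGE_RLOCK()		rw_rlock(&md_page_rwlock)
#define	MDPAGE_RUNLOCK()	rw_runlock(&md_page_rwlock)
#define	MDPAGE_WLOCK()		rw_wlock(&md_page_rwlock)
#define	MDPAGE_WUNLOCK()	rw_wunlock(&md_page_rwlock)
#define	MDPAGE_ASSERT_WLOCKED()	rw_assert(&md_page_rwlock, RA_WLOCKED)

static void
mdpage_lock_init(void)
{
	/* Done once at startup; the commit does this in pmap_bootstrap(). */
	rw_init(&md_page_rwlock, "MD page");
}

static void
mdpage_read_query(void)
{
	/* Read-only queries (pmap_page_is_mapped, pmap_is_modified, ...). */
	MDPAGE_RLOCK();
	/* ... walk m->md.tte_list without modifying it ... */
	MDPAGE_RUNLOCK();
}

static void
mdpage_modify_mappings(void)
{
	/* Mapping changes (pmap_enter, pmap_remove, pmap_kenter callers). */
	MDPAGE_WLOCK();
	MDPAGE_ASSERT_WLOCKED();	/* what the converted asserts check */
	/* ... insert or remove TTEs ... */
	MDPAGE_WUNLOCK();
}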
diff --git a/sys/sparc64/include/pmap.h b/sys/sparc64/include/pmap.h
index 87c56a3..1c5aa9c 100644
--- a/sys/sparc64/include/pmap.h
+++ b/sys/sparc64/include/pmap.h
@@ -43,6 +43,7 @@
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
+#include <sys/_rwlock.h>
#include <machine/cache.h>
#include <machine/tte.h>
@@ -78,6 +79,12 @@ struct pmap {
#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
+#define MDPAGE_ASSERT_WLOCKED() rw_assert(&md_page_rwlock, RA_WLOCKED)
+#define MDPAGE_RLOCK() rw_rlock(&md_page_rwlock)
+#define MDPAGE_WLOCK() rw_wlock(&md_page_rwlock)
+#define MDPAGE_RUNLOCK() rw_runlock(&md_page_rwlock)
+#define MDPAGE_WUNLOCK() rw_wunlock(&md_page_rwlock)
+
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_set_memattr(m, ma) (void)0
@@ -101,6 +108,7 @@ void pmap_set_kctx(void);
extern struct pmap kernel_pmap_store;
#define kernel_pmap (&kernel_pmap_store)
+extern struct rwlock md_page_rwlock;
extern vm_paddr_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 496aff5..6161479 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@@ -175,6 +176,11 @@ extern int tl1_immu_miss_patch_tsb_mask_1[];
extern int tl1_immu_miss_patch_tsb_mask_2[];
/*
+ * MD page read/write lock.
+ */
+struct rwlock md_page_rwlock;
+
+/*
* If user pmap is processed with pmap_remove and the resident count
* drops to 0, there are no more pages to remove, so we need not
* continue.
@@ -666,6 +672,11 @@ pmap_bootstrap(u_int cpu_impl)
CPU_FILL(&pm->pm_active);
/*
+ * Initialize the MD page lock.
+ */
+ rw_init(&md_page_rwlock, "MD page");
+
+ /*
* Flush all non-locked TLB entries possibly left over by the
* firmware.
*/
@@ -875,7 +886,7 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va)
struct tte *tp;
int color;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ MDPAGE_ASSERT_WLOCKED();
KASSERT((m->flags & PG_FICTITIOUS) == 0,
("pmap_cache_enter: fake page"));
PMAP_STATS_INC(pmap_ncache_enter);
@@ -950,7 +961,7 @@ pmap_cache_remove(vm_page_t m, vm_offset_t va)
struct tte *tp;
int color;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ MDPAGE_ASSERT_WLOCKED();
CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
m->md.colors[DCACHE_COLOR(va)]);
KASSERT((m->flags & PG_FICTITIOUS) == 0,
@@ -1025,7 +1036,7 @@ pmap_kenter(vm_offset_t va, vm_page_t m)
vm_page_t om;
u_long data;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ MDPAGE_ASSERT_WLOCKED();
PMAP_STATS_INC(pmap_nkenter);
tp = tsb_kvtotte(va);
CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
@@ -1087,7 +1098,7 @@ pmap_kremove(vm_offset_t va)
struct tte *tp;
vm_page_t m;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ MDPAGE_ASSERT_WLOCKED();
PMAP_STATS_INC(pmap_nkremove);
tp = tsb_kvtotte(va);
CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
@@ -1138,19 +1149,16 @@ void
pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
vm_offset_t va;
- int locked;
PMAP_STATS_INC(pmap_nqenter);
va = sva;
- if (!(locked = mtx_owned(&vm_page_queue_mtx)))
- vm_page_lock_queues();
+ MDPAGE_WLOCK();
while (count-- > 0) {
pmap_kenter(va, *m);
va += PAGE_SIZE;
m++;
}
- if (!locked)
- vm_page_unlock_queues();
+ MDPAGE_WUNLOCK();
tlb_range_demap(kernel_pmap, sva, va);
}
@@ -1162,18 +1170,15 @@ void
pmap_qremove(vm_offset_t sva, int count)
{
vm_offset_t va;
- int locked;
PMAP_STATS_INC(pmap_nqremove);
va = sva;
- if (!(locked = mtx_owned(&vm_page_queue_mtx)))
- vm_page_lock_queues();
+ MDPAGE_WLOCK();
while (count-- > 0) {
pmap_kremove(va);
va += PAGE_SIZE;
}
- if (!locked)
- vm_page_unlock_queues();
+ MDPAGE_WUNLOCK();
tlb_range_demap(kernel_pmap, sva, va);
}
@@ -1321,7 +1326,7 @@ pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
vm_page_t m;
u_long data;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ MDPAGE_ASSERT_WLOCKED();
data = atomic_readandclear_long(&tp->tte_data);
if ((data & TD_FAKE) == 0) {
m = PHYS_TO_VM_PAGE(TD_PA(data));
@@ -1358,7 +1363,7 @@ pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
pm->pm_context[curcpu], start, end);
if (PMAP_REMOVE_DONE(pm))
return;
- vm_page_lock_queues();
+ MDPAGE_WLOCK();
PMAP_LOCK(pm);
if (end - start > PMAP_TSB_THRESH) {
tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
@@ -1371,7 +1376,7 @@ pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
tlb_range_demap(pm, start, end - 1);
}
PMAP_UNLOCK(pm);
- vm_page_unlock_queues();
+ MDPAGE_WUNLOCK();
}
void
@@ -1384,7 +1389,7 @@ pmap_remove_all(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_remove_all: page %p is not managed", m));
- vm_page_lock_queues();
+ MDPAGE_WLOCK();
for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
tpn = TAILQ_NEXT(tp, tte_link);
if ((tp->tte_data & TD_PV) == 0)
@@ -1406,8 +1411,8 @@ pmap_remove_all(vm_page_t m)
TTE_ZERO(tp);
PMAP_UNLOCK(pm);
}
+ MDPAGE_WUNLOCK();
vm_page_aflag_clear(m, PGA_WRITEABLE);
- vm_page_unlock_queues();
}
static int
@@ -1469,11 +1474,11 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t access, vm_page_t m,
vm_prot_t prot, boolean_t wired)
{
- vm_page_lock_queues();
+ MDPAGE_WLOCK();
PMAP_LOCK(pm);
pmap_enter_locked(pm, va, m, prot, wired);
- vm_page_unlock_queues();
PMAP_UNLOCK(pm);
+ MDPAGE_WUNLOCK();
}
/*
@@ -1492,7 +1497,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
vm_page_t real;
u_long data;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ MDPAGE_ASSERT_WLOCKED();
PMAP_LOCK_ASSERT(pm, MA_OWNED);
KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
VM_OBJECT_LOCKED(m->object),
@@ -1635,27 +1640,27 @@ pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
psize = atop(end - start);
m = m_start;
- vm_page_lock_queues();
+ MDPAGE_WLOCK();
PMAP_LOCK(pm);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
pmap_enter_locked(pm, start + ptoa(diff), m, prot &
(VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
m = TAILQ_NEXT(m, listq);
}
- vm_page_unlock_queues();
PMAP_UNLOCK(pm);
+ MDPAGE_WUNLOCK();
}
void
pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
- vm_page_lock_queues();
+ MDPAGE_WLOCK();
PMAP_LOCK(pm);
pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
FALSE);
- vm_page_unlock_queues();
PMAP_UNLOCK(pm);
+ MDPAGE_WUNLOCK();
}
void
@@ -1700,6 +1705,8 @@ pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp,
vm_page_t m;
u_long data;
+ MDPAGE_ASSERT_WLOCKED();
+
if ((tp->tte_data & TD_FAKE) != 0)
return (1);
if (tsb_tte_lookup(dst_pmap, va) == NULL) {
@@ -1720,7 +1727,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
if (dst_addr != src_addr)
return;
- vm_page_lock_queues();
+ MDPAGE_WLOCK();
if (dst_pmap < src_pmap) {
PMAP_LOCK(dst_pmap);
PMAP_LOCK(src_pmap);
@@ -1738,9 +1745,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
pmap_copy_tte(src_pmap, dst_pmap, tp, va);
tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
}
- vm_page_unlock_queues();
PMAP_UNLOCK(src_pmap);
PMAP_UNLOCK(dst_pmap);
+ MDPAGE_WUNLOCK();
}
void
@@ -1937,7 +1944,7 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
("pmap_page_exists_quick: page %p is not managed", m));
loops = 0;
rv = FALSE;
- vm_page_lock_queues();
+ MDPAGE_RLOCK();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
@@ -1948,7 +1955,7 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
if (++loops >= 16)
break;
}
- vm_page_unlock_queues();
+ MDPAGE_RUNLOCK();
return (rv);
}
@@ -1965,11 +1972,11 @@ pmap_page_wired_mappings(vm_page_t m)
count = 0;
if ((m->oflags & VPO_UNMANAGED) != 0)
return (count);
- vm_page_lock_queues();
+ MDPAGE_RLOCK();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
count++;
- vm_page_unlock_queues();
+ MDPAGE_RUNLOCK();
return (count);
}
@@ -1996,13 +2003,13 @@ pmap_page_is_mapped(vm_page_t m)
rv = FALSE;
if ((m->oflags & VPO_UNMANAGED) != 0)
return (rv);
- vm_page_lock_queues();
+ MDPAGE_RLOCK();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
if ((tp->tte_data & TD_PV) != 0) {
rv = TRUE;
break;
}
- vm_page_unlock_queues();
+ MDPAGE_RUNLOCK();
return (rv);
}
@@ -2028,7 +2035,7 @@ pmap_ts_referenced(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_ts_referenced: page %p is not managed", m));
count = 0;
- vm_page_lock_queues();
+ MDPAGE_WLOCK();
if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
tpf = tp;
do {
@@ -2042,7 +2049,7 @@ pmap_ts_referenced(vm_page_t m)
break;
} while ((tp = tpn) != NULL && tp != tpf);
}
- vm_page_unlock_queues();
+ MDPAGE_WUNLOCK();
return (count);
}
@@ -2065,7 +2072,7 @@ pmap_is_modified(vm_page_t m)
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
- vm_page_lock_queues();
+ MDPAGE_RLOCK();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
@@ -2074,7 +2081,7 @@ pmap_is_modified(vm_page_t m)
break;
}
}
- vm_page_unlock_queues();
+ MDPAGE_RUNLOCK();
return (rv);
}
@@ -2108,7 +2115,7 @@ pmap_is_referenced(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_is_referenced: page %p is not managed", m));
rv = FALSE;
- vm_page_lock_queues();
+ MDPAGE_RLOCK();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
@@ -2117,7 +2124,7 @@ pmap_is_referenced(vm_page_t m)
break;
}
}
- vm_page_unlock_queues();
+ MDPAGE_RUNLOCK();
return (rv);
}
@@ -2140,7 +2147,7 @@ pmap_clear_modify(vm_page_t m)
*/
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
- vm_page_lock_queues();
+ MDPAGE_RLOCK();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
@@ -2148,7 +2155,7 @@ pmap_clear_modify(vm_page_t m)
if ((data & TD_W) != 0)
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
- vm_page_unlock_queues();
+ MDPAGE_RUNLOCK();
}
void
@@ -2159,7 +2166,7 @@ pmap_clear_reference(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_reference: page %p is not managed", m));
- vm_page_lock_queues();
+ MDPAGE_RLOCK();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
@@ -2167,7 +2174,7 @@ pmap_clear_reference(vm_page_t m)
if ((data & TD_REF) != 0)
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
- vm_page_unlock_queues();
+ MDPAGE_RUNLOCK();
}
void
@@ -2188,7 +2195,7 @@ pmap_remove_write(vm_page_t m)
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
- vm_page_lock_queues();
+ MDPAGE_RLOCK();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
@@ -2198,8 +2205,8 @@ pmap_remove_write(vm_page_t m)
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
}
+ MDPAGE_RUNLOCK();
vm_page_aflag_clear(m, PGA_WRITEABLE);
- vm_page_unlock_queues();
}
int
diff --git a/sys/sparc64/sparc64/tsb.c b/sys/sparc64/sparc64/tsb.c
index 0f34259..cd25b1a 100644
--- a/sys/sparc64/sparc64/tsb.c
+++ b/sys/sparc64/sparc64/tsb.c
@@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@@ -131,7 +132,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, u_long sz, u_long data)
PMAP_STATS_INC(tsb_nenter_u_oc);
}
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ MDPAGE_ASSERT_WLOCKED();
PMAP_LOCK_ASSERT(pm, MA_OWNED);
if (pm == kernel_pmap) {
PMAP_STATS_INC(tsb_nenter_k);
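The functions that take both locks (pmap_enter, pmap_enter_object, pmap_enter_quick, pmap_remove, pmap_copy) follow a consistent ordering after this change: the MD page lock is acquired before the per-pmap mutex and released after it. A condensed, hypothetical sketch of that ordering, with a placeholder comment standing in for the real function body:

static void
pmap_write_op_sketch(pmap_t pm)
{
	MDPAGE_WLOCK();		/* MD page lock first ...          */
	PMAP_LOCK(pm);		/* ... then the per-pmap mutex.    */
	/* ... modify the pmap's mappings here (placeholder) ...   */
	PMAP_UNLOCK(pm);	/* Drop the pmap mutex first,      */
	MDPAGE_WUNLOCK();	/* then the MD page lock.          */
}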