summaryrefslogtreecommitdiffstats
path: root/sys/i386
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2010-05-08 20:34:01 +0000
committeralc <alc@FreeBSD.org>2010-05-08 20:34:01 +0000
commit40b44f9713de70170857e6291876dfce94b6ef43 (patch)
tree55e1683864edab13ef62b5f9fac6a4928f23f506 /sys/i386
parent94ac1169df0fe3609aa671582574d421e89b7ff3 (diff)
downloadFreeBSD-src-40b44f9713de70170857e6291876dfce94b6ef43.zip
FreeBSD-src-40b44f9713de70170857e6291876dfce94b6ef43.tar.gz
Push down the page queues lock into vm_page_cache(), vm_page_try_to_cache(), and
vm_page_try_to_free(). Consequently, push down the page queues lock into pmap_enter_quick(), pmap_page_wired_mappings(), pmap_remove_all(), and pmap_remove_write(). Push down the page queues lock into Xen's pmap_page_is_mapped(). (I overlooked the Xen pmap in r207702.) Switch to a per-processor counter for the total number of pages cached.
Diffstat (limited to 'sys/i386')
-rw-r--r--sys/i386/i386/pmap.c15
-rw-r--r--sys/i386/xen/pmap.c37
2 files changed, 28 insertions, 24 deletions
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index b342a67..4b87922 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2900,7 +2900,7 @@ pmap_remove_all(vm_page_t m)
KASSERT((m->flags & PG_FICTITIOUS) == 0,
("pmap_remove_all: page %p is fictitious", m));
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ vm_page_lock_queues();
sched_pin();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
@@ -2940,6 +2940,7 @@ pmap_remove_all(vm_page_t m)
}
vm_page_flag_clear(m, PG_WRITEABLE);
sched_unpin();
+ vm_page_unlock_queues();
}
/*
@@ -3544,8 +3545,10 @@ void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
+ vm_page_lock_queues();
PMAP_LOCK(pmap);
- (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+ (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+ vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@@ -4088,8 +4091,11 @@ pmap_page_wired_mappings(vm_page_t m)
count = 0;
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
+ vm_page_lock_queues();
count = pmap_pvh_wired_mappings(&m->md, count);
- return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count));
+ count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count);
+ vm_page_unlock_queues();
+ return (count);
}
/*
@@ -4404,10 +4410,10 @@ pmap_remove_write(vm_page_t m)
pt_entry_t oldpte, *pte;
vm_offset_t va;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
+ vm_page_lock_queues();
sched_pin();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
@@ -4445,6 +4451,7 @@ retry:
}
vm_page_flag_clear(m, PG_WRITEABLE);
sched_unpin();
+ vm_page_unlock_queues();
}
/*
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 1bd081f..42fdff9 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -2485,16 +2485,9 @@ pmap_remove_all(vm_page_t m)
pt_entry_t *pte, tpte;
vm_page_t free;
-#if defined(PMAP_DIAGNOSTIC)
- /*
- * XXX This makes pmap_remove_all() illegal for non-managed pages!
- */
- if (m->flags & PG_FICTITIOUS) {
- panic("pmap_remove_all: illegal for unmanaged page, va: 0x%jx",
- VM_PAGE_TO_PHYS(m) & 0xffffffff);
- }
-#endif
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ KASSERT((m->flags & PG_FICTITIOUS) == 0,
+ ("pmap_remove_all: page %p is fictitious", m));
+ vm_page_lock_queues();
sched_pin();
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pmap = PV_PMAP(pv);
@@ -2531,6 +2524,7 @@ pmap_remove_all(vm_page_t m)
if (*PMAP1)
PT_SET_MA(PADDR1, 0);
sched_unpin();
+ vm_page_unlock_queues();
}
/*
@@ -2946,10 +2940,12 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x",
pmap, va, m, prot);
+ vm_page_lock_queues();
PMAP_LOCK(pmap);
- (void) pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL);
+ (void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL);
if (count)
HYPERVISOR_multicall(&mcl, count);
+ vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@@ -3504,7 +3500,7 @@ pmap_page_wired_mappings(vm_page_t m)
count = 0;
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ vm_page_lock_queues();
sched_pin();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
@@ -3515,6 +3511,7 @@ pmap_page_wired_mappings(vm_page_t m)
PMAP_UNLOCK(pmap);
}
sched_unpin();
+ vm_page_unlock_queues();
return (count);
}
@@ -3525,16 +3522,15 @@ pmap_page_wired_mappings(vm_page_t m)
boolean_t
pmap_page_is_mapped(vm_page_t m)
{
- struct md_page *pvh;
+ boolean_t rv;
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return (FALSE);
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- if (TAILQ_EMPTY(&m->md.pv_list)) {
- pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
- return (!TAILQ_EMPTY(&pvh->pv_list));
- } else
- return (TRUE);
+ vm_page_lock_queues();
+ rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+ !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
+ vm_page_unlock_queues();
+ return (rv);
}
/*
@@ -3784,10 +3780,10 @@ pmap_remove_write(vm_page_t m)
pmap_t pmap;
pt_entry_t oldpte, *pte;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
+ vm_page_lock_queues();
sched_pin();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
@@ -3818,6 +3814,7 @@ retry:
if (*PMAP1)
PT_SET_MA(PADDR1, 0);
sched_unpin();
+ vm_page_unlock_queues();
}
/*
OpenPOWER on IntegriCloud