author     kib <kib@FreeBSD.org>  2013-08-22 18:05:31 +0000
committer  kib <kib@FreeBSD.org>  2013-08-22 18:05:31 +0000
commit     076969f54a69e4b15a5489465a537f9eb246b869 (patch)
tree       14c236c22e884ab0e69125036d3ebc0789976529 /sys/amd64
parent     28a0f6da764374a0d3c0525bff10e36bc80c22e4 (diff)
Use the generation count of the pv list to work around the lock order
reversal (LOR) between the pmap lock and the pv list lock, and use
shared locking on pvh_global_lock in pmap_remove_write(), the same as
was done for pmap_ts_referenced().

Noted and reviewed by:	alc (previous version)
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
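The pattern is worth spelling out, since the same dance appears twice in
the diff below (once for 2MB pv entries, once for 4KB ones). The lock
order puts the pmap lock before the pv list lock, so a thread already
holding the pv list lock may only *try* the pmap lock. If the trylock
fails, it snapshots the list's generation count, drops the list lock,
blocks on the pmap lock, retakes the list lock, and restarts the whole
scan if the generation changed in the meantime. Below is a minimal
user-space sketch of that dance using POSIX threads; guarded_list,
list_lock, obj_lock, scan_list, and gen are hypothetical names standing
in for the pv list rwlock, the per-pmap mutex, and pv_gen, not kernel
API:

/*
 * User-space sketch of the try-lock + generation-count pattern used by
 * pmap_remove_write() in the diff below.  All names are hypothetical.
 */
#include <pthread.h>

struct guarded_list {
	pthread_mutex_t	list_lock;	/* stands in for the pv list lock */
	unsigned int	gen;		/* bumped on every list mutation */
	/* ... list head would live here ... */
};

static void
scan_list(struct guarded_list *gl, pthread_mutex_t *obj_lock)
{
	unsigned int gen;

retry:
	pthread_mutex_lock(&gl->list_lock);
	/* for each entry on the list ... */
	if (pthread_mutex_trylock(obj_lock) != 0) {
		/*
		 * The lock order is obj_lock before list_lock, so
		 * blocking on obj_lock here would be a LOR.  Instead:
		 * snapshot the generation, drop the list lock, take
		 * obj_lock the slow way, retake the list lock, and
		 * restart the scan if the list changed meanwhile.
		 */
		gen = gl->gen;
		pthread_mutex_unlock(&gl->list_lock);
		pthread_mutex_lock(obj_lock);
		pthread_mutex_lock(&gl->list_lock);
		if (gen != gl->gen) {
			pthread_mutex_unlock(obj_lock);
			pthread_mutex_unlock(&gl->list_lock);
			goto retry;
		}
	}
	/* ... safe to work on the entry with both locks held ... */
	pthread_mutex_unlock(obj_lock);
	pthread_mutex_unlock(&gl->list_lock);
}

In the kernel version the pv list lock is an rwlock taken with
rw_wlock(), PMAP_TRYLOCK() plays the obj_lock role, and there are two
generation counters to revalidate (pvh->pv_gen and m->md.pv_gen)
because a page can be reachable through both 2MB and 4KB mappings.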
Diffstat (limited to 'sys/amd64')
-rw-r--r--	sys/amd64/amd64/pmap.c	43
1 file changed, 37 insertions, 6 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 215a4f1..0c87c35 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -4765,10 +4765,12 @@ pmap_remove_write(vm_page_t m)
 {
 	struct md_page *pvh;
 	pmap_t pmap;
+	struct rwlock *lock;
 	pv_entry_t next_pv, pv;
 	pd_entry_t *pde;
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
+	int pvh_gen, md_gen;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
@@ -4781,23 +4783,51 @@ pmap_remove_write(vm_page_t m)
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	rw_wlock(&pvh_global_lock);
+	rw_rlock(&pvh_global_lock);
+	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+retry_pv_loop:
+	rw_wlock(lock);
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		goto small_mappings;
-	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
 		pmap = PV_PMAP(pv);
-		PMAP_LOCK(pmap);
+		if (!PMAP_TRYLOCK(pmap)) {
+			pvh_gen = pvh->pv_gen;
+			rw_wunlock(lock);
+			PMAP_LOCK(pmap);
+			rw_wlock(lock);
+			if (pvh_gen != pvh->pv_gen) {
+				PMAP_UNLOCK(pmap);
+				rw_wunlock(lock);
+				goto retry_pv_loop;
+			}
+		}
 		va = pv->pv_va;
 		pde = pmap_pde(pmap, va);
 		if ((*pde & PG_RW) != 0)
-			(void)pmap_demote_pde(pmap, pde, va);
+			(void)pmap_demote_pde_locked(pmap, pde, va, &lock);
+		KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
+		    ("inconsistent pv lock %p %p for page %p",
+		    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
 		PMAP_UNLOCK(pmap);
 	}
 small_mappings:
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
 		pmap = PV_PMAP(pv);
-		PMAP_LOCK(pmap);
+		if (!PMAP_TRYLOCK(pmap)) {
+			pvh_gen = pvh->pv_gen;
+			md_gen = m->md.pv_gen;
+			rw_wunlock(lock);
+			PMAP_LOCK(pmap);
+			rw_wlock(lock);
+			if (pvh_gen != pvh->pv_gen ||
+			    md_gen != m->md.pv_gen) {
+				PMAP_UNLOCK(pmap);
+				rw_wunlock(lock);
+				goto retry_pv_loop;
+			}
+		}
 		pde = pmap_pde(pmap, pv->pv_va);
 		KASSERT((*pde & PG_PS) == 0,
 		    ("pmap_remove_write: found a 2mpage in page %p's pv list",
@@ -4815,8 +4845,9 @@ retry:
 			if ((oldpte & PG_M) != 0)
 				vm_page_dirty(m);
 		}
 		PMAP_UNLOCK(pmap);
 	}
+	rw_wunlock(lock);
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	rw_wunlock(&pvh_global_lock);
+	rw_runlock(&pvh_global_lock);
 }
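The generation checks in the diff are only sound if every path that
links or unlinks a pv entry bumps the matching generation count while
still holding the pv list write lock; that is what lets a thread that
dropped the lock detect a stale iteration. Continuing the hypothetical
guarded_list sketch from above, the writer side would look like:

/*
 * Writer side of the hypothetical guarded_list sketch: every mutation
 * increments gen under list_lock, invalidating concurrent retry scans.
 */
static void
list_changed(struct guarded_list *gl)
{
	pthread_mutex_lock(&gl->list_lock);
	/* ... link or unlink an entry here ... */
	gl->gen++;	/* concurrent scanners will see a stale snapshot */
	pthread_mutex_unlock(&gl->list_lock);
}

In the amd64 pmap this corresponds to incrementing pvh->pv_gen or
m->md.pv_gen next to every insertion into or removal from the
corresponding pv list.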