diff options
author | kmacy <kmacy@FreeBSD.org> | 2010-04-30 00:46:43 +0000 |
---|---|---|
committer | kmacy <kmacy@FreeBSD.org> | 2010-04-30 00:46:43 +0000 |
commit | 1dc1263413262d13f45f42d89c554d0ddc167ca2 (patch) | |
tree | 250705d42bcb9b364f4fd2233c38faaadadf7ae0 /sys/powerpc/booke | |
parent | a43425e8835983e7c93a15a2d6a42fdb292c1676 (diff) | |
download | FreeBSD-src-1dc1263413262d13f45f42d89c554d0ddc167ca2.zip FreeBSD-src-1dc1263413262d13f45f42d89c554d0ddc167ca2.tar.gz |
On Alan's advice, rather than do a wholesale conversion on a single
architecture from page queue lock to a hashed array of page locks
(based on a patch by Jeff Roberson), I've implemented page lock
support in the MI code and have only moved vm_page's hold_count
out from under page queue mutex to page lock. This changes
pmap_extract_and_hold on all pmaps.
Supported by: Bitgravity Inc.
Discussed with: alc, jeffr, and kib
Diffstat (limited to 'sys/powerpc/booke')
-rw-r--r-- | sys/powerpc/booke/pmap.c | 11 |
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 549eaaa..0b0fce4 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -2034,11 +2034,12 @@ mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
 	pte_t *pte;
 	vm_page_t m;
 	uint32_t pte_wbit;
-
+	vm_paddr_t pa;
+
 	m = NULL;
-	vm_page_lock_queues();
+	pa = 0;
 	PMAP_LOCK(pmap);
-
+retry:
 	pte = pte_find(mmu, pmap, va);
 	if ((pte != NULL) && PTE_ISVALID(pte)) {
 		if (pmap == kernel_pmap)
@@ -2047,12 +2048,14 @@ mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
 			pte_wbit = PTE_UW;
 
 		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
+			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
+				goto retry;
 			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
 			vm_page_hold(m);
 		}
 	}
-	vm_page_unlock_queues();
+	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }