author:     kmacy <kmacy@FreeBSD.org>    2010-04-30 00:46:43 +0000
committer:  kmacy <kmacy@FreeBSD.org>    2010-04-30 00:46:43 +0000
commit:     1dc1263413262d13f45f42d89c554d0ddc167ca2 (patch)
tree:       250705d42bcb9b364f4fd2233c38faaadadf7ae0 /sys/i386
parent:     a43425e8835983e7c93a15a2d6a42fdb292c1676 (diff)
On Alan's advice, rather than do a wholesale conversion on a single
architecture from page queue lock to a hashed array of page locks
(based on a patch by Jeff Roberson), I've implemented page lock
support in the MI code and have only moved vm_page's hold_count
out from under the page queue mutex to the page lock. This changes
pmap_extract_and_hold() on all pmaps.
Supported by: Bitgravity Inc.
Discussed with: alc, jeffr, and kib
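
The hunks below convert pmap_extract_and_hold() from the global page queues lock to a per-page lock keyed on the physical address, retrying the lookup whenever switching page locks forces the pmap lock to be dropped. As a rough illustration of that pattern only, here is a self-contained user-space sketch: pa_locks, PA_LOCKPTR(), pa_locks_init(), page_pa_tryrelock() and the pthread mutexes are hypothetical stand-ins for the kernel's hashed page locks, PA_UNLOCK_COND() and vm_page_pa_tryrelock(), not the actual MI implementation.

#include <pthread.h>
#include <stdint.h>

typedef uint64_t vm_paddr_t;

/* Hypothetical hashed array of page locks, keyed on the physical address. */
#define PA_LOCK_COUNT	256
static pthread_mutex_t pa_locks[PA_LOCK_COUNT];
#define PA_LOCKPTR(pa)	(&pa_locks[((pa) >> 12) % PA_LOCK_COUNT])

static void
pa_locks_init(void)
{
	for (int i = 0; i < PA_LOCK_COUNT; i++)
		pthread_mutex_init(&pa_locks[i], NULL);
}

/* Drop the page lock only if one is actually held (pa != 0). */
#define PA_UNLOCK_COND(pa)						\
	do {								\
		if ((pa) != 0)						\
			pthread_mutex_unlock(PA_LOCKPTR(pa));		\
	} while (0)

/*
 * Switch from the page lock for *locked_pa to the one for pa.  Returns
 * non-zero when the caller's covering lock (here the pmap lock) had to be
 * dropped in order to take the page lock without blocking, so the caller
 * must go back and re-validate its lookup -- a guess at the contract
 * implied by the way the diff uses vm_page_pa_tryrelock().
 */
static int
page_pa_tryrelock(pthread_mutex_t *cover, vm_paddr_t pa, vm_paddr_t *locked_pa)
{
	if (*locked_pa == pa)
		return (0);		/* already holding the right lock */
	PA_UNLOCK_COND(*locked_pa);
	if (pthread_mutex_trylock(PA_LOCKPTR(pa)) != 0) {
		/* Would block: release the cover lock, then take both. */
		pthread_mutex_unlock(cover);
		pthread_mutex_lock(PA_LOCKPTR(pa));
		pthread_mutex_lock(cover);
		*locked_pa = pa;
		return (1);		/* cover lock was dropped: retry */
	}
	*locked_pa = pa;
	return (0);
}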
Diffstat (limited to 'sys/i386')
-rw-r--r--   sys/i386/i386/pmap.c     | 11
-rw-r--r--   sys/i386/include/pmap.h  |  3
-rw-r--r--   sys/i386/xen/pmap.c      | 11
3 files changed, 21 insertions, 4 deletions
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index d8b9686..661fade 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1346,14 +1346,19 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 	pd_entry_t pde;
 	pt_entry_t pte;
 	vm_page_t m;
+	vm_paddr_t pa;
 
+	pa = 0;
 	m = NULL;
-	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
+retry:
 	pde = *pmap_pde(pmap, va);
 	if (pde != 0) {
 		if (pde & PG_PS) {
 			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
+				if (vm_page_pa_tryrelock(pmap, (pde & PG_PS_FRAME) |
+				    (va & PDRMASK), &pa))
+					goto retry;
 				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
 				    (va & PDRMASK));
 				vm_page_hold(m);
@@ -1363,13 +1368,15 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 			pte = *pmap_pte_quick(pmap, va);
 			if (pte != 0 &&
 			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
+				if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, &pa))
+					goto retry;
 				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 				vm_page_hold(m);
 			}
 			sched_unpin();
 		}
 	}
-	vm_page_unlock_queues();
+	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index ae7d79d..f45a9df 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -420,11 +420,14 @@ struct pmap {
 	u_int			pm_active;	/* active on cpus */
 	struct pmap_statistics	pm_stats;	/* pmap statistics */
 	LIST_ENTRY(pmap) 	pm_list;	/* List of all pmaps */
+	uint32_t		pm_gen_count;	/* generation count (pmap lock dropped) */
+	u_int			pm_retries;
 #ifdef PAE
 	pdpt_entry_t		*pm_pdpt;	/* KVA of page director pointer table */
 #endif
 	vm_page_t		pm_root;	/* spare page table pages */
+
 };
 
 typedef struct pmap	*pmap_t;
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 18619c3..ec96ef0 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -1219,14 +1219,19 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 	pd_entry_t pde;
 	pt_entry_t pte;
 	vm_page_t m;
+	vm_paddr_t pa;
 
+	pa = 0;
 	m = NULL;
-	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
+retry:
 	pde = PT_GET(pmap_pde(pmap, va));
 	if (pde != 0) {
 		if (pde & PG_PS) {
 			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
+				if (vm_page_pa_tryrelock(pmap, (pde & PG_PS_FRAME) |
+				    (va & PDRMASK), &pa))
+					goto retry;
 				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
 				    (va & PDRMASK));
 				vm_page_hold(m);
@@ -1238,13 +1243,15 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 			PT_SET_MA(PADDR1, 0);
 			if ((pte & PG_V) &&
 			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
+				if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, &pa))
+					goto retry;
 				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 				vm_page_hold(m);
 			}
 			sched_unpin();
 		}
 	}
-	vm_page_unlock_queues();
+	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }
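
For readers skimming the hunks above, the following condensed sketch restates the new control flow of pmap_extract_and_hold(): take the pmap lock, translate the virtual address, try to switch to the page lock for the resulting frame, and redo the translation whenever that switch forced the pmap lock to be dropped. It reuses the hypothetical page_pa_tryrelock()/PA_UNLOCK_COND() helpers from the sketch after the commit message; fake_page, phys_to_page(), page_hold() and the translate callback are stand-ins for vm_page_t, PHYS_TO_VM_PAGE(), vm_page_hold() and the pde/pte walk, not the kernel's real API.

/* Assumes the pa_locks / page_pa_tryrelock() sketch above is in scope. */
struct fake_page { int hold_count; };
static struct fake_page pages[1024];

static struct fake_page *
phys_to_page(vm_paddr_t pa)		/* stand-in for PHYS_TO_VM_PAGE() */
{
	return (&pages[(pa >> 12) % 1024]);
}

static void
page_hold(struct fake_page *m)		/* stand-in for vm_page_hold() */
{
	m->hold_count++;		/* hold_count is now protected by the page lock */
}

static struct fake_page *
extract_and_hold(pthread_mutex_t *pmap_lock, vm_paddr_t (*translate)(void))
{
	struct fake_page *m = NULL;
	vm_paddr_t pa = 0, frame;

	pthread_mutex_lock(pmap_lock);		/* PMAP_LOCK(pmap) */
retry:
	frame = translate();			/* walk pde/pte under the pmap lock */
	if (frame != 0) {
		/*
		 * If switching to the page lock for this frame forced the
		 * pmap lock to be dropped, the translation may be stale:
		 * redo it before holding the page.
		 */
		if (page_pa_tryrelock(pmap_lock, frame, &pa))
			goto retry;
		m = phys_to_page(frame);
		page_hold(m);
	}
	PA_UNLOCK_COND(pa);			/* drop the page lock, if held */
	pthread_mutex_unlock(pmap_lock);	/* PMAP_UNLOCK(pmap) */
	return (m);
}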