diff options
author | alc <alc@FreeBSD.org> | 2011-12-30 18:16:15 +0000 |
---|---|---|
committer | alc <alc@FreeBSD.org> | 2011-12-30 18:16:15 +0000 |
commit | d65386a32fea92f57d713b7290622698243bbfa2 (patch) | |
tree | 1a36879554dc3c20626035a24799ab6da7836581 /sys/i386/xen | |
parent | e28679ec14f723663e64f5b4cd256d9d28129d97 (diff) | |
download | FreeBSD-src-d65386a32fea92f57d713b7290622698243bbfa2.zip FreeBSD-src-d65386a32fea92f57d713b7290622698243bbfa2.tar.gz |
Merge r216333 and r216555 from the native pmap
When r207410 eliminated the acquisition and release of the page queues
lock from pmap_extract_and_hold(), it didn't take into account that
pmap_pte_quick() sometimes requires the page queues lock to be held.
This change reimplements pmap_extract_and_hold() such that it no
longer uses pmap_pte_quick(), and thus never requires the page queues
lock.
Merge r177525 from the native pmap
Prevent the overflow in the calculation of the next page directory.
The overflow causes the wraparound with consequent corruption of the
(almost) whole address space mapping.
Strictly speaking, r177525 is not required by the Xen pmap because the
hypervisor steals the uppermost region of the normal kernel address
space. I am nonetheless merging it in order to reduce the number of
unnecessary differences between the native and Xen pmap implementations.
Tested by: sbruno
Diffstat (limited to 'sys/i386/xen')
-rw-r--r-- | sys/i386/xen/pmap.c | 22 |
1 file changed, 12 insertions(+), 10 deletions(-)
```diff
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 53847c9..189f311 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -1122,7 +1122,7 @@ vm_page_t
 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
 	pd_entry_t pde;
-	pt_entry_t pte;
+	pt_entry_t pte, *ptep;
 	vm_page_t m;
 	vm_paddr_t pa;
@@ -1142,21 +1142,17 @@ retry:
 				vm_page_hold(m);
 			}
 		} else {
-			sched_pin();
-			pte = PT_GET(pmap_pte_quick(pmap, va));
-			if (*PMAP1)
-				PT_SET_MA(PADDR1, 0);
-			if ((pte & PG_V) &&
+			ptep = pmap_pte(pmap, va);
+			pte = PT_GET(ptep);
+			pmap_pte_release(ptep);
+			if (pte != 0 &&
 			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
 				if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
-				    &pa)) {
-					sched_unpin();
+				    &pa))
 					goto retry;
-				}
 				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 				vm_page_hold(m);
 			}
-			sched_unpin();
 		}
 	}
 	PA_UNLOCK_COND(pa);
@@ -2316,6 +2312,8 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 		 * Calculate index for next page table.
 		 */
 		pdnxt = (sva + NBPDR) & ~PDRMASK;
+		if (pdnxt < sva)
+			pdnxt = eva;
 		if (pmap->pm_stats.resident_count == 0)
 			break;
@@ -2471,6 +2469,8 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		u_int pdirindex;

 		pdnxt = (sva + NBPDR) & ~PDRMASK;
+		if (pdnxt < sva)
+			pdnxt = eva;
 		pdirindex = sva >> PDRSHIFT;
 		ptpaddr = pmap->pm_pdir[pdirindex];
@@ -3172,6 +3172,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 		    ("pmap_copy: invalid to pmap_copy page tables"));

 		pdnxt = (addr + NBPDR) & ~PDRMASK;
+		if (pdnxt < addr)
+			pdnxt = end_addr;
 		ptepindex = addr >> PDRSHIFT;
 		srcptepaddr = PT_GET(&src_pmap->pm_pdir[ptepindex]);
```