path: root/sys/i386/xen/pmap.c
author		kib <kib@FreeBSD.org>	2014-08-24 07:53:15 +0000
committer	kib <kib@FreeBSD.org>	2014-08-24 07:53:15 +0000
commit		25782a7fab8e1a1c60517dab118fec0a98648dd6 (patch)
tree		007faf87a4ebb0e25b4a426bc07ab2babbead757 /sys/i386/xen/pmap.c
parent		bee605bad28d58f69b83b3197efb0bd49b38de99 (diff)
Merge the changes to pmap_enter(9) for sleep-less operation (requested
by flag).  The ia64 pmap.c changes are direct commit, since ia64 is
removed on head.

MFC r269368 (by alc):
Retire PVO_EXECUTABLE.

MFC r269728:
Change pmap_enter(9) interface to take flags parameter and superpage
mapping size (currently unused).

MFC r269759 (by alc):
Update the text of a KASSERT() to reflect the changes in r269728.

MFC r269822 (by alc):
Change {_,}pmap_allocpte() so that they look for the flag
PMAP_ENTER_NOSLEEP instead of M_NOWAIT/M_WAITOK when deciding whether
to sleep on page table page allocation.

MFC r270151 (by alc):
Replace KASSERT that no PV list locks are held with a conditional
unlock.

Reviewed by:	alc
Approved by:	re (gjb)
Sponsored by:	The FreeBSD Foundation
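For readers of the new interface, a minimal caller-side sketch follows. It is
not part of this commit; the helper name try_enter_page() and the retry policy
in the comment are purely illustrative. It shows how PMAP_ENTER_WIRED replaces
the old wired boolean, how PMAP_ENTER_NOSLEEP requests sleep-less operation,
and how allocation failure is reported as KERN_RESOURCE_SHORTAGE.

/*
 * Illustrative only: one way a machine-independent caller might use the
 * pmap_enter(9) interface introduced by this change.
 */
static int
try_enter_page(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	u_int flags;
	int rv;

	flags = PMAP_ENTER_NOSLEEP;		/* do not sleep on PT page allocation */
	if (wired)
		flags |= PMAP_ENTER_WIRED;	/* replaces the old boolean argument */

	/* psind (superpage mapping size) is currently unused; pass 0. */
	rv = pmap_enter(pmap, va, m, prot, flags, 0);
	if (rv == KERN_RESOURCE_SHORTAGE) {
		/*
		 * A page table page could not be allocated without sleeping.
		 * The caller may drop its locks, wait for free pages, and
		 * retry, or simply propagate the failure.
		 */
	}
	return (rv);
}

Without PMAP_ENTER_NOSLEEP, pmap_enter() keeps the old behaviour of sleeping
in VM_WAIT until a page table page can be allocated, as the _pmap_allocpte()
hunk in the diff below shows.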
Diffstat (limited to 'sys/i386/xen/pmap.c')
-rw-r--r--	sys/i386/xen/pmap.c	51
1 file changed, 27 insertions, 24 deletions
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index fa9bda8..fb7781d 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -298,9 +298,9 @@ static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m);
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
-static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
@@ -1546,21 +1546,17 @@ pmap_pinit(pmap_t pmap)
* mapped correctly.
*/
static vm_page_t
-_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
{
vm_paddr_t ptema;
vm_page_t m;
- KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
- (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
- ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
/*
* Allocate a page table page.
*/
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
- if (flags & M_WAITOK) {
+ if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
VM_WAIT;
@@ -1595,16 +1591,12 @@ _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
}
static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
{
u_int ptepindex;
pd_entry_t ptema;
vm_page_t m;
- KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
- (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
- ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
/*
* Calculate pagetable page index
*/
@@ -1644,7 +1636,7 @@ retry:
CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x",
pmap, va, flags);
m = _pmap_allocpte(pmap, ptepindex, flags);
- if (m == NULL && (flags & M_WAITOK))
+ if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
goto retry;
KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex));
@@ -2643,9 +2635,9 @@ retry:
* or lose information. That is, this routine must actually
* insert this page into the given map NOW.
*/
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
- vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ u_int flags, int8_t psind __unused)
{
pd_entry_t *pde;
pt_entry_t *pte;
@@ -2653,19 +2645,21 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
pv_entry_t pv;
vm_paddr_t opa, pa;
vm_page_t mpte, om;
- boolean_t invlva;
+ boolean_t invlva, wired;
- CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d",
- pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired);
+ CTR5(KTR_PMAP,
+ "pmap_enter: pmap=%08p va=0x%08x ma=0x%08x prot=0x%x flags=0x%x",
+ pmap, va, VM_PAGE_TO_MACH(m), prot, flags);
va = trunc_page(va);
KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
va));
if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
- VM_OBJECT_ASSERT_WLOCKED(m->object);
+ VM_OBJECT_ASSERT_LOCKED(m->object);
mpte = NULL;
+ wired = (flags & PMAP_ENTER_WIRED) != 0;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
@@ -2676,7 +2670,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
* resident, we are creating it here.
*/
if (va < VM_MAXUSER_ADDRESS) {
- mpte = pmap_allocpte(pmap, va, M_WAITOK);
+ mpte = pmap_allocpte(pmap, va, flags);
+ if (mpte == NULL) {
+ KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
+ ("pmap_allocpte failed with sleep allowed"));
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
}
pde = pmap_pde(pmap, va);
@@ -2842,6 +2844,7 @@ validate:
sched_unpin();
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
+ return (KERN_SUCCESS);
}
/*
@@ -2996,7 +2999,7 @@ pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_o
mpte->wire_count++;
} else {
mpte = _pmap_allocpte(pmap, ptepindex,
- M_NOWAIT);
+ PMAP_ENTER_NOSLEEP);
if (mpte == NULL)
return (mpte);
}
@@ -3287,7 +3290,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
*/
if ((ptetemp & PG_MANAGED) != 0) {
dstmpte = pmap_allocpte(dst_pmap, addr,
- M_NOWAIT);
+ PMAP_ENTER_NOSLEEP);
if (dstmpte == NULL)
goto out;
dst_pte = pmap_pte_quick(dst_pmap, addr);