summaryrefslogtreecommitdiffstats
path: root/sys/i386
diff options
context:
space:
mode:
authorkib <kib@FreeBSD.org>2014-08-08 17:12:03 +0000
committerkib <kib@FreeBSD.org>2014-08-08 17:12:03 +0000
commit094158b3f2a6f1f1c94fd8c1357192c5d7486a79 (patch)
tree0e7cb8ef3548f12ba3d175300ec28e89f06df725 /sys/i386
parent3ed5bde00cf51c1adc0152be002adec285130187 (diff)
downloadFreeBSD-src-094158b3f2a6f1f1c94fd8c1357192c5d7486a79.zip
FreeBSD-src-094158b3f2a6f1f1c94fd8c1357192c5d7486a79.tar.gz
Change pmap_enter(9) interface to take flags parameter and superpage
mapping size (currently unused). The flags include the fault access bits, the wired flag as PMAP_ENTER_WIRED, and a new flag PMAP_ENTER_NOSLEEP to indicate that pmap should not sleep. For powerpc aim, both 32 and 64 bit, fix the implementation to ensure that the requested mapping is created when PMAP_ENTER_NOSLEEP is not specified; in particular, wait for the available memory required to proceed. In collaboration with: alc Tested by: nwhitehorn (ppc aim32 and booke) Sponsored by: The FreeBSD Foundation and EMC / Isilon Storage Division MFC after: 2 weeks
Diffstat (limited to 'sys/i386')
-rw-r--r--sys/i386/i386/pmap.c29
-rw-r--r--sys/i386/xen/pmap.c51
2 files changed, 47 insertions, 33 deletions
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 8c21b0d..24c8f95 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3458,9 +3458,9 @@ setpte:
* or lose information. That is, this routine must actually
* insert this page into the given map NOW.
*/
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
- vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ u_int flags, int8_t psind)
{
pd_entry_t *pde;
pt_entry_t *pte;
@@ -3468,17 +3468,19 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
pv_entry_t pv;
vm_paddr_t opa, pa;
vm_page_t mpte, om;
- boolean_t invlva;
+ boolean_t invlva, nosleep, wired;
va = trunc_page(va);
+ mpte = NULL;
+ wired = (flags & PMAP_ENTER_WIRED) != 0;
+ nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
+
KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
va));
if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
- VM_OBJECT_ASSERT_WLOCKED(m->object);
-
- mpte = NULL;
+ VM_OBJECT_ASSERT_LOCKED(m->object);
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
@@ -3489,7 +3491,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
* resident, we are creating it here.
*/
if (va < VM_MAXUSER_ADDRESS) {
- mpte = pmap_allocpte(pmap, va, M_WAITOK);
+ mpte = pmap_allocpte(pmap, va, nosleep ? M_NOWAIT : M_WAITOK);
+ if (mpte == NULL) {
+ KASSERT(nosleep,
+ ("pmap_allocpte failed with sleep allowed"));
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
}
pde = pmap_pde(pmap, va);
@@ -3607,7 +3617,7 @@ validate:
*/
if ((origpte & ~(PG_M|PG_A)) != newpte) {
newpte |= PG_A;
- if ((access & VM_PROT_WRITE) != 0)
+ if ((flags & VM_PROT_WRITE) != 0)
newpte |= PG_M;
if (origpte & PG_V) {
invlva = FALSE;
@@ -3652,6 +3662,7 @@ validate:
sched_unpin();
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
+ return (KERN_SUCCESS);
}
/*
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index fbfdcf7..3cc8937 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -298,9 +298,9 @@ static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m);
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
-static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
@@ -1546,21 +1546,17 @@ pmap_pinit(pmap_t pmap)
* mapped correctly.
*/
static vm_page_t
-_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
{
vm_paddr_t ptema;
vm_page_t m;
- KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
- (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
- ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
/*
* Allocate a page table page.
*/
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
- if (flags & M_WAITOK) {
+ if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
VM_WAIT;
@@ -1595,16 +1591,12 @@ _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
}
static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
{
u_int ptepindex;
pd_entry_t ptema;
vm_page_t m;
- KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
- (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
- ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
/*
* Calculate pagetable page index
*/
@@ -1644,7 +1636,7 @@ retry:
CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x",
pmap, va, flags);
m = _pmap_allocpte(pmap, ptepindex, flags);
- if (m == NULL && (flags & M_WAITOK))
+ if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
goto retry;
KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex));
@@ -2643,9 +2635,9 @@ retry:
* or lose information. That is, this routine must actually
* insert this page into the given map NOW.
*/
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
- vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ u_int flags, int8_t psind __unused)
{
pd_entry_t *pde;
pt_entry_t *pte;
@@ -2653,19 +2645,21 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
pv_entry_t pv;
vm_paddr_t opa, pa;
vm_page_t mpte, om;
- boolean_t invlva;
+ boolean_t invlva, wired;
- CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d",
- pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired);
+ CTR5(KTR_PMAP,
+ "pmap_enter: pmap=%08p va=0x%08x ma=0x%08x prot=0x%x flags=0x%x",
+ pmap, va, VM_PAGE_TO_MACH(m), prot, flags);
va = trunc_page(va);
KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
va));
if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
- VM_OBJECT_ASSERT_WLOCKED(m->object);
+ VM_OBJECT_ASSERT_LOCKED(m->object);
mpte = NULL;
+ wired = (flags & PMAP_ENTER_WIRED) != 0;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
@@ -2676,7 +2670,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
* resident, we are creating it here.
*/
if (va < VM_MAXUSER_ADDRESS) {
- mpte = pmap_allocpte(pmap, va, M_WAITOK);
+ mpte = pmap_allocpte(pmap, va, flags);
+ if (mpte == NULL) {
+ KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
+ ("pmap_allocpte failed with sleep allowed"));
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
}
pde = pmap_pde(pmap, va);
@@ -2842,6 +2844,7 @@ validate:
sched_unpin();
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
+ return (KERN_SUCCESS);
}
/*
@@ -2996,7 +2999,7 @@ pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_o
mpte->wire_count++;
} else {
mpte = _pmap_allocpte(pmap, ptepindex,
- M_NOWAIT);
+ PMAP_ENTER_NOSLEEP);
if (mpte == NULL)
return (mpte);
}
@@ -3305,7 +3308,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
*/
if ((ptetemp & PG_MANAGED) != 0) {
dstmpte = pmap_allocpte(dst_pmap, addr,
- M_NOWAIT);
+ PMAP_ENTER_NOSLEEP);
if (dstmpte == NULL)
goto out;
dst_pte = pmap_pte_quick(dst_pmap, addr);
OpenPOWER on IntegriCloud