summary refs log tree commit diff stats
path: root/sys/arm
diff options
context:
space:
mode:
authorkib <kib@FreeBSD.org>2014-08-24 07:53:15 +0000
committerkib <kib@FreeBSD.org>2014-08-24 07:53:15 +0000
commit25782a7fab8e1a1c60517dab118fec0a98648dd6 (patch)
tree007faf87a4ebb0e25b4a426bc07ab2babbead757 /sys/arm
parentbee605bad28d58f69b83b3197efb0bd49b38de99 (diff)
downloadFreeBSD-src-25782a7fab8e1a1c60517dab118fec0a98648dd6.zip
FreeBSD-src-25782a7fab8e1a1c60517dab118fec0a98648dd6.tar.gz
Merge the changes to pmap_enter(9) for sleep-less operation (requested
by flag). The ia64 pmap.c changes are direct commit, since ia64 is
removed on head.

MFC r269368 (by alc):
Retire PVO_EXECUTABLE.

MFC r269728:
Change pmap_enter(9) interface to take flags parameter and superpage
mapping size (currently unused).

MFC r269759 (by alc):
Update the text of a KASSERT() to reflect the changes in r269728.

MFC r269822 (by alc):
Change {_,}pmap_allocpte() so that they look for the flag
PMAP_ENTER_NOSLEEP instead of M_NOWAIT/M_WAITOK when deciding whether
to sleep on page table page allocation.

MFC r270151 (by alc):
Replace KASSERT that no PV list locks are held with a conditional
unlock.

Reviewed by:	alc
Approved by:	re (gjb)
Sponsored by:	The FreeBSD Foundation
Diffstat (limited to 'sys/arm')
-rw-r--r--sys/arm/arm/pmap-v6.c73
-rw-r--r--sys/arm/arm/pmap.c41
2 files changed, 58 insertions, 56 deletions
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 2690092..197a2eb 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -231,8 +231,8 @@ static boolean_t pmap_pv_insert_section(pmap_t, vm_offset_t,
static struct pv_entry *pmap_remove_pv(struct vm_page *, pmap_t, vm_offset_t);
static int pmap_pvh_wired_mappings(struct md_page *, int);
-static void pmap_enter_locked(pmap_t, vm_offset_t, vm_prot_t,
- vm_page_t, vm_prot_t, boolean_t, int);
+static int pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+ vm_prot_t, u_int);
static vm_paddr_t pmap_extract_locked(pmap_t pmap, vm_offset_t va);
static void pmap_alloc_l1(pmap_t);
static void pmap_free_l1(pmap_t);
@@ -2944,35 +2944,38 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
* insert this page into the given map NOW.
*/
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
- vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ u_int flags, int8_t psind __unused)
{
struct l2_bucket *l2b;
+ int rv;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
- pmap_enter_locked(pmap, va, access, m, prot, wired, M_WAITOK);
- /*
- * If both the l2b_occupancy and the reservation are fully
- * populated, then attempt promotion.
- */
- l2b = pmap_get_l2_bucket(pmap, va);
- if ((l2b != NULL) && (l2b->l2b_occupancy == L2_PTE_NUM_TOTAL) &&
- sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
- vm_reserv_level_iffullpop(m) == 0)
- pmap_promote_section(pmap, va);
-
+ rv = pmap_enter_locked(pmap, va, m, prot, flags);
+ if (rv == KERN_SUCCESS) {
+ /*
+ * If both the l2b_occupancy and the reservation are fully
+ * populated, then attempt promotion.
+ */
+ l2b = pmap_get_l2_bucket(pmap, va);
+ if (l2b != NULL && l2b->l2b_occupancy == L2_PTE_NUM_TOTAL &&
+ sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
+ vm_reserv_level_iffullpop(m) == 0)
+ pmap_promote_section(pmap, va);
+ }
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
+ return (rv);
}
/*
* The pvh global and pmap locks must be held.
*/
-static void
-pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
- vm_prot_t prot, boolean_t wired, int flags)
+static int
+pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ u_int flags)
{
struct l2_bucket *l2b = NULL;
struct vm_page *om;
@@ -2990,9 +2993,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
pa = systempage.pv_pa;
m = NULL;
} else {
- KASSERT((m->oflags & VPO_UNMANAGED) != 0 ||
- vm_page_xbusied(m) || (flags & M_NOWAIT) != 0,
- ("pmap_enter_locked: page %p is not busy", m));
+ if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+ VM_OBJECT_ASSERT_LOCKED(m->object);
pa = VM_PAGE_TO_PHYS(m);
}
@@ -3013,12 +3015,12 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
if (prot & VM_PROT_WRITE)
nflags |= PVF_WRITE;
- if (wired)
+ if ((flags & PMAP_ENTER_WIRED) != 0)
nflags |= PVF_WIRED;
PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, "
- "prot = %x, wired = %x\n", (uint32_t) pmap, va, (uint32_t) m,
- prot, wired));
+ "prot = %x, flags = %x\n", (uint32_t) pmap, va, (uint32_t) m,
+ prot, flags));
if (pmap == pmap_kernel()) {
l2b = pmap_get_l2_bucket(pmap, va);
@@ -3028,7 +3030,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
do_l2b_alloc:
l2b = pmap_alloc_l2_bucket(pmap, va);
if (l2b == NULL) {
- if (flags & M_WAITOK) {
+ if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
VM_WAIT;
@@ -3036,7 +3038,7 @@ do_l2b_alloc:
PMAP_LOCK(pmap);
goto do_l2b_alloc;
}
- return;
+ return (KERN_RESOURCE_SHORTAGE);
}
}
@@ -3195,6 +3197,7 @@ validate:
if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
cpu_icache_sync_range(va, PAGE_SIZE);
+ return (KERN_SUCCESS);
}
/*
@@ -3216,13 +3219,12 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_offset_t va;
vm_page_t m;
vm_pindex_t diff, psize;
- vm_prot_t access;
VM_OBJECT_ASSERT_LOCKED(m_start->object);
psize = atop(end - start);
m = m_start;
- access = prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
+ prot &= VM_PROT_READ | VM_PROT_EXECUTE;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
@@ -3232,8 +3234,8 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
pmap_enter_section(pmap, va, m, prot))
m = &m[L1_S_SIZE / PAGE_SIZE - 1];
else
- pmap_enter_locked(pmap, va, access, m, prot,
- FALSE, M_NOWAIT);
+ pmap_enter_locked(pmap, va, m, prot,
+ PMAP_ENTER_NOSLEEP);
m = TAILQ_NEXT(m, listq);
}
PMAP_UNLOCK(pmap);
@@ -3252,12 +3254,11 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
- vm_prot_t access;
- access = prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
+ prot &= VM_PROT_READ | VM_PROT_EXECUTE;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
- pmap_enter_locked(pmap, va, access, m, prot, FALSE, M_NOWAIT);
+ pmap_enter_locked(pmap, va, m, prot, PMAP_ENTER_NOSLEEP);
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
}
@@ -3488,8 +3489,8 @@ pmap_pinit(pmap_t pmap)
pmap->pm_stats.resident_count = 1;
if (vector_page < KERNBASE) {
pmap_enter(pmap, vector_page,
- VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
- VM_PROT_READ, 1);
+ PHYS_TO_VM_PAGE(systempage.pv_pa), VM_PROT_READ,
+ PMAP_ENTER_WIRED, 0);
}
return (1);
}
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index d19306d..802d5ee 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -199,8 +199,8 @@ extern int last_fault_code;
static void pmap_free_pv_entry (pv_entry_t);
static pv_entry_t pmap_get_pv_entry(void);
-static void pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
- vm_prot_t, boolean_t, int);
+static int pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+ vm_prot_t, u_int);
static vm_paddr_t pmap_extract_locked(pmap_t pmap, vm_offset_t va);
static void pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t);
static void pmap_alloc_l1(pmap_t);
@@ -3204,24 +3204,26 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
* insert this page into the given map NOW.
*/
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
- vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ u_int flags, int8_t psind __unused)
{
+ int rv;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
- pmap_enter_locked(pmap, va, m, prot, wired, M_WAITOK);
+ rv = pmap_enter_locked(pmap, va, m, prot, flags);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
+ return (rv);
}
/*
* The pvh global and pmap locks must be held.
*/
-static void
+static int
pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- boolean_t wired, int flags)
+ u_int flags)
{
struct l2_bucket *l2b = NULL;
struct vm_page *opg;
@@ -3237,9 +3239,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pa = systempage.pv_pa;
m = NULL;
} else {
- KASSERT((m->oflags & VPO_UNMANAGED) != 0 ||
- vm_page_xbusied(m) || (flags & M_NOWAIT) != 0,
- ("pmap_enter_locked: page %p is not busy", m));
+ if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+ VM_OBJECT_ASSERT_LOCKED(m->object);
pa = VM_PAGE_TO_PHYS(m);
}
nflags = 0;
@@ -3247,10 +3248,10 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
nflags |= PVF_WRITE;
if (prot & VM_PROT_EXECUTE)
nflags |= PVF_EXEC;
- if (wired)
+ if ((flags & PMAP_ENTER_WIRED) != 0)
nflags |= PVF_WIRED;
PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, "
- "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired));
+ "flags = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, flags));
if (pmap == pmap_kernel()) {
l2b = pmap_get_l2_bucket(pmap, va);
@@ -3260,7 +3261,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
do_l2b_alloc:
l2b = pmap_alloc_l2_bucket(pmap, va);
if (l2b == NULL) {
- if (flags & M_WAITOK) {
+ if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
VM_WAIT;
@@ -3268,7 +3269,7 @@ do_l2b_alloc:
PMAP_LOCK(pmap);
goto do_l2b_alloc;
}
- return;
+ return (KERN_RESOURCE_SHORTAGE);
}
}
@@ -3482,6 +3483,7 @@ do_l2b_alloc:
if (m)
pmap_fix_cache(m, pmap, va);
}
+ return (KERN_SUCCESS);
}
/*
@@ -3511,7 +3513,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
- (VM_PROT_READ | VM_PROT_EXECUTE), FALSE, M_NOWAIT);
+ (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP);
m = TAILQ_NEXT(m, listq);
}
rw_wunlock(&pvh_global_lock);
@@ -3534,7 +3536,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
- FALSE, M_NOWAIT);
+ PMAP_ENTER_NOSLEEP);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -3746,9 +3748,8 @@ pmap_pinit(pmap_t pmap)
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
pmap->pm_stats.resident_count = 1;
if (vector_page < KERNBASE) {
- pmap_enter(pmap, vector_page,
- VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
- VM_PROT_READ, 1);
+ pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa),
+ VM_PROT_READ, PMAP_ENTER_WIRED | VM_PROT_READ, 0);
}
return (1);
}
OpenPOWER on IntegriCloud