author	cognet <cognet@FreeBSD.org>	2006-07-11 11:22:06 +0000
committer	cognet <cognet@FreeBSD.org>	2006-07-11 11:22:06 +0000
commit	510c505cb511f6cff31809b1aeea2803371bef94 (patch)
tree	8fb69ba40c9e70ce6f343d025eb55708378c15a8 /sys/arm
parent	21b5baefd6277e63ad554f996fd121eb0e1b6ef0 (diff)
Add a new flag to pmap_enter_locked() to say whether it is OK to wait. If it
is, and we are unable to allocate the memory for a PTE, wait until we can.
If not, just return. Use M_NOWAIT|M_USE_RESERVE to allocate PTEs, as it is
less aggressive than M_NOWAIT alone.

Suggested by:	alc
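The retry pattern the change introduces can be seen in the pmap_enter_locked() hunk below: try a non-blocking allocation, and only if the caller passed M_WAITOK drop the locks, wait for memory, and try again. A minimal userland sketch of that pattern follows; the names alloc_pte(), try_alloc_pte(), and CAN_WAIT are hypothetical stand-ins for illustration only, with malloc() and sleep() standing in for uma_zalloc(..., M_NOWAIT|M_USE_RESERVE) and VM_WAIT.

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	#define CAN_WAIT	0x1	/* caller may sleep, like M_WAITOK in the diff */

	static void *
	try_alloc_pte(size_t size)
	{
		/* Stand-in for the non-blocking uma_zalloc() call. */
		return (malloc(size));
	}

	static void *
	alloc_pte(size_t size, int flags)
	{
		void *p;

		for (;;) {
			p = try_alloc_pte(size);
			if (p != NULL || (flags & CAN_WAIT) == 0)
				return (p);	/* success, or caller cannot wait */
			sleep(1);		/* stand-in for VM_WAIT: wait for free memory */
		}
	}

	int
	main(void)
	{
		void *pte = alloc_pte(64, CAN_WAIT);

		printf("allocation %s\n", pte != NULL ? "succeeded" : "failed");
		free(pte);
		return (0);
	}

In the real commit, pmap_enter() passes M_WAITOK because it may sleep, while pmap_enter_object() and pmap_enter_quick() pass M_NOWAIT and simply return if no PTE can be allocated.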
Diffstat (limited to 'sys/arm')
-rw-r--r--	sys/arm/arm/pmap.c	32
1 file changed, 22 insertions, 10 deletions
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 7ebc5ea..6b23aa4 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -199,7 +199,7 @@ static void pmap_free_pv_entry (pv_entry_t);
static pv_entry_t pmap_get_pv_entry(void);
static void pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
- vm_prot_t, boolean_t);
+ vm_prot_t, boolean_t, int);
static void pmap_vac_me_harder(struct vm_page *, pmap_t,
vm_offset_t);
static void pmap_vac_me_kpmap(struct vm_page *, pmap_t,
@@ -373,7 +373,7 @@ struct l2_dtable {
* L2 allocation.
*/
#define pmap_alloc_l2_dtable() \
- (void*)uma_zalloc(l2table_zone, M_NOWAIT)
+ (void*)uma_zalloc(l2table_zone, M_NOWAIT|M_USE_RESERVE)
#define pmap_free_l2_dtable(l2) \
uma_zfree(l2table_zone, l2)
@@ -952,7 +952,7 @@ again_l2table:
again_ptep:
PMAP_UNLOCK(pm);
vm_page_unlock_queues();
- ptep = (void*)uma_zalloc(l2zone, M_NOWAIT);
+ ptep = (void*)uma_zalloc(l2zone, M_NOWAIT|M_USE_RESERVE);
vm_page_lock_queues();
PMAP_LOCK(pm);
if (l2b->l2b_kva != 0) {
@@ -3306,7 +3306,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
vm_page_lock_queues();
PMAP_LOCK(pmap);
- pmap_enter_locked(pmap, va, m, prot, wired);
+ pmap_enter_locked(pmap, va, m, prot, wired, M_WAITOK);
vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@@ -3316,7 +3316,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
static void
pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- boolean_t wired)
+ boolean_t wired, int flags)
{
struct l2_bucket *l2b = NULL;
struct vm_page *opg;
@@ -3347,10 +3347,22 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
l2b = pmap_get_l2_bucket(pmap, va);
if (l2b == NULL)
l2b = pmap_grow_l2_bucket(pmap, va);
- } else
+ } else {
+do_l2b_alloc:
l2b = pmap_alloc_l2_bucket(pmap, va);
- KASSERT(l2b != NULL,
- ("pmap_enter: failed to allocate l2 bucket"));
+ if (l2b == NULL) {
+ if (flags & M_WAITOK) {
+ PMAP_UNLOCK(pmap);
+ vm_page_unlock_queues();
+ VM_WAIT;
+ vm_page_lock_queues();
+ PMAP_LOCK(pmap);
+ goto do_l2b_alloc;
+ }
+ return;
+ }
+ }
+
ptep = &l2b->l2b_kva[l2pte_index(va)];
opte = *ptep;
@@ -3557,7 +3569,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
- (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+ (VM_PROT_READ | VM_PROT_EXECUTE), FALSE, M_NOWAIT);
m = TAILQ_NEXT(m, listq);
}
PMAP_UNLOCK(pmap);
@@ -3578,7 +3590,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
PMAP_LOCK(pmap);
pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
- FALSE);
+ FALSE, M_NOWAIT);
PMAP_UNLOCK(pmap);
}