Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r--  sys/vm/vm_page.c  106
1 file changed, 81 insertions, 25 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 888af45..49398f2 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -167,6 +167,7 @@ static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
vm_page_t mpred);
static int vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
vm_paddr_t high);
+static int vm_page_alloc_fail(vm_object_t object, int req);
SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);
@@ -1606,6 +1607,8 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
("inconsistent object(%p)/req(%x)", object, req));
+ KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
+ ("Can't sleep and retry object insertion."));
KASSERT(mpred == NULL || mpred->pindex < pindex,
("mpred %p doesn't precede pindex 0x%jx", mpred,
(uintmax_t)pindex));
@@ -1627,6 +1630,7 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
* Allocate a page if the number of free pages exceeds the minimum
* for the request class.
*/
+again:
mtx_lock(&vm_page_queue_free_mtx);
if (vm_cnt.v_free_count > vm_cnt.v_free_reserved ||
(req_class == VM_ALLOC_SYSTEM &&
@@ -1659,10 +1663,8 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
/*
* Not allocatable, give up.
*/
- mtx_unlock(&vm_page_queue_free_mtx);
- atomic_add_int(&vm_pageout_deficit,
- max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
- pagedaemon_wakeup();
+ if (vm_page_alloc_fail(object, req))
+ goto again;
return (NULL);
}
@@ -1716,6 +1718,11 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
m->busy_lock = VPB_UNBUSIED;
/* Don't change PG_ZERO. */
vm_page_free_toq(m);
+ if (req & VM_ALLOC_WAITFAIL) {
+ VM_OBJECT_WUNLOCK(object);
+ vm_radix_wait();
+ VM_OBJECT_WLOCK(object);
+ }
return (NULL);
}
@@ -1793,6 +1800,8 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object,
req));
+ KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
+ ("Can't sleep and retry object insertion."));
if (object != NULL) {
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
@@ -1818,6 +1827,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
* Can we allocate the pages without the number of free pages falling
* below the lower bound for the allocation class?
*/
+again:
mtx_lock(&vm_page_queue_free_mtx);
if (vm_cnt.v_free_count >= npages + vm_cnt.v_free_reserved ||
(req_class == VM_ALLOC_SYSTEM &&
@@ -1839,9 +1849,8 @@ retry:
m_ret = vm_phys_alloc_contig(npages, low, high,
alignment, boundary);
} else {
- mtx_unlock(&vm_page_queue_free_mtx);
- atomic_add_int(&vm_pageout_deficit, npages);
- pagedaemon_wakeup();
+ if (vm_page_alloc_fail(object, req))
+ goto again;
return (NULL);
}
if (m_ret != NULL) {
@@ -1910,6 +1919,11 @@ retry:
/* Don't change PG_ZERO. */
vm_page_free_toq(m);
}
+ if (req & VM_ALLOC_WAITFAIL) {
+ VM_OBJECT_WUNLOCK(object);
+ vm_radix_wait();
+ VM_OBJECT_WLOCK(object);
+ }
return (NULL);
}
mpred = m;
@@ -1982,18 +1996,17 @@ vm_page_alloc_freelist(int flind, int req)
/*
* Do not allocate reserved pages unless the req has asked for it.
*/
+again:
mtx_lock(&vm_page_queue_free_mtx);
if (vm_cnt.v_free_count > vm_cnt.v_free_reserved ||
(req_class == VM_ALLOC_SYSTEM &&
vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) ||
(req_class == VM_ALLOC_INTERRUPT &&
- vm_cnt.v_free_count > 0))
+ vm_cnt.v_free_count > 0)) {
m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
- else {
- mtx_unlock(&vm_page_queue_free_mtx);
- atomic_add_int(&vm_pageout_deficit,
- max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
- pagedaemon_wakeup();
+ } else {
+ if (vm_page_alloc_fail(NULL, req))
+ goto again;
return (NULL);
}
if (m == NULL) {
@@ -2557,11 +2570,11 @@ vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
* Sleep until free pages are available for allocation.
* - Called in various places before memory allocations.
*/
-void
-vm_wait(void)
+static void
+_vm_wait(void)
{
- mtx_lock(&vm_page_queue_free_mtx);
+ mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
if (curproc == pageproc) {
vm_pageout_pages_needed = 1;
msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
@@ -2579,6 +2592,46 @@ vm_wait(void)
}
}
+void
+vm_wait(void)
+{
+
+ mtx_lock(&vm_page_queue_free_mtx);
+ _vm_wait();
+}
+
+/*
+ * vm_page_alloc_fail:
+ *
+ * Called when a page allocation function fails. Informs the
+ * pagedaemon and performs the requested wait. Requires the
+ * page_queue_free lock (and the object lock, if any) on entry.
+ * Returns with the object lock held and the free lock released.
+ * Returns EAGAIN when a retry is necessary.
+ *
+ */
+static int
+vm_page_alloc_fail(vm_object_t object, int req)
+{
+
+ mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+
+ atomic_add_int(&vm_pageout_deficit,
+ max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
+ pagedaemon_wakeup();
+ if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
+ if (object != NULL)
+ VM_OBJECT_WUNLOCK(object);
+ _vm_wait();
+ if (object != NULL)
+ VM_OBJECT_WLOCK(object);
+ if (req & VM_ALLOC_WAITOK)
+ return (EAGAIN);
+ } else
+ mtx_unlock(&vm_page_queue_free_mtx);
+ return (0);
+}
+
/*
* vm_waitpfault: (also see VM_WAITPFAULT macro)
*
@@ -3190,11 +3243,16 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
vm_page_t m;
int sleep;
+ int pflags;
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
(allocflags & VM_ALLOC_IGN_SBUSY) != 0,
("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
+ pflags = allocflags &
+ ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
+ if ((allocflags & VM_ALLOC_NOWAIT) == 0)
+ pflags |= VM_ALLOC_WAITFAIL;
retrylookup:
if ((m = vm_page_lookup(object, pindex)) != NULL) {
sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
@@ -3228,13 +3286,10 @@ retrylookup:
return (m);
}
}
- m = vm_page_alloc(object, pindex, allocflags);
+ m = vm_page_alloc(object, pindex, pflags);
if (m == NULL) {
if ((allocflags & VM_ALLOC_NOWAIT) != 0)
return (NULL);
- VM_OBJECT_WUNLOCK(object);
- VM_WAIT;
- VM_OBJECT_WLOCK(object);
goto retrylookup;
}
if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
@@ -3273,6 +3328,7 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
vm_page_t *ma, int count)
{
vm_page_t m, mpred;
+ int pflags;
int i;
bool sleep;
@@ -3287,6 +3343,10 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch"));
if (count == 0)
return (0);
+ pflags = allocflags & ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK |
+ VM_ALLOC_WAITFAIL | VM_ALLOC_IGN_SBUSY);
+ if ((allocflags & VM_ALLOC_NOWAIT) == 0)
+ pflags |= VM_ALLOC_WAITFAIL;
i = 0;
retrylookup:
m = vm_radix_lookup_le(&object->rtree, pindex + i);
@@ -3327,14 +3387,10 @@ retrylookup:
vm_page_sbusy(m);
} else {
m = vm_page_alloc_after(object, pindex + i,
- (allocflags & ~VM_ALLOC_IGN_SBUSY) |
- VM_ALLOC_COUNT(count - i), mpred);
+ pflags | VM_ALLOC_COUNT(count - i), mpred);
if (m == NULL) {
if ((allocflags & VM_ALLOC_NOWAIT) != 0)
break;
- VM_OBJECT_WUNLOCK(object);
- VM_WAIT;
- VM_OBJECT_WLOCK(object);
goto retrylookup;
}
}
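
Example: caller-side use of the reworked allocation flags (an editorial sketch, not part of this commit). The helpers grab_page() and alloc_unmanaged_page() are hypothetical; the flag semantics follow the diff above: VM_ALLOC_NOWAIT fails immediately, VM_ALLOC_WAITFAIL sleeps in vm_page_alloc_fail() and then fails so the caller must redo its lookup, and VM_ALLOC_WAITOK is only legal for object-less allocations (see the new KASSERTs), where the allocator sleeps and retries internally via the new "again:" label.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/* Hypothetical caller; assumes 'object' is exclusively locked. */
static vm_page_t
grab_page(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
retry:
	if ((m = vm_page_lookup(object, pindex)) != NULL)
		return (m);
	/*
	 * On failure, VM_ALLOC_WAITFAIL makes vm_page_alloc_fail()
	 * drop the object lock, sleep in _vm_wait(), relock, and
	 * return NULL; the lookup must then be redone because the
	 * object may have changed while it was unlocked.
	 */
	m = vm_page_alloc(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
	if (m == NULL)
		goto retry;
	return (m);
}

/*
 * Hypothetical object-less allocation: VM_ALLOC_WAITOK is permitted
 * here and, under this change, should not return NULL; the allocator
 * itself sleeps and retries via "goto again".
 */
static vm_page_t
alloc_unmanaged_page(void)
{

	return (vm_page_alloc(NULL, 0,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK));
}

This mirrors the conversion of vm_page_grab() above, which now maps its allocflags to pflags with VM_ALLOC_WAITFAIL instead of the old unlock/VM_WAIT/relock sequence.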