author     markj <markj@FreeBSD.org>      2018-01-08 16:36:33 +0000
committer  Luiz Souza <luiz@netgate.com>  2018-02-21 15:15:52 -0300
commit     d3bf3c213957fc81f5caa9afd853c742e293c1ef (patch)
tree       978a4805d514ad64d1214e1a7fd0d391c88e61da
parent     2546f9b8e48cef95bc882b68d42e05ca3f33078d (diff)
MFC r322547:
Add vm_page_alloc_after().

(cherry picked from commit 8e264f308c8b33afa7e707ce0f70254f4e1bea1b)
-rw-r--r--  sys/vm/vm_kern.c  13
-rw-r--r--  sys/vm/vm_page.c  47
-rw-r--r--  sys/vm/vm_page.h   3
3 files changed, 42 insertions(+), 21 deletions(-)
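The new interface is a variant of vm_page_alloc() that takes the predecessor page explicitly, so a caller that already knows it can skip the per-call radix lookup. A minimal caller-side sketch (not part of this commit; object, pindex, and the VM_ALLOC_* flags are placeholders chosen for illustration), assuming <vm/vm_page.h>, <vm/vm_radix.h>, and <vm/vm_object.h> are included and the object's write lock is held:

	/* Sketch only: allocate one page at pindex with a predecessor hint. */
	vm_page_t m, mpred;

	VM_OBJECT_ASSERT_WLOCKED(object);
	/* Resident page with the largest index below pindex, or NULL. */
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	m = vm_page_alloc_after(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED, mpred);
	if (m == NULL) {
		/* No free pages; wait or fail, as with vm_page_alloc(). */
	}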
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index af678ac..8e9ab76 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
+#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
@@ -329,7 +330,7 @@ int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
vm_offset_t offset, i;
- vm_page_t m;
+ vm_page_t m, mpred;
int pflags;
KASSERT(object == kmem_object || object == kernel_object,
@@ -338,10 +339,13 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
offset = addr - VM_MIN_KERNEL_ADDRESS;
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
- VM_OBJECT_WLOCK(object);
- for (i = 0; i < size; i += PAGE_SIZE) {
+ i = 0;
retry:
- m = vm_page_alloc(object, atop(offset + i), pflags);
+ VM_OBJECT_WLOCK(object);
+ mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
+ for (; i < size; i += PAGE_SIZE, mpred = m) {
+ m = vm_page_alloc_after(object, atop(offset + i), pflags,
+ mpred);
/*
* Ran out of space, free everything up and return. Don't need
@@ -352,7 +356,6 @@ retry:
VM_OBJECT_WUNLOCK(object);
if ((flags & M_NOWAIT) == 0) {
VM_WAIT;
- VM_OBJECT_WLOCK(object);
goto retry;
}
kmem_unback(object, addr, i);
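
The kmem_back() hunk above shows the intended pattern for populating a run of consecutive indices: take the object lock, look up the predecessor once, and carry it forward with mpred = m on each iteration. Condensed into a standalone sketch (object, pindex, npages, and pflags are placeholders; the unwinding that kmem_unback() performs on failure is omitted):

	vm_page_t m, mpred;
	vm_pindex_t i;

	VM_OBJECT_WLOCK(object);
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	for (i = 0; i < npages; i++, mpred = m) {
		m = vm_page_alloc_after(object, pindex + i, pflags, mpred);
		if (m == NULL)
			break;		/* Out of pages; caller must unwind. */
	}
	VM_OBJECT_WUNLOCK(object);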
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 6553598..888af45 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1583,15 +1583,32 @@ vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
- vm_page_t m, mpred;
+
+ return (vm_page_alloc_after(object, pindex, req, object != NULL ?
+ vm_radix_lookup_le(&object->rtree, pindex) : NULL));
+}
+
+/*
+ * Allocate a page in the specified object with the given page index. To
+ * optimize insertion of the page into the object, the caller must also specify
+ * the resident page in the object with largest index smaller than the given
+ * page index, or NULL if no such page exists.
+ */
+vm_page_t
+vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
+ vm_page_t mpred)
+{
+ vm_page_t m;
int flags, req_class;
- mpred = NULL; /* XXX: pacify gcc */
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
(object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
- ("vm_page_alloc: inconsistent object(%p)/req(%x)", object, req));
+ ("inconsistent object(%p)/req(%x)", object, req));
+ KASSERT(mpred == NULL || mpred->pindex < pindex,
+ ("mpred %p doesn't precede pindex 0x%jx", mpred,
+ (uintmax_t)pindex));
if (object != NULL)
VM_OBJECT_ASSERT_WLOCKED(object);
@@ -1606,12 +1623,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
req_class = VM_ALLOC_SYSTEM;
- if (object != NULL) {
- mpred = vm_radix_lookup_le(&object->rtree, pindex);
- KASSERT(mpred == NULL || mpred->pindex != pindex,
- ("vm_page_alloc: pindex already allocated"));
- }
-
/*
* Allocate a page if the number of free pages exceeds the minimum
* for the request class.
@@ -1658,7 +1669,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
/*
* At this point we had better have found a good page.
*/
- KASSERT(m != NULL, ("vm_page_alloc: missing page"));
+ KASSERT(m != NULL, ("missing page"));
vm_phys_freecnt_adj(m, -1);
if ((m->flags & PG_ZERO) != 0)
vm_page_zero_count--;
@@ -3261,7 +3272,7 @@ int
vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
vm_page_t *ma, int count)
{
- vm_page_t m;
+ vm_page_t m, mpred;
int i;
bool sleep;
@@ -3278,7 +3289,12 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
return (0);
i = 0;
retrylookup:
- m = vm_page_lookup(object, pindex + i);
+ m = vm_radix_lookup_le(&object->rtree, pindex + i);
+ if (m == NULL || m->pindex != pindex + i) {
+ mpred = m;
+ m = NULL;
+ } else
+ mpred = TAILQ_PREV(m, pglist, listq);
for (; i < count; i++) {
if (m != NULL) {
sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
@@ -3310,8 +3326,9 @@ retrylookup:
if ((allocflags & VM_ALLOC_SBUSY) != 0)
vm_page_sbusy(m);
} else {
- m = vm_page_alloc(object, pindex + i, (allocflags &
- ~VM_ALLOC_IGN_SBUSY) | VM_ALLOC_COUNT(count - i));
+ m = vm_page_alloc_after(object, pindex + i,
+ (allocflags & ~VM_ALLOC_IGN_SBUSY) |
+ VM_ALLOC_COUNT(count - i), mpred);
if (m == NULL) {
if ((allocflags & VM_ALLOC_NOWAIT) != 0)
break;
@@ -3326,7 +3343,7 @@ retrylookup:
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
}
- ma[i] = m;
+ ma[i] = mpred = m;
m = vm_page_next(m);
}
return (i);
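
In vm_page_grab_pages(), a single vm_radix_lookup_le() now returns either the resident page at the requested index or its predecessor, so the allocation path can reuse that result instead of performing its own lookup. The idiom, restated from the hunk above for clarity:

	vm_page_t m, mpred;

	m = vm_radix_lookup_le(&object->rtree, pindex + i);
	if (m == NULL || m->pindex != pindex + i) {
		mpred = m;	/* Predecessor (or NULL); no page at pindex + i. */
		m = NULL;
	} else
		mpred = TAILQ_PREV(m, pglist, listq);	/* Page already resident. */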
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 38d625c..3bb33fc 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -459,7 +459,8 @@ void vm_page_free_zero(vm_page_t m);
void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
-vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
+vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
+vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
vm_paddr_t boundary, vm_memattr_t memattr);