summary | refs | log | tree | commit | diff | stats
path: root/sys/vm
diff options
context:
space:
mode:
author: alc <alc@FreeBSD.org> 2017-09-17 03:17:23 +0000
committer: Luiz Souza <luiz@netgate.com> 2018-02-21 15:13:39 -0300
commit: ff6af41082b99173f1f3faea0b7f58d1f70384e9 (patch)
tree: 8146c9e3f626a8a299f715482c1244d791fbdf17 /sys/vm
parent: f0ea3d38dfcefcb14e12731990303f5a4ebb5fe4 (diff)
download: FreeBSD-src-ff6af41082b99173f1f3faea0b7f58d1f70384e9.zip
download: FreeBSD-src-ff6af41082b99173f1f3faea0b7f58d1f70384e9.tar.gz
MFC r322296
Introduce vm_page_grab_pages(), which is intended to replace loops calling vm_page_grab() on consecutive page indices. Besides simplifying the code in the caller, vm_page_grab_pages() allows for batching optimizations. For example, the current implementation replaces calls to vm_page_lookup() on consecutive page indices by cheaper calls to vm_page_next().

(cherry picked from commit 9d710dfe3f1905122f3d9e3c84da8e4dc03363ee)
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_glue.c20
-rw-r--r--sys/vm/vm_page.c94
-rw-r--r--sys/vm/vm_page.h16
3 files changed, 112 insertions(+), 18 deletions(-)
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 60b822e..0655f74 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -322,7 +322,7 @@ vm_thread_new(struct thread *td, int pages)
{
vm_object_t ksobj;
vm_offset_t ks;
- vm_page_t m, ma[KSTACK_MAX_PAGES];
+ vm_page_t ma[KSTACK_MAX_PAGES];
struct kstack_cache_entry *ks_ce;
int i;
@@ -391,15 +391,10 @@ vm_thread_new(struct thread *td, int pages)
* page of stack.
*/
VM_OBJECT_WLOCK(ksobj);
- for (i = 0; i < pages; i++) {
- /*
- * Get a kernel stack page.
- */
- m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
- VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
- ma[i] = m;
- m->valid = VM_PAGE_BITS_ALL;
- }
+ vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
+ VM_ALLOC_WIRED, ma, pages);
+ for (i = 0; i < pages; i++)
+ ma[i]->valid = VM_PAGE_BITS_ALL;
VM_OBJECT_WUNLOCK(ksobj);
pmap_qenter(ks, ma, pages);
return (1);
@@ -573,9 +568,8 @@ vm_thread_swapin(struct thread *td)
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
VM_OBJECT_WLOCK(ksobj);
- for (int i = 0; i < pages; i++)
- ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL |
- VM_ALLOC_WIRED);
+ vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED, ma,
+ pages);
for (int i = 0; i < pages;) {
int j, a, count, rv;
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 3a5e84b..39df669 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -3130,6 +3130,100 @@ retrylookup:
}
/*
+ * Return the specified range of pages from the given object. For each
+ * page offset within the range, if a page already exists within the object
+ * at that offset and it is busy, then wait for it to change state. If,
+ * instead, the page doesn't exist, then allocate it.
+ *
+ * The caller must always specify an allocation class.
+ *
+ * allocation classes:
+ * VM_ALLOC_NORMAL normal process request
+ * VM_ALLOC_SYSTEM system *really* needs the pages
+ *
+ * The caller must always specify that the pages are to be busied and/or
+ * wired.
+ *
+ * optional allocation flags:
+ * VM_ALLOC_IGN_SBUSY do not sleep on soft busy pages
+ * VM_ALLOC_NOBUSY do not exclusive busy the page
+ * VM_ALLOC_SBUSY set page to sbusy state
+ * VM_ALLOC_WIRED wire the pages
+ * VM_ALLOC_ZERO zero and validate any invalid pages
+ *
+ * This routine may sleep.
+ */
+void
+vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
+ vm_page_t *ma, int count)
+{
+ vm_page_t m;
+ int i;
+ bool sleep;
+
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
+ ("vm_page_grap_pages: VM_ALLOC_COUNT() is not allowed"));
+ KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
+ (allocflags & VM_ALLOC_WIRED) != 0,
+ ("vm_page_grab_pages: the pages must be busied or wired"));
+ KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
+ (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
+ ("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch"));
+ if (count == 0)
+ return;
+ i = 0;
+retrylookup:
+ m = vm_page_lookup(object, pindex + i);
+ for (; i < count; i++) {
+ if (m != NULL) {
+ sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
+ vm_page_xbusied(m) : vm_page_busied(m);
+ if (sleep) {
+ /*
+ * Reference the page before unlocking and
+ * sleeping so that the page daemon is less
+ * likely to reclaim it.
+ */
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ vm_page_lock(m);
+ VM_OBJECT_WUNLOCK(object);
+ vm_page_busy_sleep(m, "grbmaw", (allocflags &
+ VM_ALLOC_IGN_SBUSY) != 0);
+ VM_OBJECT_WLOCK(object);
+ goto retrylookup;
+ }
+ if ((allocflags & VM_ALLOC_WIRED) != 0) {
+ vm_page_lock(m);
+ vm_page_wire(m);
+ vm_page_unlock(m);
+ }
+ if ((allocflags & (VM_ALLOC_NOBUSY |
+ VM_ALLOC_SBUSY)) == 0)
+ vm_page_xbusy(m);
+ if ((allocflags & VM_ALLOC_SBUSY) != 0)
+ vm_page_sbusy(m);
+ } else {
+ m = vm_page_alloc(object, pindex + i, (allocflags &
+ ~VM_ALLOC_IGN_SBUSY) | VM_ALLOC_COUNT(count - i));
+ if (m == NULL) {
+ VM_OBJECT_WUNLOCK(object);
+ VM_WAIT;
+ VM_OBJECT_WLOCK(object);
+ goto retrylookup;
+ }
+ }
+ if (m->valid == 0 && (allocflags & VM_ALLOC_ZERO) != 0) {
+ if ((m->flags & PG_ZERO) == 0)
+ pmap_zero_page(m);
+ m->valid = VM_PAGE_BITS_ALL;
+ }
+ ma[i] = m;
+ m = vm_page_next(m);
+ }
+}
+
+/*
* Mapping function for valid or dirty bits in a page.
*
* Inputs are required to range within a page.
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 00d04c2..e3df347 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -387,6 +387,9 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
* vm_page_alloc_freelist(). Some functions support only a subset
* of the flags, and ignore others, see the flags legend.
*
+ * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
+ * and the vm_page_grab*() functions. See these functions for details.
+ *
* Bits 0 - 1 define class.
* Bits 2 - 15 dedicated for flags.
* Legend:
@@ -394,6 +397,7 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
* (c) - vm_page_alloc_contig() supports the flag.
* (f) - vm_page_alloc_freelist() supports the flag.
* (g) - vm_page_grab() supports the flag.
+ * (p) - vm_page_grab_pages() supports the flag.
* Bits above 15 define the count of additional pages that the caller
* intends to allocate.
*/
@@ -401,15 +405,15 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
#define VM_ALLOC_INTERRUPT 1
#define VM_ALLOC_SYSTEM 2
#define VM_ALLOC_CLASS_MASK 3
-#define VM_ALLOC_WIRED 0x0020 /* (acfg) Allocate non pageable page */
-#define VM_ALLOC_ZERO 0x0040 /* (acfg) Try to obtain a zeroed page */
+#define VM_ALLOC_WIRED 0x0020 /* (acfgp) Allocate a wired page */
+#define VM_ALLOC_ZERO 0x0040 /* (acfgp) Allocate a prezeroed page */
#define VM_ALLOC_NOOBJ 0x0100 /* (acg) No associated object */
-#define VM_ALLOC_NOBUSY 0x0200 /* (acg) Do not busy the page */
+#define VM_ALLOC_NOBUSY 0x0200 /* (acgp) Do not excl busy the page */
#define VM_ALLOC_IFCACHED 0x0400
#define VM_ALLOC_IFNOTCACHED 0x0800
-#define VM_ALLOC_IGN_SBUSY 0x1000 /* (g) Ignore shared busy flag */
+#define VM_ALLOC_IGN_SBUSY 0x1000 /* (gp) Ignore shared busy flag */
#define VM_ALLOC_NODUMP 0x2000 /* (ag) don't include in dump */
-#define VM_ALLOC_SBUSY 0x4000 /* (acg) Shared busy the page */
+#define VM_ALLOC_SBUSY 0x4000 /* (acgp) Shared busy the page */
#define VM_ALLOC_NOWAIT 0x8000 /* (g) Do not sleep, return NULL */
#define VM_ALLOC_COUNT_SHIFT 16
#define VM_ALLOC_COUNT(count) ((count) << VM_ALLOC_COUNT_SHIFT)
@@ -450,6 +454,8 @@ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
vm_page_t vm_page_alloc_freelist(int, int);
void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
+void vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
+ vm_page_t *ma, int count);
int vm_page_try_to_free (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
OpenPOWER on IntegriCloud