summaryrefslogtreecommitdiffstats
path: root/sys/vm/vm_fault.c
diff options
context:
space:
mode:
author: alc <alc@FreeBSD.org> 2010-12-25 21:26:56 +0000
committer: alc <alc@FreeBSD.org> 2010-12-25 21:26:56 +0000
commit971b02b7bcce9c49f919797dcad7b646c206b7b6 (patch)
tree069da87e128b6f0f35be7629bb29fa3f3c2db97e /sys/vm/vm_fault.c
parent8e6dc31ba726f1333c1f0a995641a8d976ea3b4b (diff)
downloadFreeBSD-src-971b02b7bcce9c49f919797dcad7b646c206b7b6.zip
FreeBSD-src-971b02b7bcce9c49f919797dcad7b646c206b7b6.tar.gz
Introduce and use a new VM interface for temporarily pinning pages. This
new interface replaces the combined use of vm_fault_quick() and pmap_extract_and_hold() throughout the kernel. In collaboration with: kib@
Diffstat (limited to 'sys/vm/vm_fault.c')
-rw-r--r--  sys/vm/vm_fault.c | 75
1 file changed, 75 insertions, 0 deletions
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 57b72e5..8e649d2 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1045,6 +1045,81 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
}
/*
+ * Hold each of the physical pages that are mapped by the specified range of
+ * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
+ * and allow the specified types of access, "prot". If all of the implied
+ * pages are successfully held, then the number of held pages is returned
+ * together with pointers to those pages in the array "ma". However, if any
+ * of the pages cannot be held, -1 is returned.
+ */
+/*
+ * vm_fault_quick_hold_pages:
+ *
+ * Wire-down helper: hold every physical page backing ["addr", "addr"+"len")
+ * in "map", provided each mapping is valid and permits "prot" access.
+ * On success the pages are stored in "ma" and their count is returned;
+ * on any failure -1 is returned and no pages remain held.
+ */
+int
+vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
+ vm_prot_t prot, vm_page_t *ma, int max_count)
+{
+ vm_offset_t end, va;
+ vm_page_t *mp;
+ int count;
+ boolean_t pmap_failed;
+
+ end = round_page(addr + len);
+ addr = trunc_page(addr);
+
+ /*
+ * Check for illegal addresses.  Note that "addr > end" also catches
+ * wraparound of "addr + len" past the top of the address space.
+ */
+ if (addr < vm_map_min(map) || addr > end || end > vm_map_max(map))
+ return (-1);
+
+ count = howmany(end - addr, PAGE_SIZE);
+ if (count > max_count)
+ panic("vm_fault_quick_hold_pages: count > max_count");
+
+ /*
+ * Most likely, the physical pages are resident in the pmap, so it is
+ * faster to try pmap_extract_and_hold() first.
+ */
+ pmap_failed = FALSE;
+ for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
+ *mp = pmap_extract_and_hold(map->pmap, va, prot);
+ if (*mp == NULL)
+ pmap_failed = TRUE;
+ else if ((prot & VM_PROT_WRITE) != 0 &&
+ (*mp)->dirty != VM_PAGE_BITS_ALL) {
+ /*
+ * Explicitly dirty the physical page.  Otherwise, the
+ * caller's changes may go unnoticed because they are
+ * performed through an unmanaged mapping or by a DMA
+ * operation.
+ *
+ * Note: this must test the CURRENT page, "*mp", not
+ * "*ma" (the first page of the array); otherwise only
+ * the first page's dirty state is ever consulted.
+ */
+ vm_page_lock_queues();
+ vm_page_dirty(*mp);
+ vm_page_unlock_queues();
+ }
+ }
+ if (pmap_failed) {
+ /*
+ * One or more pages could not be held by the pmap.  Either no
+ * page was mapped at the specified virtual address or that
+ * mapping had insufficient permissions.  Attempt to fault in
+ * and hold these pages.
+ */
+ for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
+ if (*mp == NULL && vm_fault_hold(map, va, prot,
+ VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
+ goto error;
+ }
+ return (count);
+error:
+ /* Release every page held so far; NULL slots were never held. */
+ for (mp = ma; mp < ma + count; mp++)
+ if (*mp != NULL) {
+ vm_page_lock(*mp);
+ vm_page_unhold(*mp);
+ vm_page_unlock(*mp);
+ }
+ return (-1);
+}
+
+/*
* vm_fault_quick:
*
* Ensure that the requested virtual address, which may be in userland,
OpenPOWER on IntegriCloud