author     kib <kib@FreeBSD.org>  2014-08-24 07:53:15 +0000
committer  kib <kib@FreeBSD.org>  2014-08-24 07:53:15 +0000
commit     25782a7fab8e1a1c60517dab118fec0a98648dd6 (patch)
tree       007faf87a4ebb0e25b4a426bc07ab2babbead757 /sys/vm
parent     bee605bad28d58f69b83b3197efb0bd49b38de99 (diff)
Merge the changes to pmap_enter(9) for sleep-less operation (requested
by flag).  The ia64 pmap.c changes are direct commit, since ia64 is
removed on head.

MFC r269368 (by alc):
Retire PVO_EXECUTABLE.

MFC r269728:
Change pmap_enter(9) interface to take flags parameter and superpage
mapping size (currently unused).

MFC r269759 (by alc):
Update the text of a KASSERT() to reflect the changes in r269728.

MFC r269822 (by alc):
Change {_,}pmap_allocpte() so that they look for the flag
PMAP_ENTER_NOSLEEP instead of M_NOWAIT/M_WAITOK when deciding whether
to sleep on page table page allocation.

MFC r270151 (by alc):
Replace KASSERT that no PV list locks are held with a conditional
unlock.

Reviewed by:	alc
Approved by:	re (gjb)
Sponsored by:	The FreeBSD Foundation
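As context for the diff below: pmap_enter(9) now returns int instead of
void, and its u_int flags argument carries the fault type in the
low-order byte with modifier bits above it.  A minimal caller-side
sketch follows; the helper name is invented, and the assumption that a
PMAP_ENTER_NOSLEEP failure surfaces as KERN_RESOURCE_SHORTAGE comes
from the r269728 interface description, not from this diff.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

/*
 * Hypothetical helper, not part of this commit: wire a page into a
 * pmap without sleeping on page table page allocation.  The access
 * type rides in the low-order byte of the flags word.
 */
static int
try_enter_wired(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	return (pmap_enter(pmap, va, m, prot,
	    prot | PMAP_ENTER_WIRED | PMAP_ENTER_NOSLEEP, 0));
}

A caller that gets a failure back can drop its locks and retry with
sleeping permitted, which is the pattern the r269822 change to
{_,}pmap_allocpte() makes possible.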
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/pmap.h      11
-rw-r--r--  sys/vm/vm_fault.c   6
-rw-r--r--  sys/vm/vm_kern.c   11
3 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 0c45e33..2e7b19d 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -97,6 +97,13 @@ struct thread;
*/
extern vm_offset_t kernel_vm_end;
+/*
+ * Flags for pmap_enter(). The bits in the low-order byte are reserved
+ * for the protection code (vm_prot_t) that describes the fault type.
+ */
+#define PMAP_ENTER_NOSLEEP 0x0100
+#define PMAP_ENTER_WIRED 0x0200
+
void pmap_activate(struct thread *td);
void pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
int advice);
@@ -108,8 +115,8 @@ void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
void pmap_copy_page(vm_page_t, vm_page_t);
void pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset,
vm_page_t mb[], vm_offset_t b_offset, int xfersize);
-void pmap_enter(pmap_t, vm_offset_t, vm_prot_t, vm_page_t,
- vm_prot_t, boolean_t);
+int pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, u_int flags, int8_t psind);
void pmap_enter_object(pmap_t pmap, vm_offset_t start,
vm_offset_t end, vm_page_t m_start, vm_prot_t prot);
void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
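The comment added above reserves the low-order byte of the flags word
for the vm_prot_t fault type, so an implementation can recover both
halves by masking.  A sketch under that assumption; the helper is
invented for illustration and appears in no pmap.

#include <sys/types.h>
#include <vm/vm.h>
#include <vm/pmap.h>

/*
 * Invented illustration: split the combined flags word back into the
 * access type and the modifier bits inside a pmap_enter() implementation.
 */
static void
pmap_enter_decode(u_int flags, vm_prot_t *access, boolean_t *wired,
    boolean_t *nosleep)
{

	*access = flags & VM_PROT_ALL;		/* low-order byte */
	*wired = (flags & PMAP_ENTER_WIRED) != 0;
	*nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
}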
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 7ac8a86..70e1d73 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -907,7 +907,8 @@ vnode_locked:
* back on the active queue until later so that the pageout daemon
* won't find it (yet).
*/
- pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
+ pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
+ fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
VM_OBJECT_WLOCK(fs.object);
@@ -1369,7 +1370,8 @@ again:
* mapping is being replaced by a write-enabled
* mapping, then wire that new mapping.
*/
- pmap_enter(dst_map->pmap, vaddr, access, dst_m, prot, upgrade);
+ pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
+ access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
/*
* Mark it no longer busy, and put it on the active list.
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index dd075c1..4dfb7f5 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -202,8 +202,8 @@ retry:
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
- pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
- TRUE);
+ pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
+ VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
}
VM_OBJECT_WUNLOCK(object);
return (addr);
@@ -255,7 +255,8 @@ retry:
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
- pmap_enter(kernel_pmap, tmp, VM_PROT_ALL, m, VM_PROT_ALL, true);
+ pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
+ VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
tmp += PAGE_SIZE;
}
VM_OBJECT_WUNLOCK(object);
@@ -378,8 +379,8 @@ retry:
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
("kmem_malloc: page %p is managed", m));
m->valid = VM_PAGE_BITS_ALL;
- pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
- TRUE);
+ pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
+ VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
}
VM_OBJECT_WUNLOCK(object);