summaryrefslogtreecommitdiffstats
path: root/sys/vm/vm_object.c
diff options
context:
space:
mode:
authorkib <kib@FreeBSD.org>2014-09-01 07:58:15 +0000
committerkib <kib@FreeBSD.org>2014-09-01 07:58:15 +0000
commit798eea16149d6a39c6fb5f721410f61b5bb1134a (patch)
tree8a38ed27916582f1759cfcc70e76cd5fabbebce1 /sys/vm/vm_object.c
parent14d8fe45061d6304d3e6438cfe4267aa7e17c705 (diff)
downloadFreeBSD-src-798eea16149d6a39c6fb5f721410f61b5bb1134a.zip
FreeBSD-src-798eea16149d6a39c6fb5f721410f61b5bb1134a.tar.gz
Fix a leak of the wired pages when unwiring of the PROT_NONE-mapped
wired region. Rework the handling of unwire to do it in batch, both at pmap and object level. All commits below are by alc. MFC r268327: Introduce pmap_unwire(). MFC r268591: Implement pmap_unwire() for powerpc. MFC r268776: Implement pmap_unwire() for arm. MFC r268806: pmap_unwire(9) man page. MFC r269134: When unwiring a region of an address space, do not assume that the underlying physical pages are mapped by the pmap. This fixes a leak of the wired pages on the unwiring of the region mapped with no access allowed. MFC r269339: In the implementation of the new function pmap_unwire(), the call to MOEA64_PVO_TO_PTE() must be performed before any changes are made to the PVO. Otherwise, MOEA64_PVO_TO_PTE() will panic. MFC r269365: Correct a long-standing problem in moea{,64}_pvo_enter() that was revealed by the combination of r268591 and r269134: When we attempt to add the wired attribute to an existing mapping, moea{,64}_pvo_enter() do nothing. (They only set the wired attribute on newly created mappings.) MFC r269433: Handle wiring failures in vm_map_wire() with the new functions pmap_unwire() and vm_object_unwire(). Retire vm_fault_{un,}wire(), since they are no longer used. MFC r269438: Rewrite a loop in vm_map_wire() so that gcc doesn't think that the variable "rv" is uninitialized. MFC r269485: Retire pmap_change_wiring(). Reviewed by: alc
Diffstat (limited to 'sys/vm/vm_object.c')
-rw-r--r--sys/vm/vm_object.c72
1 files changed, 72 insertions, 0 deletions
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 6cfb0d4..94c3d30 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -2203,6 +2203,78 @@ vm_object_set_writeable_dirty(vm_object_t object)
vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
}
+/*
+ * vm_object_unwire:
+ *
+ * For each page offset within the specified range of the given object,
+ * find the highest-level page in the shadow chain and unwire it. A page
+ * must exist at every page offset, and the highest-level page must be
+ * wired.
+ */
+void
+vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
+ uint8_t queue)
+{
+ vm_object_t tobject;
+ vm_page_t m, tm;
+ vm_pindex_t end_pindex, pindex, tpindex;
+ int depth, locked_depth;
+
+ KASSERT((offset & PAGE_MASK) == 0,
+ ("vm_object_unwire: offset is not page aligned"));
+ KASSERT((length & PAGE_MASK) == 0,
+ ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
+ /* The wired count of a fictitious page never changes. */
+ if ((object->flags & OBJ_FICTITIOUS) != 0)
+ return;
+ pindex = OFF_TO_IDX(offset);
+ end_pindex = pindex + atop(length);
+ /*
+ * locked_depth counts how many objects along the shadow chain,
+ * starting with the top-level object, currently hold our read
+ * lock. Locks are accumulated as the chain is descended and all
+ * of them are released together at the end of the function.
+ */
+ locked_depth = 1;
+ VM_OBJECT_RLOCK(object);
+ m = vm_page_find_least(object, pindex);
+ while (pindex < end_pindex) {
+ if (m == NULL || pindex < m->pindex) {
+ /*
+ * The first object in the shadow chain doesn't
+ * contain a page at the current index. Therefore,
+ * the page must exist in a backing object.
+ */
+ tobject = object;
+ tpindex = pindex;
+ depth = 0;
+ do {
+ /*
+ * Translate the offset into the backing
+ * object's index space before descending.
+ */
+ tpindex +=
+ OFF_TO_IDX(tobject->backing_object_offset);
+ tobject = tobject->backing_object;
+ KASSERT(tobject != NULL,
+ ("vm_object_unwire: missing page"));
+ /* Fictitious pages are never unwired; skip. */
+ if ((tobject->flags & OBJ_FICTITIOUS) != 0)
+ goto next_page;
+ depth++;
+ /*
+ * Lock each newly reached object exactly
+ * once; deeper iterations reuse the locks
+ * already held.
+ */
+ if (depth == locked_depth) {
+ locked_depth++;
+ VM_OBJECT_RLOCK(tobject);
+ }
+ } while ((tm = vm_page_lookup(tobject, tpindex)) ==
+ NULL);
+ } else {
+ /* Page resident in the top-level object. */
+ tm = m;
+ m = TAILQ_NEXT(m, listq);
+ }
+ vm_page_lock(tm);
+ vm_page_unwire(tm, queue);
+ vm_page_unlock(tm);
+next_page:
+ pindex++;
+ }
+ /* Release the accumulated object locks. */
+ for (depth = 0; depth < locked_depth; depth++) {
+ tobject = object->backing_object;
+ VM_OBJECT_RUNLOCK(object);
+ object = tobject;
+ }
+}
+
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>
OpenPOWER on IntegriCloud