summaryrefslogtreecommitdiffstats
path: root/sys/vm/vm_map.c
diff options
context:
space:
mode:
authortegge <tegge@FreeBSD.org>2001-10-14 20:47:08 +0000
committertegge <tegge@FreeBSD.org>2001-10-14 20:47:08 +0000
commit2ac23a80c8fd6e5deb7056e5e94673f210a49a81 (patch)
tree5a4079ff7a06c76d260079e2a745da0728f6b38f /sys/vm/vm_map.c
parent8ef8a1b13f436d2f49044e9cab13f39e36047b92 (diff)
downloadFreeBSD-src-2ac23a80c8fd6e5deb7056e5e94673f210a49a81.zip
FreeBSD-src-2ac23a80c8fd6e5deb7056e5e94673f210a49a81.tar.gz
Fix locking violations during page wiring:
- vm map entries are not valid after the map has been unlocked.
- An exclusive lock on the map is needed before calling vm_map_simplify_entry().

Fix cleanup after page wiring failure to unwire all pages that had been successfully wired before the failure was detected.

Reviewed by: dillon
Diffstat (limited to 'sys/vm/vm_map.c')
-rw-r--r-- sys/vm/vm_map.c | 35
1 files changed, 32 insertions, 3 deletions
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 895775b5..9bbe14b 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1412,6 +1412,7 @@ vm_map_user_pageable(
vm_map_entry_t entry;
vm_map_entry_t start_entry;
vm_offset_t estart;
+ vm_offset_t eend;
int rv;
vm_map_lock(map);
@@ -1489,6 +1490,7 @@ vm_map_user_pageable(
entry->wired_count++;
entry->eflags |= MAP_ENTRY_USER_WIRED;
estart = entry->start;
+ eend = entry->end;
/* First we need to allow map modifications */
vm_map_set_recursive(map);
@@ -1503,8 +1505,15 @@ vm_map_user_pageable(
vm_map_clear_recursive(map);
vm_map_unlock(map);
-
- (void) vm_map_user_pageable(map, start, entry->start, TRUE);
+
+ /*
+ * At this point, the map is unlocked, and
+ * entry might no longer be valid. Use copy
+ * of entry start value obtained while entry
+ * was valid.
+ */
+ (void) vm_map_user_pageable(map, start, estart,
+ TRUE);
return rv;
}
@@ -1514,9 +1523,15 @@ vm_map_user_pageable(
if (vm_map_lookup_entry(map, estart, &entry)
== FALSE) {
vm_map_unlock(map);
+ /*
+ * vm_fault_user_wire succeded, thus
+ * the area between start and eend
+ * is wired and has to be unwired
+ * here as part of the cleanup.
+ */
(void) vm_map_user_pageable(map,
start,
- estart,
+ eend,
TRUE);
return (KERN_INVALID_ADDRESS);
}
@@ -1753,6 +1768,20 @@ vm_map_pageable(
(void) vm_map_pageable(map, start, failed, TRUE);
return (rv);
}
+ /*
+ * An exclusive lock on the map is needed in order to call
+ * vm_map_simplify_entry(). If the current lock on the map
+ * is only a shared lock, an upgrade is needed.
+ */
+ if (vm_map_pmap(map) != kernel_pmap &&
+ vm_map_lock_upgrade(map)) {
+ vm_map_lock(map);
+ if (vm_map_lookup_entry(map, start, &start_entry) ==
+ FALSE) {
+ vm_map_unlock(map);
+ return KERN_SUCCESS;
+ }
+ }
vm_map_simplify_entry(map, start_entry);
}
OpenPOWER on IntegriCloud