author     kib <kib@FreeBSD.org>  2011-02-15 09:03:58 +0000
committer  kib <kib@FreeBSD.org>  2011-02-15 09:03:58 +0000
commit     d20e0514a9bbaa474f28ba4e7acdcd82eb34d41d (patch)
tree       789f50bb5d8c1ddd35a0410a53d7a5fd81ad15f7 /sys/vm/vm_kern.c
parent     f7c03d5c1934e248a7ea0a85a4008cc92287845a (diff)
Since r218070 reenabled the call to vm_map_simplify_entry() from
vm_map_insert(), the kmem_back() assumption about the newly inserted
entry might be broken due to the interference of two factors. Under low
memory conditions, when vm_page_alloc() returns NULL, the supplied map
is unlocked. If another thread performs kmem_malloc() in the meantime,
and its map entry is placed right next to our thread's entry in the
map, both entries' wire count is still 0 and the entries are coalesced
by vm_map_simplify_entry().

Mark the new entry with MAP_ENTRY_IN_TRANSITION to prevent the
coalesce. Fix some style issues and tighten the assertions to account
for the MAP_ENTRY_IN_TRANSITION state.

Reported and tested by:	pho
Reviewed by:	alc
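To make the coalescing hazard concrete, here is a minimal standalone
sketch (not the FreeBSD source; the struct layout, the flag value, and
can_coalesce() are simplified stand-ins) of the adjacency test that
vm_map_simplify_entry() performs. Neighboring entries merge only when
their attributes are identical, so setting MAP_ENTRY_IN_TRANSITION on
our entry before the map is unlocked makes its eflags differ from any
neighbor's and blocks the merge:

#include <stdbool.h>

#define MAP_ENTRY_IN_TRANSITION	0x0100	/* illustrative value, not necessarily FreeBSD's */

struct map_entry {
	unsigned long	start, end;	/* address range covered */
	int		eflags;		/* entry flags */
	int		wired_count;	/* wiring reference count */
	void		*object;	/* backing VM object */
};

/*
 * Simplified stand-in for the mergeability test inside
 * vm_map_simplify_entry(): adjacent entries coalesce only if their
 * ranges abut and their attributes are identical.
 */
static bool
can_coalesce(const struct map_entry *prev, const struct map_entry *cur)
{
	return (prev->end == cur->start &&
	    prev->object == cur->object &&
	    prev->eflags == cur->eflags &&	/* IN_TRANSITION breaks equality */
	    prev->wired_count == cur->wired_count);
}

Before this change, the sleeping thread's entry and the neighbor
inserted by a racing kmem_malloc() both had wired_count == 0 and
identical eflags, so a test like the one above succeeded and the
entries merged; the post-allocation lookup then found an entry whose
bounds no longer matched addr/addr + size, tripping the old
"kmem_malloc: entry not found or misaligned" panic that this commit
converts into assertions.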
Diffstat (limited to 'sys/vm/vm_kern.c')
-rw-r--r--  sys/vm/vm_kern.c | 32 +++++++++++++++++++++++++---------
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index ae85db3..0360af7 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -353,12 +353,24 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
 	vm_map_entry_t entry;
 	vm_page_t m;
 	int pflags;
+	boolean_t found;
 
 	KASSERT(vm_map_locked(map), ("kmem_back: map %p is not locked", map));
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	vm_object_reference(kmem_object);
 	vm_map_insert(map, kmem_object, offset, addr, addr + size,
-		VM_PROT_ALL, VM_PROT_ALL, 0);
+	    VM_PROT_ALL, VM_PROT_ALL, 0);
+
+	/*
+	 * Assert: vm_map_insert() will never be able to extend the
+	 * previous entry so vm_map_lookup_entry() will find a new
+	 * entry exactly corresponding to this address range and it
+	 * will have wired_count == 0.
+	 */
+	found = vm_map_lookup_entry(map, addr, &entry);
+	KASSERT(found && entry->start == addr && entry->end == addr + size &&
+	    entry->wired_count == 0 && (entry->eflags & MAP_ENTRY_IN_TRANSITION)
+	    == 0, ("kmem_back: entry not found or misaligned"));
 
 	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
 		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
@@ -381,9 +393,15 @@ retry:
 		if (m == NULL) {
 			if ((flags & M_NOWAIT) == 0) {
 				VM_OBJECT_UNLOCK(kmem_object);
+				entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 				vm_map_unlock(map);
 				VM_WAIT;
 				vm_map_lock(map);
+				KASSERT(
+(entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_NEEDS_WAKEUP)) ==
+				    MAP_ENTRY_IN_TRANSITION,
+				    ("kmem_back: volatile entry"));
+				entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
 				VM_OBJECT_LOCK(kmem_object);
 				goto retry;
 			}
@@ -413,15 +431,11 @@ retry:
 	VM_OBJECT_UNLOCK(kmem_object);
 
 	/*
-	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
-	 * be able to extend the previous entry so there will be a new entry
-	 * exactly corresponding to this address range and it will have
-	 * wired_count == 0.
+	 * Mark map entry as non-pageable. Repeat the assert.
 	 */
-	if (!vm_map_lookup_entry(map, addr, &entry) ||
-	    entry->start != addr || entry->end != addr + size ||
-	    entry->wired_count != 0)
-		panic("kmem_malloc: entry not found or misaligned");
+	KASSERT(entry->start == addr && entry->end == addr + size &&
+	    entry->wired_count == 0,
+	    ("kmem_back: entry not found or misaligned after allocation"));
 	entry->wired_count = 1;
 
 	/*
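The locking discipline the second hunk enforces can be shown in
miniature. Below is a hedged, standalone sketch in plain C with
pthreads (every name in it is hypothetical, not FreeBSD API): before
dropping its lock to sleep, the owner flags the structure so that
concurrent lock holders leave it alone, then verifies the flag survived
before clearing it. That is the role MAP_ENTRY_IN_TRANSITION plays
around VM_WAIT above:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-in for a map entry protected by map_lock. */
struct entry {
	bool	in_transition;	/* set while the owner sleeps unlocked */
	int	wired_count;
};

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  memory_freed = PTHREAD_COND_INITIALIZER;

/*
 * Mirror of the kmem_back() retry path; map_lock must be held on
 * entry.  Mark the entry busy, atomically drop the lock and sleep
 * until resources appear (the analogue of vm_map_unlock/VM_WAIT/
 * vm_map_lock), then assert that nothing merged or mutated the entry
 * while it was unprotected.  Any coalescing code is expected to skip
 * entries with in_transition set, just as vm_map_simplify_entry()
 * skips entries marked MAP_ENTRY_IN_TRANSITION.
 */
static void
wait_for_memory(struct entry *e)
{
	e->in_transition = true;	/* visible to other lock holders */
	pthread_cond_wait(&memory_freed, &map_lock);
	assert(e->in_transition);	/* entry survived untouched */
	e->in_transition = false;
}

The design choice matches the commit: widening the lock's scope is not
an option, since VM_WAIT must sleep without the map lock held, so the
entry instead advertises its volatile state and the invariant is
re-checked cheaply on wakeup.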