path: root/sys/vm/vm_map.c
author		alc <alc@FreeBSD.org>	2002-06-14 18:21:01 +0000
committer	alc <alc@FreeBSD.org>	2002-06-14 18:21:01 +0000
commit		42cf959f1837b06c3ec3b8fa7bb33a47c85b92af (patch)
tree		e222c1be9c51b6924d642c93a4e8629cdae24952 /sys/vm/vm_map.c
parent		ddf3317becaf1f927336e43f96f900868c8d0a80 (diff)
o Use vm_map_wire() and vm_map_unwire() in place of vm_map_pageable() and
  vm_map_user_pageable().
o Remove vm_map_pageable() and vm_map_user_pageable().
o Remove vm_map_clear_recursive() and vm_map_set_recursive().  (They were
  only used by vm_map_pageable() and vm_map_user_pageable().)

Reviewed by:	tegge
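For context, the sketch below shows how a caller of the removed interfaces
might be converted to the new ones.  It is illustrative only: the wrapper
wire_user_range() is hypothetical, and the vm_map_wire()/vm_map_unwire()
prototypes are assumed to take the same (map, start, end) range plus a
boolean selecting user (mlock-style) wiring; check sys/vm/vm_map.h in the
tree for the exact signatures.

/*
 * Hypothetical example, not part of this commit.  Where old code wired
 * a user range with vm_map_user_pageable(map, start, end, FALSE) and
 * later unwired it with vm_map_user_pageable(map, start, end, TRUE),
 * it would now call vm_map_wire() and vm_map_unwire() directly.  The
 * final boolean argument (user wiring) is an assumption here.
 */
static int
wire_user_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int rv;

	/* Wire the pages down, as mlock(2) would. */
	rv = vm_map_wire(map, start, end, TRUE);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* ... operate on the wired range ... */

	/* Drop the user wiring again. */
	return (vm_map_unwire(map, start, end, TRUE));
}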
Diffstat (limited to 'sys/vm/vm_map.c')
-rw-r--r--	sys/vm/vm_map.c	397

1 files changed, 0 insertions, 397 deletions
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index fadf7cc..85b88b4 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -413,16 +413,6 @@ _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
("%s: lock not held", __func__));
}
-void
-_vm_map_set_recursive(vm_map_t map, const char *file, int line)
-{
-}
-
-void
-_vm_map_clear_recursive(vm_map_t map, const char *file, int line)
-{
-}
-
/*
* vm_map_unlock_and_wait:
*/
@@ -1803,393 +1793,6 @@ done:
}
/*
- * Implement the semantics of mlock
- */
-int
-vm_map_user_pageable(
- vm_map_t map,
- vm_offset_t start,
- vm_offset_t end,
- boolean_t new_pageable)
-{
- vm_map_entry_t entry;
- vm_map_entry_t start_entry;
- vm_offset_t estart;
- vm_offset_t eend;
- int rv;
-
- vm_map_lock(map);
- VM_MAP_RANGE_CHECK(map, start, end);
-
- if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
- vm_map_unlock(map);
- return (KERN_INVALID_ADDRESS);
- }
-
- if (new_pageable) {
-
- entry = start_entry;
- vm_map_clip_start(map, entry, start);
-
- /*
- * Now decrement the wiring count for each region. If a region
- * becomes completely unwired, unwire its physical pages and
- * mappings.
- */
- while ((entry != &map->header) && (entry->start < end)) {
- if (entry->eflags & MAP_ENTRY_USER_WIRED) {
- vm_map_clip_end(map, entry, end);
- entry->eflags &= ~MAP_ENTRY_USER_WIRED;
- entry->wired_count--;
- if (entry->wired_count == 0)
- vm_fault_unwire(map, entry->start, entry->end);
- }
- vm_map_simplify_entry(map,entry);
- entry = entry->next;
- }
- } else {
-
- entry = start_entry;
-
- while ((entry != &map->header) && (entry->start < end)) {
-
- if (entry->eflags & MAP_ENTRY_USER_WIRED) {
- entry = entry->next;
- continue;
- }
-
- if (entry->wired_count != 0) {
- entry->wired_count++;
- entry->eflags |= MAP_ENTRY_USER_WIRED;
- entry = entry->next;
- continue;
- }
-
- /* Here on entry being newly wired */
-
- if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
- int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
- if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
-
- vm_object_shadow(&entry->object.vm_object,
- &entry->offset,
- atop(entry->end - entry->start));
- entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
-
- } else if (entry->object.vm_object == NULL &&
- !map->system_map) {
-
- entry->object.vm_object =
- vm_object_allocate(OBJT_DEFAULT,
- atop(entry->end - entry->start));
- entry->offset = (vm_offset_t) 0;
-
- }
- }
-
- vm_map_clip_start(map, entry, start);
- vm_map_clip_end(map, entry, end);
-
- entry->wired_count++;
- entry->eflags |= MAP_ENTRY_USER_WIRED;
- estart = entry->start;
- eend = entry->end;
-
- /* First we need to allow map modifications */
- vm_map_set_recursive(map);
- vm_map_lock_downgrade(map);
- map->timestamp++;
-
- rv = vm_fault_user_wire(map, entry->start, entry->end);
- if (rv) {
-
- entry->wired_count--;
- entry->eflags &= ~MAP_ENTRY_USER_WIRED;
-
- vm_map_clear_recursive(map);
- vm_map_unlock(map);
-
- /*
- * At this point, the map is unlocked, and
- * entry might no longer be valid. Use copy
- * of entry start value obtained while entry
- * was valid.
- */
- (void) vm_map_user_pageable(map, start, estart,
- TRUE);
- return rv;
- }
-
- vm_map_clear_recursive(map);
- if (vm_map_lock_upgrade(map)) {
- vm_map_lock(map);
- if (vm_map_lookup_entry(map, estart, &entry)
- == FALSE) {
- vm_map_unlock(map);
- /*
- * vm_fault_user_wire succeeded, thus
- * the area between start and eend
- * is wired and has to be unwired
- * here as part of the cleanup.
- */
- (void) vm_map_user_pageable(map,
- start,
- eend,
- TRUE);
- return (KERN_INVALID_ADDRESS);
- }
- }
- vm_map_simplify_entry(map,entry);
- }
- }
- map->timestamp++;
- vm_map_unlock(map);
- return KERN_SUCCESS;
-}
-
-/*
- * vm_map_pageable:
- *
- * Sets the pageability of the specified address
- * range in the target map. Regions specified
- * as not pageable require locked-down physical
- * memory and physical page maps.
- *
- * The map must not be locked, but a reference
- * must remain to the map throughout the call.
- */
-int
-vm_map_pageable(
- vm_map_t map,
- vm_offset_t start,
- vm_offset_t end,
- boolean_t new_pageable)
-{
- vm_map_entry_t entry;
- vm_map_entry_t start_entry;
- vm_offset_t failed = 0;
- int rv;
-
- GIANT_REQUIRED;
-
- vm_map_lock(map);
-
- VM_MAP_RANGE_CHECK(map, start, end);
-
- /*
- * Only one pageability change may take place at one time, since
- * vm_fault assumes it will be called only once for each
- * wiring/unwiring. Therefore, we have to make sure we're actually
- * changing the pageability for the entire region. We do so before
- * making any changes.
- */
- if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
- vm_map_unlock(map);
- return (KERN_INVALID_ADDRESS);
- }
- entry = start_entry;
-
- /*
- * Actions are rather different for wiring and unwiring, so we have
- * two separate cases.
- */
- if (new_pageable) {
- vm_map_clip_start(map, entry, start);
-
- /*
- * Unwiring. First ensure that the range to be unwired is
- * really wired down and that there are no holes.
- */
- while ((entry != &map->header) && (entry->start < end)) {
- if (entry->wired_count == 0 ||
- (entry->end < end &&
- (entry->next == &map->header ||
- entry->next->start > entry->end))) {
- vm_map_unlock(map);
- return (KERN_INVALID_ARGUMENT);
- }
- entry = entry->next;
- }
-
- /*
- * Now decrement the wiring count for each region. If a region
- * becomes completely unwired, unwire its physical pages and
- * mappings.
- */
- entry = start_entry;
- while ((entry != &map->header) && (entry->start < end)) {
- vm_map_clip_end(map, entry, end);
-
- entry->wired_count--;
- if (entry->wired_count == 0)
- vm_fault_unwire(map, entry->start, entry->end);
-
- vm_map_simplify_entry(map, entry);
-
- entry = entry->next;
- }
- } else {
- /*
- * Wiring. We must do this in two passes:
- *
- * 1. Holding the write lock, we create any shadow or zero-fill
- * objects that need to be created. Then we clip each map
- * entry to the region to be wired and increment its wiring
- * count. We create objects before clipping the map entries
- * to avoid object proliferation.
- *
- * 2. We downgrade to a read lock, and call vm_fault_wire to
- * fault in the pages for any newly wired area (wired_count is
- * 1).
- *
- * Downgrading to a read lock for vm_fault_wire avoids a possible
- * deadlock with another process that may have faulted on one
- * of the pages to be wired (it would mark the page busy,
- * blocking us, then in turn block on the map lock that we
- * hold). Because of problems in the recursive lock package,
- * we cannot upgrade to a write lock in vm_map_lookup. Thus,
- * any actions that require the write lock must be done
- * beforehand. Because we keep the read lock on the map, the
- * copy-on-write status of the entries we modify here cannot
- * change.
- */
-
- /*
- * Pass 1.
- */
- while ((entry != &map->header) && (entry->start < end)) {
- if (entry->wired_count == 0) {
-
- /*
- * Perform actions of vm_map_lookup that need
- * the write lock on the map: create a shadow
- * object for a copy-on-write region, or an
- * object for a zero-fill region.
- *
- * We don't have to do this for entries that
- * point to sub maps, because we won't
- * hold the lock on the sub map.
- */
- if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
- int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
- if (copyflag &&
- ((entry->protection & VM_PROT_WRITE) != 0)) {
-
- vm_object_shadow(&entry->object.vm_object,
- &entry->offset,
- atop(entry->end - entry->start));
- entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
- } else if (entry->object.vm_object == NULL &&
- !map->system_map) {
- entry->object.vm_object =
- vm_object_allocate(OBJT_DEFAULT,
- atop(entry->end - entry->start));
- entry->offset = (vm_offset_t) 0;
- }
- }
- }
- vm_map_clip_start(map, entry, start);
- vm_map_clip_end(map, entry, end);
- entry->wired_count++;
-
- /*
- * Check for holes
- */
- if (entry->end < end &&
- (entry->next == &map->header ||
- entry->next->start > entry->end)) {
- /*
- * Found one. Object creation actions do not
- * need to be undone, but the wired counts
- * need to be restored.
- */
- while (entry != &map->header && entry->end > start) {
- entry->wired_count--;
- entry = entry->prev;
- }
- vm_map_unlock(map);
- return (KERN_INVALID_ARGUMENT);
- }
- entry = entry->next;
- }
-
- /*
- * Pass 2.
- */
-
- /*
- * HACK HACK HACK HACK
- *
- * If we are wiring in the kernel map or a submap of it,
- * unlock the map to avoid deadlocks. We trust that the
- * kernel is well-behaved, and therefore will not do
- * anything destructive to this region of the map while
- * we have it unlocked. We cannot trust user processes
- * to do the same.
- *
- * HACK HACK HACK HACK
- */
- if (vm_map_pmap(map) == kernel_pmap) {
- vm_map_unlock(map); /* trust me ... */
- } else {
- vm_map_lock_downgrade(map);
- }
-
- rv = 0;
- entry = start_entry;
- while (entry != &map->header && entry->start < end) {
- /*
- * If vm_fault_wire fails for any page we need to undo
- * what has been done. We decrement the wiring count
- * for those pages which have not yet been wired (now)
- * and unwire those that have (later).
- *
- * XXX this violates the locking protocol on the map,
- * needs to be fixed.
- */
- if (rv)
- entry->wired_count--;
- else if (entry->wired_count == 1) {
- rv = vm_fault_wire(map, entry->start, entry->end);
- if (rv) {
- failed = entry->start;
- entry->wired_count--;
- }
- }
- entry = entry->next;
- }
-
- if (vm_map_pmap(map) == kernel_pmap) {
- vm_map_lock(map);
- }
- if (rv) {
- vm_map_unlock(map);
- (void) vm_map_pageable(map, start, failed, TRUE);
- return (rv);
- }
- /*
- * An exclusive lock on the map is needed in order to call
- * vm_map_simplify_entry(). If the current lock on the map
- * is only a shared lock, an upgrade is needed.
- */
- if (vm_map_pmap(map) != kernel_pmap &&
- vm_map_lock_upgrade(map)) {
- vm_map_lock(map);
- if (vm_map_lookup_entry(map, start, &start_entry) ==
- FALSE) {
- vm_map_unlock(map);
- return KERN_SUCCESS;
- }
- }
- vm_map_simplify_entry(map, start_entry);
- }
-
- vm_map_unlock(map);
-
- return (KERN_SUCCESS);
-}
-
-/*
* vm_map_clean
*
* Push any dirty cached pages in the address range to their pager.