author    alc <alc@FreeBSD.org>    2003-11-09 05:25:35 +0000
committer alc <alc@FreeBSD.org>    2003-11-09 05:25:35 +0000
commit    269cf5aa09e2841892619641efd73d79b0c7503a (patch)
tree      bd200432449621c47dc3d9215eac5e23df813e31    /sys/vm/vm_map.c
parent    3f532e652b08331243d297085d4f13318e94f586 (diff)
- Rename vm_map_clean() to vm_map_sync().  This better reflects the fact
  that msync(2) is its only caller.
- Migrate the parts of the old vm_map_clean() that examined the internals
  of a vm object to a new function vm_object_sync() that is implemented in
  vm_object.c.  At the same time, introduce the necessary vm object locking
  so that vm_map_sync() and vm_object_sync() can be called without Giant.

Reviewed by:	tegge
Diffstat (limited to 'sys/vm/vm_map.c')
-rw-r--r--  sys/vm/vm_map.c | 64
1 files changed, 5 insertions, 59 deletions
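Since the commit message notes that msync(2) is the only caller of the renamed routine, the sketch below shows roughly how a caller could map the msync(2) flags onto the vm_map_sync() arguments visible in the diff. This is a hedged illustration, not the code in sys/vm/vm_mmap.c: the function name sketch_msync(), the exact flag handling, and the header list are assumptions.

/* Headers approximate for a kernel-side sketch. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>

/*
 * Hypothetical caller-side sketch (not the committed msync(2) code):
 * MS_SYNC (i.e. the absence of MS_ASYNC) requests synchronous write-out,
 * MS_INVALIDATE requests invalidation of cached pages.
 */
int
sketch_msync(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
{
	boolean_t syncio = (flags & MS_ASYNC) == 0;
	boolean_t invalidate = (flags & MS_INVALIDATE) != 0;
	int rv;

	rv = vm_map_sync(map, addr, addr + size, syncio, invalidate);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* part of the range is not mapped */
	default:
		return (EINVAL);
	}
}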
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 1586cb3..40db4c1 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1946,7 +1946,7 @@ done:
}
/*
- * vm_map_clean
+ * vm_map_sync
*
* Push any dirty cached pages in the address range to their pager.
* If syncio is TRUE, dirty pages are written synchronously.
@@ -1955,7 +1955,7 @@ done:
* Returns an error if any part of the specified range is not mapped.
*/
int
-vm_map_clean(
+vm_map_sync(
vm_map_t map,
vm_offset_t start,
vm_offset_t end,
@@ -1968,8 +1968,6 @@ vm_map_clean(
vm_object_t object;
vm_ooffset_t offset;
- GIANT_REQUIRED;
-
vm_map_lock_read(map);
VM_MAP_RANGE_CHECK(map, start, end);
if (!vm_map_lookup_entry(map, start, &entry)) {
@@ -1993,9 +1991,11 @@ vm_map_clean(
}
if (invalidate) {
+ mtx_lock(&Giant);
vm_page_lock_queues();
pmap_remove(map->pmap, start, end);
vm_page_unlock_queues();
+ mtx_unlock(&Giant);
}
/*
* Make a second pass, cleaning/uncaching pages from the indicated
@@ -2021,61 +2021,7 @@ vm_map_clean(
} else {
object = current->object.vm_object;
}
- /*
- * Note that there is absolutely no sense in writing out
- * anonymous objects, so we track down the vnode object
- * to write out.
- * We invalidate (remove) all pages from the address space
- * anyway, for semantic correctness.
- *
- * note: certain anonymous maps, such as MAP_NOSYNC maps,
- * may start out with a NULL object.
- */
- while (object && object->backing_object) {
- object = object->backing_object;
- offset += object->backing_object_offset;
- if (object->size < OFF_TO_IDX(offset + size))
- size = IDX_TO_OFF(object->size) - offset;
- }
- if (object && (object->type == OBJT_VNODE) &&
- (current->protection & VM_PROT_WRITE)) {
- /*
- * Flush pages if writing is allowed, invalidate them
- * if invalidation requested. Pages undergoing I/O
- * will be ignored by vm_object_page_remove().
- *
- * We cannot lock the vnode and then wait for paging
- * to complete without deadlocking against vm_fault.
- * Instead we simply call vm_object_page_remove() and
- * allow it to block internally on a page-by-page
- * basis when it encounters pages undergoing async
- * I/O.
- */
- int flags;
-
- vm_object_reference(object);
- vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
- flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
- flags |= invalidate ? OBJPC_INVAL : 0;
- VM_OBJECT_LOCK(object);
- vm_object_page_clean(object,
- OFF_TO_IDX(offset),
- OFF_TO_IDX(offset + size + PAGE_MASK),
- flags);
- VM_OBJECT_UNLOCK(object);
- VOP_UNLOCK(object->handle, 0, curthread);
- vm_object_deallocate(object);
- }
- if (object && invalidate &&
- ((object->type == OBJT_VNODE) ||
- (object->type == OBJT_DEVICE))) {
- VM_OBJECT_LOCK(object);
- vm_object_page_remove(object,
- OFF_TO_IDX(offset),
- OFF_TO_IDX(offset + size + PAGE_MASK),
- FALSE);
- VM_OBJECT_UNLOCK(object);
- }
+ vm_object_sync(object, offset, size, syncio, invalidate);
start += size;
}
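For context, a rough sketch of how the migrated logic could look as the new vm_object_sync() in vm_object.c, assembled from the lines removed above. This is an approximation, not the committed implementation: the signature follows the call added in the hunk, the VM_PROT_WRITE check that the old code applied against the map entry is assumed to remain with the caller, the header list is approximate, and the locking details in vm_object.c may differ.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

/*
 * Sketch only -- reconstructed from the code removed from vm_map_clean().
 */
void
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	int flags;

	/* Certain anonymous maps, such as MAP_NOSYNC maps, may have a NULL object. */
	if (object == NULL)
		return;
	/*
	 * Writing out anonymous objects makes no sense, so track down
	 * the vnode object to write out, clamping the range to its size.
	 */
	while (object->backing_object != NULL) {
		object = object->backing_object;
		offset += object->backing_object_offset;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	if (object->type == OBJT_VNODE) {
		/*
		 * Flush the pages through the vnode pager.  Pages
		 * undergoing I/O will be ignored by vm_object_page_remove().
		 */
		vm_object_reference(object);
		vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
		flags |= invalidate ? OBJPC_INVAL : 0;
		VM_OBJECT_LOCK(object);
		vm_object_page_clean(object, OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
		VM_OBJECT_UNLOCK(object);
		VOP_UNLOCK(object->handle, 0, curthread);
		vm_object_deallocate(object);
	}
	if (invalidate &&
	    (object->type == OBJT_VNODE || object->type == OBJT_DEVICE)) {
		VM_OBJECT_LOCK(object);
		vm_object_page_remove(object, OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK), FALSE);
		VM_OBJECT_UNLOCK(object);
	}
}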