author     alc <alc@FreeBSD.org>  2003-11-09 05:25:35 +0000
committer  alc <alc@FreeBSD.org>  2003-11-09 05:25:35 +0000
commit     269cf5aa09e2841892619641efd73d79b0c7503a (patch)
tree       bd200432449621c47dc3d9215eac5e23df813e31
parent     3f532e652b08331243d297085d4f13318e94f586 (diff)
- Rename vm_map_clean() to vm_map_sync().  This better reflects the fact
  that msync(2) is its only caller.
- Migrate the parts of the old vm_map_clean() that examined the internals
  of a vm object to a new function vm_object_sync() that is implemented in
  vm_object.c.  At the same time, introduce the necessary vm object locking
  so that vm_map_sync() and vm_object_sync() can be called without Giant.

Reviewed by:	tegge
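Since msync(2) is the sole entry point into vm_map_sync(), the path changed
here is exercised by any userland program that flushes a file-backed mapping.
A minimal sketch of such a caller, using only the standard POSIX API (the
file name is arbitrary; this program is not part of the change):

#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tmp/msync_demo", O_RDWR | O_CREAT, 0644);
	if (fd == -1)
		err(1, "open");
	if (ftruncate(fd, 4096) == -1)
		err(1, "ftruncate");
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	memcpy(p, "hello", 5);
	/*
	 * A synchronous flush: msync() enters vm_map_sync() with
	 * syncio = TRUE; passing MS_INVALIDATE instead would set
	 * the invalidate argument.
	 */
	if (msync(p, 4096, MS_SYNC) == -1)
		err(1, "msync");
	munmap(p, 4096);
	close(fd);
	return (0);
}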
-rw-r--r--  sys/vm/vm_map.c    | 64
-rw-r--r--  sys/vm/vm_map.h    |  2
-rw-r--r--  sys/vm/vm_mmap.c   |  2
-rw-r--r--  sys/vm/vm_object.c | 69
-rw-r--r--  sys/vm/vm_object.h |  2
5 files changed, 78 insertions(+), 61 deletions(-)
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 1586cb3..40db4c1 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1946,7 +1946,7 @@ done:
}
/*
- * vm_map_clean
+ * vm_map_sync
*
* Push any dirty cached pages in the address range to their pager.
* If syncio is TRUE, dirty pages are written synchronously.
@@ -1955,7 +1955,7 @@ done:
* Returns an error if any part of the specified range is not mapped.
*/
int
-vm_map_clean(
+vm_map_sync(
vm_map_t map,
vm_offset_t start,
vm_offset_t end,
@@ -1968,8 +1968,6 @@ vm_map_clean(
vm_object_t object;
vm_ooffset_t offset;
- GIANT_REQUIRED;
-
vm_map_lock_read(map);
VM_MAP_RANGE_CHECK(map, start, end);
if (!vm_map_lookup_entry(map, start, &entry)) {
@@ -1993,9 +1991,11 @@ vm_map_clean(
}
if (invalidate) {
+ mtx_lock(&Giant);
vm_page_lock_queues();
pmap_remove(map->pmap, start, end);
vm_page_unlock_queues();
+ mtx_unlock(&Giant);
}
/*
* Make a second pass, cleaning/uncaching pages from the indicated
@@ -2021,61 +2021,7 @@ vm_map_clean(
} else {
object = current->object.vm_object;
}
- /*
- * Note that there is absolutely no sense in writing out
- * anonymous objects, so we track down the vnode object
- * to write out.
- * We invalidate (remove) all pages from the address space
- * anyway, for semantic correctness.
- *
- * note: certain anonymous maps, such as MAP_NOSYNC maps,
- * may start out with a NULL object.
- */
- while (object && object->backing_object) {
- object = object->backing_object;
- offset += object->backing_object_offset;
- if (object->size < OFF_TO_IDX(offset + size))
- size = IDX_TO_OFF(object->size) - offset;
- }
- if (object && (object->type == OBJT_VNODE) &&
- (current->protection & VM_PROT_WRITE)) {
- /*
- * Flush pages if writing is allowed, invalidate them
- * if invalidation requested. Pages undergoing I/O
- * will be ignored by vm_object_page_remove().
- *
- * We cannot lock the vnode and then wait for paging
- * to complete without deadlocking against vm_fault.
- * Instead we simply call vm_object_page_remove() and
- * allow it to block internally on a page-by-page
- * basis when it encounters pages undergoing async
- * I/O.
- */
- int flags;
-
- vm_object_reference(object);
- vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
- flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
- flags |= invalidate ? OBJPC_INVAL : 0;
- VM_OBJECT_LOCK(object);
- vm_object_page_clean(object,
- OFF_TO_IDX(offset),
- OFF_TO_IDX(offset + size + PAGE_MASK),
- flags);
- VM_OBJECT_UNLOCK(object);
- VOP_UNLOCK(object->handle, 0, curthread);
- vm_object_deallocate(object);
- }
- if (object && invalidate &&
- ((object->type == OBJT_VNODE) ||
- (object->type == OBJT_DEVICE))) {
- VM_OBJECT_LOCK(object);
- vm_object_page_remove(object,
- OFF_TO_IDX(offset),
- OFF_TO_IDX(offset + size + PAGE_MASK),
- FALSE);
- VM_OBJECT_UNLOCK(object);
- }
+ vm_object_sync(object, offset, size, syncio, invalidate);
start += size;
}
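Note the locking change in the hunk above: instead of asserting Giant for the
whole function with GIANT_REQUIRED, Giant is now taken only around the one
callee (pmap_remove()) that still needs it. A generic sketch of that
scope-narrowing pattern, with hypothetical names and a pthread mutex standing
in for Giant (not FreeBSD code):

#include <pthread.h>

static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;

/*
 * Before: callers had to hold the global lock across the entire
 * function.  After: only the one unconverted operation is bracketed
 * by it, so callers no longer need the global lock at all.
 */
static void
sync_range(void (*legacy_op)(void))
{
	/* ... work protected by finer-grained locks ... */
	pthread_mutex_lock(&giant);	/* only legacy_op still needs it */
	legacy_op();
	pthread_mutex_unlock(&giant);
	/* ... more finer-grained work ... */
}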
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 72e804d..241cba2 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -337,13 +337,13 @@ int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_obje
vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
-int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
+int vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t);
void vm_init2 (void);
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index a7dfc0d..158a443 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -574,7 +574,7 @@ msync(td, uap)
/*
* Clean the pages and interpret the return value.
*/
- rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
+ rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
(flags & MS_INVALIDATE) != 0);
done2:
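The two boolean arguments are derived directly from the msync(2) flags; a
hypothetical standalone helper (not in the tree) mirroring that translation:

#include <sys/mman.h>
#include <stdbool.h>

/*
 * syncio is true unless the caller asked for MS_ASYNC, and
 * invalidate is true only when MS_INVALIDATE was passed.
 */
static void
msync_flags(int flags, bool *syncio, bool *invalidate)
{
	*syncio = (flags & MS_ASYNC) == 0;
	*invalidate = (flags & MS_INVALIDATE) != 0;
}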
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index abf4cf9..0099e52 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -964,6 +964,75 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
}
/*
+ * Note that there is absolutely no sense in writing out
+ * anonymous objects, so we track down the vnode object
+ * to write out.
+ * We invalidate (remove) all pages from the address space
+ * for semantic correctness.
+ *
+ * Note: certain anonymous maps, such as MAP_NOSYNC maps,
+ * may start out with a NULL object.
+ */
+void
+vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
+ boolean_t syncio, boolean_t invalidate)
+{
+ vm_object_t backing_object;
+ struct vnode *vp;
+ int flags;
+
+ if (object == NULL)
+ return;
+ VM_OBJECT_LOCK(object);
+ while ((backing_object = object->backing_object) != NULL) {
+ VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_UNLOCK(object);
+ object = backing_object;
+ offset += object->backing_object_offset;
+ if (object->size < OFF_TO_IDX(offset + size))
+ size = IDX_TO_OFF(object->size) - offset;
+ }
+ /*
+ * Flush pages if writing is allowed, invalidate them
+ * if invalidation requested. Pages undergoing I/O
+ * will be ignored by vm_object_page_remove().
+ *
+ * We cannot lock the vnode and then wait for paging
+ * to complete without deadlocking against vm_fault.
+ * Instead we simply call vm_object_page_remove() and
+ * allow it to block internally on a page-by-page
+ * basis when it encounters pages undergoing async
+ * I/O.
+ */
+ if (object->type == OBJT_VNODE &&
+ (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
+ vp = object->handle;
+ VM_OBJECT_UNLOCK(object);
+ mtx_lock(&Giant);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
+ flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
+ flags |= invalidate ? OBJPC_INVAL : 0;
+ VM_OBJECT_LOCK(object);
+ vm_object_page_clean(object,
+ OFF_TO_IDX(offset),
+ OFF_TO_IDX(offset + size + PAGE_MASK),
+ flags);
+ VM_OBJECT_UNLOCK(object);
+ VOP_UNLOCK(vp, 0, curthread);
+ mtx_unlock(&Giant);
+ VM_OBJECT_LOCK(object);
+ }
+ if ((object->type == OBJT_VNODE ||
+ object->type == OBJT_DEVICE) && invalidate) {
+ vm_object_page_remove(object,
+ OFF_TO_IDX(offset),
+ OFF_TO_IDX(offset + size + PAGE_MASK),
+ FALSE);
+ }
+ VM_OBJECT_UNLOCK(object);
+}
+
+/*
* vm_object_madvise:
*
* Implements the madvise function at the object/page level.
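The backing_object walk in the new vm_object_sync() uses hand-over-hand
locking: the next object's lock is acquired before the current one is
released, so the chain cannot change underfoot while never holding more than
two locks at once. A generic userland sketch of the same technique, with a
hypothetical node type and pthread mutexes (not FreeBSD code):

#include <pthread.h>
#include <stddef.h>

struct node {
	pthread_mutex_t	lock;
	struct node	*next;	/* analogous to backing_object */
};

/*
 * Walk to the end of the chain, taking the next node's lock before
 * dropping the current one -- the pattern the backing_object loop
 * above uses.  A real caller might keep the final lock held across
 * further work, as vm_object_sync() does.
 */
static struct node *
chain_tail(struct node *n)
{
	struct node *next;

	pthread_mutex_lock(&n->lock);
	while ((next = n->next) != NULL) {
		pthread_mutex_lock(&next->lock);
		pthread_mutex_unlock(&n->lock);
		n = next;
	}
	pthread_mutex_unlock(&n->lock);
	return (n);
}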
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 96d8666..bd4f44d 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -216,6 +216,8 @@ void vm_object_reference (vm_object_t);
void vm_object_reference_locked(vm_object_t);
void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t);
void vm_object_split(vm_map_entry_t);
+void vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t,
+ boolean_t);
void vm_object_madvise (vm_object_t, vm_pindex_t, int, int);
#endif /* _KERNEL */