author     alfred <alfred@FreeBSD.org>   2001-05-19 01:28:09 +0000
committer  alfred <alfred@FreeBSD.org>   2001-05-19 01:28:09 +0000
commit     a3f0842419d98da211706f921fc626e160cd960b (patch)
tree       e86922a5639c32e1242d4f3088fc487f3be5b236 /sys/vm/vm_object.c
parent     9eda9187f024233436e6a743f13bd938b1a0f19c (diff)
Introduce a global lock for the vm subsystem (vm_mtx).
vm_mtx does not recurse and is required for most low level vm operations.

faults can not be taken without holding Giant.

Memory subsystems can now call the base page allocators safely.

Almost all atomic ops were removed as they are covered under the vm mutex.

Alpha and ia64 now need to catch up to i386's trap handlers.

FFS and NFS have been tested, other filesystems will need minor changes
(grabbing the vm lock when twiddling page properties).

Reviewed (partially) by:	jake, jhb
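Below is a minimal sketch (not part of the commit) of the conditional-acquisition idiom the patch adds to _vm_object_allocate(). Because vm_mtx does not recurse, code that can be entered either with or without the lock first asks mtx_owned() whether the current thread already holds it, and only locks and unlocks when it does not. The function name touch_vm_state() and its body are illustrative assumptions; only vm_mtx and the mutex(9) calls come from the patch.

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    extern struct mtx vm_mtx;	/* the global VM lock this commit introduces */

    static void
    touch_vm_state(void)
    {
    	int hadvmlock;

    	/* Only take vm_mtx if the caller did not already hold it. */
    	hadvmlock = mtx_owned(&vm_mtx);
    	if (!hadvmlock)
    		mtx_lock(&vm_mtx);

    	/* ... update VM object lists, counters and flags under the lock ... */

    	if (!hadvmlock)
    		mtx_unlock(&vm_mtx);
    }

Acquiring a non-recursive mutex the thread already owns would panic, which is why the patch records hadvmlock rather than calling mtx_lock() unconditionally.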
Diffstat (limited to 'sys/vm/vm_object.c')
-rw-r--r--  sys/vm/vm_object.c  |  58
1 files changed, 51 insertions, 7 deletions
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 33fe834..30ef190 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -146,6 +146,24 @@ _vm_object_allocate(type, size, object)
vm_object_t object;
{
int incr;
+ int hadvmlock;
+
+ /*
+ * XXX: Not all callers seem to have the lock, compensate.
+ * I'm pretty sure we need to bump the gen count before possibly
+ * nuking the data contained within while under the lock.
+ */
+ hadvmlock = mtx_owned(&vm_mtx);
+ if (!hadvmlock)
+ mtx_lock(&vm_mtx);
+ object->generation++;
+ if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
+ vm_object_set_flag(object, OBJ_ONEMAPPING);
+ TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
+ vm_object_count++;
+ if (!hadvmlock)
+ mtx_unlock(&vm_mtx);
+
TAILQ_INIT(&object->memq);
TAILQ_INIT(&object->shadow_head);
@@ -153,8 +171,6 @@ _vm_object_allocate(type, size, object)
object->size = size;
object->ref_count = 1;
object->flags = 0;
- if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
- vm_object_set_flag(object, OBJ_ONEMAPPING);
object->paging_in_progress = 0;
object->resident_page_count = 0;
object->shadow_count = 0;
@@ -175,10 +191,6 @@ _vm_object_allocate(type, size, object)
*/
object->hash_rand = object_hash_rand - 129;
- object->generation++;
-
- TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
- vm_object_count++;
object_hash_rand = object->hash_rand;
}
@@ -226,7 +238,6 @@ vm_object_allocate(type, size)
vm_object_t result;
result = (vm_object_t) zalloc(obj_zone);
-
_vm_object_allocate(type, size, result);
return (result);
@@ -250,18 +261,29 @@ vm_object_reference(object)
object->ref_count++;
if (object->type == OBJT_VNODE) {
+ mtx_unlock(&vm_mtx);
+ mtx_lock(&Giant);
while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curproc)) {
printf("vm_object_reference: delay in getting object\n");
}
+ mtx_unlock(&Giant);
+ mtx_lock(&vm_mtx);
}
}
+/*
+ * handle deallocating a object of type OBJT_VNODE
+ *
+ * requires vm_mtx
+ * may block
+ */
void
vm_object_vndeallocate(object)
vm_object_t object;
{
struct vnode *vp = (struct vnode *) object->handle;
+ mtx_assert(&vm_mtx, MA_OWNED);
KASSERT(object->type == OBJT_VNODE,
("vm_object_vndeallocate: not a vnode object"));
KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
@@ -277,7 +299,14 @@ vm_object_vndeallocate(object)
vp->v_flag &= ~VTEXT;
vm_object_clear_flag(object, OBJ_OPT);
}
+ /*
+ * vrele may need a vop lock
+ */
+ mtx_unlock(VM_OBJECT_MTX(object));
+ mtx_lock(&Giant);
vrele(vp);
+ mtx_unlock(&Giant);
+ mtx_lock(VM_OBJECT_MTX(object));
}
/*
@@ -290,6 +319,7 @@ vm_object_vndeallocate(object)
* may be relinquished.
*
* No object may be locked.
+ * vm_mtx must be held
*/
void
vm_object_deallocate(object)
@@ -297,6 +327,7 @@ vm_object_deallocate(object)
{
vm_object_t temp;
+ mtx_assert(&vm_mtx, MA_OWNED);
while (object != NULL) {
if (object->type == OBJT_VNODE) {
@@ -422,7 +453,11 @@ vm_object_terminate(object)
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
vp = (struct vnode *) object->handle;
+ mtx_unlock(VM_OBJECT_MTX(object));
+ mtx_lock(&Giant);
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
+ mtx_unlock(&Giant);
+ mtx_lock(VM_OBJECT_MTX(object));
}
KASSERT(object->ref_count == 0,
@@ -507,6 +542,7 @@ vm_object_page_clean(object, start, end, flags)
vm_page_t ma[vm_pageout_page_count];
int curgeneration;
+ mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
if (object->type != OBJT_VNODE ||
(object->flags & OBJ_MIGHTBEDIRTY) == 0)
return;
@@ -962,6 +998,7 @@ vm_object_backing_scan(vm_object_t object, int op)
vm_pindex_t backing_offset_index;
s = splvm();
+ mtx_assert(&vm_mtx, MA_OWNED);
backing_object = object->backing_object;
backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
@@ -1175,6 +1212,9 @@ void
vm_object_collapse(object)
vm_object_t object;
{
+
+ mtx_assert(&vm_mtx, MA_OWNED);
+
while (TRUE) {
vm_object_t backing_object;
@@ -1386,6 +1426,8 @@ vm_object_page_remove(object, start, end, clean_only)
unsigned int size;
int all;
+ mtx_assert(&vm_mtx, MA_OWNED);
+
if (object == NULL ||
object->resident_page_count == 0)
return;
@@ -1502,6 +1544,8 @@ vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
{
vm_pindex_t next_pindex;
+ mtx_assert(&vm_mtx, MA_OWNED);
+
if (prev_object == NULL) {
return (TRUE);
}
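The vnode-related hunks above all repeat one lock-juggling pattern: release the VM lock, take Giant for the vnode call (vget, vrele, vinvalbuf), then hand Giant back and reacquire the VM lock. A hedged sketch of that pattern follows; it assumes VM_OBJECT_MTX(object) resolves to &vm_mtx (as the mixed usage in this diff suggests), and the helper name example_drop_for_vnode_op() is invented for illustration, not taken from the tree.

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/vnode.h>
    #include <vm/vm.h>
    #include <vm/vm_object.h>

    extern struct mtx vm_mtx;	/* see the sketch above */

    static void
    example_drop_for_vnode_op(vm_object_t object)
    {
    	struct vnode *vp = (struct vnode *) object->handle;

    	mtx_assert(&vm_mtx, MA_OWNED);	/* caller must enter with the VM lock */

    	mtx_unlock(&vm_mtx);		/* vrele() may sleep or need vop locks */
    	mtx_lock(&Giant);		/* the vnode layer still runs under Giant */
    	vrele(vp);
    	mtx_unlock(&Giant);
    	mtx_lock(&vm_mtx);		/* restore the invariant for the caller */
    }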