From f883ef447af57985b21cde8cd13232ca845190a4 Mon Sep 17 00:00:00 2001
From: dillon
Date: Fri, 26 Oct 2001 00:08:05 +0000
Subject: Implement kern.maxvnodes.  Adjusting kern.maxvnodes now actually has
 a real effect.

Optimize vfs_msync().  Avoid having to continually drop and re-obtain
mutexes when scanning the vnode list.  Improves the looping case by 500%.

Optimize ffs_sync().  Avoid having to continually drop and re-obtain
mutexes when scanning the vnode list.  This makes a couple of assumptions,
which I believe are OK, regarding vnode stability while the mount list
mutex is held.  Improves the looping case by 500%.

(More optimization work is needed on top of these fixes.)

MFC after:	1 week
---
 sys/vm/vm_object.c | 41 +++++++++++++++++++++++++++++++++++++++--
 1 file changed, 39 insertions(+), 2 deletions(-)

(limited to 'sys/vm/vm_object.c')

diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 1d5a989..b7613eb 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -321,8 +321,11 @@ vm_object_reference(vm_object_t object)
 	if (object == NULL)
 		return;
 
+#if 0
+	/* object can be re-referenced during final cleaning */
 	KASSERT(!(object->flags & OBJ_DEAD),
 	    ("vm_object_reference: attempting to reference dead obj"));
+#endif
 
 	object->ref_count++;
 	if (object->type == OBJT_VNODE) {
@@ -454,8 +457,13 @@ doterm:
 			temp->generation++;
 			object->backing_object = NULL;
 		}
-		vm_object_terminate(object);
-		/* unlocks and deallocates object */
+		/*
+		 * Don't double-terminate, we could be in a termination
+		 * recursion due to the terminate having to sync data
+		 * to disk.
+		 */
+		if ((object->flags & OBJ_DEAD) == 0)
+			vm_object_terminate(object);
 		object = temp;
 	}
 }
@@ -627,7 +635,17 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
 	}
 
 	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
+		struct vnode *vp;
+
 		vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
+		if (object->type == OBJT_VNODE &&
+		    (vp = (struct vnode *)object->handle) != NULL) {
+			if (vp->v_flag & VOBJDIRTY) {
+				mtx_lock(&vp->v_interlock);
+				vp->v_flag &= ~VOBJDIRTY;
+				mtx_unlock(&vp->v_interlock);
+			}
+		}
 	}
 
 rescan:
@@ -1357,6 +1375,8 @@ vm_object_collapse(vm_object_t object)
 			 * and no object references within it, all that is
 			 * necessary is to dispose of it.
 			 */
+			KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object));
+			KASSERT(TAILQ_FIRST(&backing_object->memq) == NULL, ("backing_object %p somehow has left over pages during collapse!", backing_object));
 
 			TAILQ_REMOVE(
 			    &vm_object_list,
@@ -1684,6 +1704,23 @@ vm_object_in_map(vm_object_t object)
 	return 0;
 }
 
+void
+vm_object_set_writeable_dirty(vm_object_t object)
+{
+	struct vnode *vp;
+
+	vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
+	if (object->type == OBJT_VNODE &&
+	    (vp = (struct vnode *)object->handle) != NULL) {
+		if ((vp->v_flag & VOBJDIRTY) == 0) {
+			mtx_lock(&vp->v_interlock);
+			vp->v_flag |= VOBJDIRTY;
+			mtx_unlock(&vp->v_interlock);
+		}
+	}
+}
+
+
 DB_SHOW_COMMAND(vmochk, vm_object_check)
 {
 	vm_object_t object;
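For context: the vfs_msync() and ffs_sync() changes the log message describes are
not shown above (the diff is limited to sys/vm/vm_object.c).  The sketch below is
an approximation of the locking pattern involved, not the committed code -- hold
the vnode-list mutex across the whole scan and use the per-vnode VOBJDIRTY flag
(maintained by vm_object_set_writeable_dirty() above) to skip clean vnodes
cheaply, dropping the mutex only for vnodes that actually need their VM object
flushed.  The list/field names (mntvnode_mtx, mnt_nvnodelist, v_nmntvnodes) and
the helper sync_vnode_object() are assumptions made for illustration only.

/*
 * Illustrative sketch only -- not the committed vfs_msync().  The point is
 * the pattern: one mtx_lock() before the loop, a cheap flag test under the
 * mutex to filter out clean vnodes, and an unlock/relock pair paid only by
 * vnodes that need real work, instead of on every iteration.
 */
static void
scan_dirty_vnodes(struct mount *mp)
{
	struct vnode *vp, *nvp;

	mtx_lock(&mntvnode_mtx);
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
		/* Grab the next pointer before we might drop the mutex. */
		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
		/* Clean vnode: no lock dance, no object lookup. */
		if ((vp->v_flag & VOBJDIRTY) == 0)
			continue;
		/* Only dirty vnodes pay for dropping and re-taking the lock. */
		mtx_unlock(&mntvnode_mtx);
		sync_vnode_object(vp);		/* hypothetical flush helper */
		mtx_lock(&mntvnode_mtx);
		/*
		 * A real implementation must also cope with the list changing
		 * while the mutex was dropped (e.g. by restarting the scan);
		 * that handling is omitted from this sketch.
		 */
	}
	mtx_unlock(&mntvnode_mtx);
}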