| field | value | date |
|---|---|---|
| author | kib <kib@FreeBSD.org> | 2012-10-22 17:50:54 +0000 |
| committer | kib <kib@FreeBSD.org> | 2012-10-22 17:50:54 +0000 |
| commit | 560aa751e0f5cfef868bdf3fab01cdbc5169ef82 (patch) | |
| tree | 6e9ef0a47c5e91d26227820c50c9767e84550821 /sys/vm | |
| parent | ca71b68ea40c83f641d6485e027368568f244197 (diff) | |
Remove the support for using non-mpsafe filesystem modules.
In particular, do not lock Giant conditionally when calling into the
filesystem module, and remove the VFS_LOCK_GIANT() and related
macros. Stop handling buffers belonging to non-mpsafe filesystems.
VFS_VERSION is bumped to indicate the interface change, which does
not alter any interface signatures.
Conducted and reviewed by: attilio
Tested by: pho
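To make the mechanical change easier to follow, the sketch below contrasts the idiom being removed with its replacement. It is illustrative only, not code copied from the tree: `vp`, `td`, and the choice of LK_SHARED are assumed for the example, and error handling is reduced to the bare minimum.

```c
/*
 * Illustrative sketch of the idiom this commit removes (assumed
 * context: vp is a struct vnode *, td is the current thread, and
 * LK_SHARED is an arbitrary lock type chosen for the example).
 *
 * Before: Giant had to be acquired conditionally, in case the vnode
 * belonged to a non-mpsafe filesystem.
 */
int vfslocked, error;

vfslocked = VFS_LOCK_GIANT(vp->v_mount);  /* takes Giant only if the fs needs it */
if ((error = vget(vp, LK_SHARED, td)) != 0) {
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}
/* ... call into the filesystem via the locked vnode ... */
vput(vp);
VFS_UNLOCK_GIANT(vfslocked);

/*
 * After: every filesystem is assumed MPSAFE, so the conditional Giant
 * bracketing disappears and the vnode is locked directly.
 */
if ((error = vget(vp, LK_SHARED, td)) != 0)
	return (error);
/* ... call into the filesystem via the locked vnode ... */
vput(vp);
```

The hunks below, in vm_fault_hold(), vm_mmap_vnode(), vm_object_deallocate(), vm_object_sync(), vm_pageout_scan(), and the vnode pager, are all instances of deleting the first form in favor of the second.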
Diffstat (limited to 'sys/vm')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | sys/vm/vm_fault.c | 21 |
| -rw-r--r-- | sys/vm/vm_mmap.c | 12 |
| -rw-r--r-- | sys/vm/vm_object.c | 33 |
| -rw-r--r-- | sys/vm/vm_pageout.c | 7 |
| -rw-r--r-- | sys/vm/vnode_pager.c | 9 |
5 files changed, 5 insertions, 77 deletions
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index e3a29f0..a3a90c6 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -102,8 +102,6 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_pager.h>
 #include <vm/vm_extern.h>
 
-#include <sys/mount.h>	/* XXX Temporary for VFS_LOCK_GIANT() */
-
 #define PFBAK 4
 #define PFFOR 4
 #define PAGEORDER_SIZE (PFBAK+PFFOR)
@@ -135,7 +133,6 @@ struct faultstate {
 	vm_map_entry_t entry;
 	int lookup_still_valid;
 	struct vnode *vp;
-	int vfslocked;
 };
 
 static void vm_fault_cache_behind(const struct faultstate *fs, int distance);
@@ -182,8 +179,6 @@ unlock_and_deallocate(struct faultstate *fs)
 		vput(fs->vp);
 		fs->vp = NULL;
 	}
-	VFS_UNLOCK_GIANT(fs->vfslocked);
-	fs->vfslocked = 0;
 }
 
 /*
@@ -255,7 +250,6 @@ vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
 	growstack = TRUE;
 	PCPU_INC(cnt.v_vm_faults);
 	fs.vp = NULL;
-	fs.vfslocked = 0;
 	faultcount = reqpage = 0;
 
 RetryFault:;
@@ -513,7 +507,6 @@ readrest:
 			 */
 			unlock_map(&fs);
 
-vnode_lock:
 			if (fs.object->type == OBJT_VNODE) {
 				vp = fs.object->handle;
 				if (vp == fs.vp)
@@ -524,25 +517,12 @@ vnode_lock:
 				}
 				locked = VOP_ISLOCKED(vp);
 
-				if (VFS_NEEDSGIANT(vp->v_mount) && !fs.vfslocked) {
-					fs.vfslocked = 1;
-					if (!mtx_trylock(&Giant)) {
-						VM_OBJECT_UNLOCK(fs.object);
-						mtx_lock(&Giant);
-						VM_OBJECT_LOCK(fs.object);
-						goto vnode_lock;
-					}
-				}
 				if (locked != LK_EXCLUSIVE)
 					locked = LK_SHARED;
 				/* Do not sleep for vnode lock while fs.m is busy */
 				error = vget(vp, locked | LK_CANRECURSE |
 				    LK_NOWAIT, curthread);
 				if (error != 0) {
-					int vfslocked;
-
-					vfslocked = fs.vfslocked;
-					fs.vfslocked = 0; /* Keep Giant */
 					vhold(vp);
 					release_page(&fs);
 					unlock_and_deallocate(&fs);
@@ -550,7 +530,6 @@ vnode_lock:
 					    LK_CANRECURSE, curthread);
 					vdrop(vp);
 					fs.vp = vp;
-					fs.vfslocked = vfslocked;
 					KASSERT(error == 0,
 					    ("vm_fault: vget failed"));
 					goto RetryFault;
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index e0aab73..74b4ec2 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -1256,7 +1256,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 	vm_offset_t foff;
 	struct mount *mp;
 	struct ucred *cred;
-	int error, flags, locktype, vfslocked;
+	int error, flags, locktype;
 
 	mp = vp->v_mount;
 	cred = td->td_ucred;
@@ -1264,11 +1264,8 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 		locktype = LK_EXCLUSIVE;
 	else
 		locktype = LK_SHARED;
-	vfslocked = VFS_LOCK_GIANT(mp);
-	if ((error = vget(vp, locktype, td)) != 0) {
-		VFS_UNLOCK_GIANT(vfslocked);
+	if ((error = vget(vp, locktype, td)) != 0)
 		return (error);
-	}
 	foff = *foffp;
 	flags = *flagsp;
 	obj = vp->v_object;
@@ -1288,10 +1285,8 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 			 * underlying fs.
 			 */
 			error = vget(vp, locktype, td);
-			if (error != 0) {
-				VFS_UNLOCK_GIANT(vfslocked);
+			if (error != 0)
 				return (error);
-			}
 		}
 		if (locktype == LK_EXCLUSIVE) {
 			*writecounted = TRUE;
@@ -1344,7 +1339,6 @@ mark_atime:
 
 done:
 	vput(vp);
-	VFS_UNLOCK_GIANT(vfslocked);
 	return (error);
 }
 
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index e7e077d..e19750c 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -427,7 +427,6 @@ vm_object_vndeallocate(vm_object_t object)
 {
 	struct vnode *vp = (struct vnode *) object->handle;
 
-	VFS_ASSERT_GIANT(vp->v_mount);
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	KASSERT(object->type == OBJT_VNODE,
 	    ("vm_object_vndeallocate: not a vnode object"));
@@ -480,38 +479,11 @@ vm_object_deallocate(vm_object_t object)
 	vm_object_t temp;
 
 	while (object != NULL) {
-		int vfslocked;
-
-		vfslocked = 0;
-	restart:
 		VM_OBJECT_LOCK(object);
 		if (object->type == OBJT_VNODE) {
-			struct vnode *vp = (struct vnode *) object->handle;
-
-			/*
-			 * Conditionally acquire Giant for a vnode-backed
-			 * object. We have to be careful since the type of
-			 * a vnode object can change while the object is
-			 * unlocked.
-			 */
-			if (VFS_NEEDSGIANT(vp->v_mount) && !vfslocked) {
-				vfslocked = 1;
-				if (!mtx_trylock(&Giant)) {
-					VM_OBJECT_UNLOCK(object);
-					mtx_lock(&Giant);
-					goto restart;
-				}
-			}
 			vm_object_vndeallocate(object);
-			VFS_UNLOCK_GIANT(vfslocked);
 			return;
-		} else
-			/*
-			 * This is to handle the case that the object
-			 * changed type while we dropped its lock to
-			 * obtain Giant.
-			 */
-			VFS_UNLOCK_GIANT(vfslocked);
+		}
 
 		KASSERT(object->ref_count != 0,
 		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));
@@ -987,11 +959,9 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
 	 */
 	if (object->type == OBJT_VNODE &&
 	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
-		int vfslocked;
 		vp = object->handle;
 		VM_OBJECT_UNLOCK(object);
 		(void) vn_start_write(vp, &mp, V_WAIT);
-		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 		if (syncio && !invalidate && offset == 0 &&
 		    OFF_TO_IDX(size) == object->size) {
@@ -1015,7 +985,6 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
 		if (fsync_after)
 			error = VOP_FSYNC(vp, MNT_WAIT, curthread);
 		VOP_UNLOCK(vp, 0);
-		VFS_UNLOCK_GIANT(vfslocked);
 		vn_finished_write(mp);
 		if (error != 0)
 			res = FALSE;
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 723fc72..04a3621 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -569,7 +569,6 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
 	vm_object_t object;
 	vm_paddr_t pa;
 	vm_page_t m, m_tmp, next;
-	int vfslocked;
 
 	vm_page_lock_queues();
 	TAILQ_FOREACH_SAFE(m, &vm_page_queues[queue].pl, pageq, next) {
@@ -609,13 +608,11 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
 			vm_object_reference_locked(object);
 			VM_OBJECT_UNLOCK(object);
 			(void)vn_start_write(vp, &mp, V_WAIT);
-			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 			VM_OBJECT_LOCK(object);
 			vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 			VM_OBJECT_UNLOCK(object);
 			VOP_UNLOCK(vp, 0);
-			VFS_UNLOCK_GIANT(vfslocked);
 			vm_object_deallocate(object);
 			vn_finished_write(mp);
 			return (TRUE);
@@ -1127,7 +1124,7 @@ vm_pageout_scan(int pass)
 			 * pressure where there are insufficient clean pages
 			 * on the inactive queue, we may have to go all out.
 			 */
-			int swap_pageouts_ok, vfslocked = 0;
+			int swap_pageouts_ok;
 			struct vnode *vp = NULL;
 			struct mount *mp = NULL;
 
@@ -1191,7 +1188,6 @@ vm_pageout_scan(int pass)
 				    ("vp %p with NULL v_mount", vp));
 				vm_object_reference_locked(object);
 				VM_OBJECT_UNLOCK(object);
-				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 				if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
 				    curthread)) {
 					VM_OBJECT_LOCK(object);
@@ -1270,7 +1266,6 @@ unlock_and_continue:
 				}
 				if (vp != NULL)
 					vput(vp);
-				VFS_UNLOCK_GIANT(vfslocked);
 				vm_object_deallocate(object);
 				vn_finished_write(mp);
 			}
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index cc9063b..f6848b6 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -298,7 +298,6 @@ vnode_pager_haspage(object, pindex, before, after)
 	int poff;
 	int bsize;
 	int pagesperblock, blocksperpage;
-	int vfslocked;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	/*
@@ -324,9 +323,7 @@ vnode_pager_haspage(object, pindex, before, after)
 		reqblock = pindex * blocksperpage;
 	}
 	VM_OBJECT_UNLOCK(object);
-	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
-	VFS_UNLOCK_GIANT(vfslocked);
 	VM_OBJECT_LOCK(object);
 	if (err)
 		return TRUE;
@@ -670,15 +667,12 @@ vnode_pager_getpages(object, m, count, reqpage)
 	int rtval;
 	struct vnode *vp;
 	int bytes = count * PAGE_SIZE;
-	int vfslocked;
 
 	vp = object->handle;
 	VM_OBJECT_UNLOCK(object);
-	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
 	KASSERT(rtval != EOPNOTSUPP,
 	    ("vnode_pager: FS getpages not implemented\n"));
-	VFS_UNLOCK_GIANT(vfslocked);
 	VM_OBJECT_LOCK(object);
 	return rtval;
 }
@@ -1237,7 +1231,6 @@ vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
 	struct vnode *vp;
 	struct mount *mp;
 	vm_offset_t inc;
-	int vfslocked;
 
 	VM_OBJECT_LOCK(object);
 
@@ -1264,7 +1257,6 @@ vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
 	vp = object->handle;
 	vhold(vp);
 	VM_OBJECT_UNLOCK(object);
-	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 	mp = NULL;
 	vn_start_write(vp, &mp, V_WAIT);
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
@@ -1280,5 +1272,4 @@ vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
 	vdrop(vp);
 	if (mp != NULL)
 		vn_finished_write(mp);
-	VFS_UNLOCK_GIANT(vfslocked);
 }