Diffstat (limited to 'sys/fs')
-rw-r--r--  sys/fs/fuse/fuse_io.c            |  5
-rw-r--r--  sys/fs/fuse/fuse_vnops.c         | 20
-rw-r--r--  sys/fs/nfsclient/nfs_clbio.c     | 19
-rw-r--r--  sys/fs/nfsclient/nfs_clnode.c    |  4
-rw-r--r--  sys/fs/nfsclient/nfs_clvnops.c   |  4
-rw-r--r--  sys/fs/nfsserver/nfs_nfsdport.c  |  8
-rw-r--r--  sys/fs/procfs/procfs_map.c       | 12
-rw-r--r--  sys/fs/tmpfs/tmpfs_subr.c        | 12
-rw-r--r--  sys/fs/tmpfs/tmpfs_vnops.c       | 46
9 files changed, 68 insertions, 62 deletions
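
Every hunk in this diff applies the same conversion: the per-object VM lock is now a reader/writer lock rather than a sleep mutex, so the old VM_OBJECT_LOCK()/VM_OBJECT_UNLOCK() macros are replaced by their explicit write-lock counterparts VM_OBJECT_WLOCK()/VM_OBJECT_WUNLOCK(), and the affected files gain #include <sys/rwlock.h> (dropping <sys/mutex.h> where it was only needed for the object lock). As a minimal before/after sketch, modeled on the fuse_io_invalbuf()/ncl_vinvalbuf() hunks below rather than copied from any one of them:

	/* Before: the VM object lock was a sleep mutex. */
	VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
	vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
	VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);

	/* After: the lock is an rwlock(9); paths that modify page state take it for writing. */
	VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
	vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
	VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
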
diff --git a/sys/fs/fuse/fuse_io.c b/sys/fs/fuse/fuse_io.c
index 5feec1e..89b57bb 100644
--- a/sys/fs/fuse/fuse_io.c
+++ b/sys/fs/fuse/fuse_io.c
@@ -69,6 +69,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/mutex.h>
+#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
@@ -784,9 +785,9 @@ fuse_io_invalbuf(struct vnode *vp, struct thread *td)
fvdat->flag |= FN_FLUSHINPROG;
if (vp->v_bufobj.bo_object != NULL) {
- VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
+ VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
+ VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
}
error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
while (error) {
diff --git a/sys/fs/fuse/fuse_vnops.c b/sys/fs/fuse/fuse_vnops.c
index cc9733d..69c511d 100644
--- a/sys/fs/fuse/fuse_vnops.c
+++ b/sys/fs/fuse/fuse_vnops.c
@@ -67,7 +67,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
-#include <sys/mutex.h>
+#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/proc.h>
#include <sys/mount.h>
@@ -1758,7 +1758,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
* can only occur at the file EOF.
*/
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
fuse_vm_page_lock_queues();
if (pages[ap->a_reqpage]->valid != 0) {
for (i = 0; i < npages; ++i) {
@@ -1769,11 +1769,11 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
}
}
fuse_vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
return 0;
}
fuse_vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
/*
* We use only the kva address for the buffer, but this is extremely
@@ -1803,7 +1803,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
if (error && (uio.uio_resid == count)) {
FS_DEBUG("error %d\n", error);
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
fuse_vm_page_lock_queues();
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
@@ -1813,7 +1813,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
}
}
fuse_vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
return VM_PAGER_ERROR;
}
/*
@@ -1823,7 +1823,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
*/
size = count - uio.uio_resid;
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
fuse_vm_page_lock_queues();
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
@@ -1886,7 +1886,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
}
}
fuse_vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
return 0;
}
@@ -1975,9 +1975,9 @@ fuse_vnop_putpages(struct vop_putpages_args *ap)
for (i = 0; i < nwritten; i++) {
rtvals[i] = VM_PAGER_OK;
- VM_OBJECT_LOCK(pages[i]->object);
+ VM_OBJECT_WLOCK(pages[i]->object);
vm_page_undirty(pages[i]);
- VM_OBJECT_UNLOCK(pages[i]->object);
+ VM_OBJECT_WUNLOCK(pages[i]->object);
}
}
return rtvals[0];
diff --git a/sys/fs/nfsclient/nfs_clbio.c b/sys/fs/nfsclient/nfs_clbio.c
index dba3bc9..f0a44a4 100644
--- a/sys/fs/nfsclient/nfs_clbio.c
+++ b/sys/fs/nfsclient/nfs_clbio.c
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
+#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@@ -134,7 +135,7 @@ ncl_getpages(struct vop_getpages_args *ap)
* allow the pager to zero-out the blanks. Partially valid pages
* can only occur at the file EOF.
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (pages[ap->a_reqpage]->valid != 0) {
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
@@ -143,10 +144,10 @@ ncl_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* We use only the kva address for the buffer, but this is extremely
@@ -176,7 +177,7 @@ ncl_getpages(struct vop_getpages_args *ap)
if (error && (uio.uio_resid == count)) {
ncl_printf("nfs_getpages: error %d\n", error);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
vm_page_lock(pages[i]);
@@ -184,7 +185,7 @@ ncl_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
}
@@ -195,7 +196,7 @@ ncl_getpages(struct vop_getpages_args *ap)
*/
size = count - uio.uio_resid;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
@@ -231,7 +232,7 @@ ncl_getpages(struct vop_getpages_args *ap)
if (i != ap->a_reqpage)
vm_page_readahead_finish(m);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
@@ -1353,9 +1354,9 @@ ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
* Now, flush as required.
*/
if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
- VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
+ VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
+ VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
/*
* If the page clean was interrupted, fail the invalidation.
* Not doing so, we run the risk of losing dirty pages in the
diff --git a/sys/fs/nfsclient/nfs_clnode.c b/sys/fs/nfsclient/nfs_clnode.c
index eaaec9c..0cd503c 100644
--- a/sys/fs/nfsclient/nfs_clnode.c
+++ b/sys/fs/nfsclient/nfs_clnode.c
@@ -216,10 +216,10 @@ ncl_inactive(struct vop_inactive_args *ap)
* stateid is available for the writes.
*/
if (vp->v_object != NULL) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
retv = vm_object_page_clean(vp->v_object, 0, 0,
OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
} else
retv = TRUE;
if (retv == TRUE) {
diff --git a/sys/fs/nfsclient/nfs_clvnops.c b/sys/fs/nfsclient/nfs_clvnops.c
index f778009..2e105f8 100644
--- a/sys/fs/nfsclient/nfs_clvnops.c
+++ b/sys/fs/nfsclient/nfs_clvnops.c
@@ -697,9 +697,9 @@ nfs_close(struct vop_close_args *ap)
* mmap'ed writes or via write().
*/
if (nfs_clean_pages_on_close && vp->v_object) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
mtx_lock(&np->n_mtx);
if (np->n_flag & NMODIFIED) {
diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c
index ef98e2b..d5cc979 100644
--- a/sys/fs/nfsserver/nfs_nfsdport.c
+++ b/sys/fs/nfsserver/nfs_nfsdport.c
@@ -1267,9 +1267,9 @@ nfsvno_fsync(struct vnode *vp, u_int64_t off, int cnt, struct ucred *cred,
*/
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);
} else {
@@ -1298,10 +1298,10 @@ nfsvno_fsync(struct vnode *vp, u_int64_t off, int cnt, struct ucred *cred,
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, off, off + cnt,
OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
bo = &vp->v_bufobj;
diff --git a/sys/fs/procfs/procfs_map.c b/sys/fs/procfs/procfs_map.c
index 3f57add..542c8fe 100644
--- a/sys/fs/procfs/procfs_map.c
+++ b/sys/fs/procfs/procfs_map.c
@@ -43,9 +43,9 @@
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
-#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sbuf.h>
#ifdef COMPAT_FREEBSD32
#include <sys/sysent.h>
@@ -132,7 +132,7 @@ procfs_doprocmap(PFS_FILL_ARGS)
privateresident = 0;
obj = entry->object.vm_object;
if (obj != NULL) {
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
if (obj->shadow_count == 1)
privateresident = obj->resident_page_count;
}
@@ -148,9 +148,9 @@ procfs_doprocmap(PFS_FILL_ARGS)
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
if (tobj != obj)
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
last_timestamp = map->timestamp;
@@ -181,12 +181,12 @@ procfs_doprocmap(PFS_FILL_ARGS)
break;
}
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
flags = obj->flags;
ref_count = obj->ref_count;
shadow_count = obj->shadow_count;
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (vp != NULL) {
vn_fullpath(td, vp, &fullpath, &freepath);
vrele(vp);
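
The procfs hunk above also shows the hand-over-hand discipline used when walking a shadow (backing-object) chain: the next object is write-locked before the previously held one is released, so an object in the chain stays locked throughout the walk. A condensed restatement of that loop, shown here only for reference:

	/* Walk from the top object down its backing chain, keeping lobj locked. */
	for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
		if (tobj != obj)
			VM_OBJECT_WLOCK(tobj);		/* lock the next object first */
		if (lobj != obj)
			VM_OBJECT_WUNLOCK(lobj);	/* then drop the previous one */
		lobj = tobj;				/* lobj ends up as the bottom object, still locked */
	}
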
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 47ac2e6..b003b2c 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -38,9 +38,11 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/fnv_hash.h>
+#include <sys/lock.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
@@ -1270,7 +1272,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
return (ENOSPC);
- VM_OBJECT_LOCK(uobj);
+ VM_OBJECT_WLOCK(uobj);
if (newsize < oldsize) {
/*
* Zero the truncated part of the last page.
@@ -1290,9 +1292,9 @@ retry:
} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL);
if (m == NULL) {
- VM_OBJECT_UNLOCK(uobj);
+ VM_OBJECT_WUNLOCK(uobj);
VM_WAIT;
- VM_OBJECT_LOCK(uobj);
+ VM_OBJECT_WLOCK(uobj);
goto retry;
} else if (m->valid != VM_PAGE_BITS_ALL) {
ma[0] = m;
@@ -1312,7 +1314,7 @@ retry:
if (ignerr)
m = NULL;
else {
- VM_OBJECT_UNLOCK(uobj);
+ VM_OBJECT_WUNLOCK(uobj);
return (EIO);
}
}
@@ -1334,7 +1336,7 @@ retry:
}
}
uobj->size = newpages;
- VM_OBJECT_UNLOCK(uobj);
+ VM_OBJECT_WUNLOCK(uobj);
TMPFS_LOCK(tmp);
tmp->tm_pages_used += (newpages - oldpages);
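
Beyond the mechanical rename, the tmpfs resize path above illustrates why the lock state must be managed explicitly: the object write lock may not be held while sleeping for free memory, so a failed vm_page_alloc() drops it around VM_WAIT and retries. A condensed sketch of that pattern (the real code re-does the page lookup after waking; this shows only the lock handling):

	retry:
		m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(uobj);	/* do not sleep with the object lock held */
			VM_WAIT;			/* wait for the page daemon to reclaim memory */
			VM_OBJECT_WLOCK(uobj);		/* reacquire the lock and try again */
			goto retry;
		}
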
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index bddcf24..54c95ff 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -39,9 +39,11 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
+#include <sys/lock.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/stat.h>
@@ -445,7 +447,7 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
vm_page_t m;
int error, rv;
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
m = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
@@ -455,20 +457,20 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
vm_page_lock(m);
vm_page_free(m);
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(tobj);
+ VM_OBJECT_WUNLOCK(tobj);
return (EIO);
}
} else
vm_page_zero_invalid(m, TRUE);
}
- VM_OBJECT_UNLOCK(tobj);
+ VM_OBJECT_WUNLOCK(tobj);
error = uiomove_fromphys(&m, offset, tlen, uio);
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
vm_page_lock(m);
vm_page_unwire(m, TRUE);
vm_page_unlock(m);
vm_page_wakeup(m);
- VM_OBJECT_UNLOCK(tobj);
+ VM_OBJECT_WUNLOCK(tobj);
return (error);
}
@@ -511,7 +513,7 @@ tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio
offset = addr & PAGE_MASK;
tlen = MIN(PAGE_SIZE - offset, len);
- VM_OBJECT_LOCK(vobj);
+ VM_OBJECT_WLOCK(vobj);
lookupvpg:
if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
vm_page_is_valid(m, offset, tlen)) {
@@ -525,11 +527,11 @@ lookupvpg:
goto lookupvpg;
}
vm_page_busy(m);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
error = uiomove_fromphys(&m, offset, tlen, uio);
- VM_OBJECT_LOCK(vobj);
+ VM_OBJECT_WLOCK(vobj);
vm_page_wakeup(m);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
return (error);
} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
KASSERT(offset == 0,
@@ -544,7 +546,7 @@ lookupvpg:
goto lookupvpg;
}
vm_page_busy(m);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
sched_pin();
sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
ma = (char *)sf_buf_kva(sf);
@@ -557,14 +559,14 @@ lookupvpg:
}
sf_buf_free(sf);
sched_unpin();
- VM_OBJECT_LOCK(vobj);
+ VM_OBJECT_WLOCK(vobj);
if (error == 0)
m->valid = VM_PAGE_BITS_ALL;
vm_page_wakeup(m);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
return (error);
}
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);
return (error);
@@ -634,7 +636,7 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
offset = addr & PAGE_MASK;
tlen = MIN(PAGE_SIZE - offset, len);
- VM_OBJECT_LOCK(vobj);
+ VM_OBJECT_WLOCK(vobj);
lookupvpg:
if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
vm_page_is_valid(vpg, offset, tlen)) {
@@ -649,15 +651,15 @@ lookupvpg:
}
vm_page_busy(vpg);
vm_page_undirty(vpg);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
error = uiomove_fromphys(&vpg, offset, tlen, uio);
} else {
if (vm_page_is_cached(vobj, idx))
vm_page_cache_free(vobj, idx, idx + 1);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
vpg = NULL;
}
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (tpg->valid != VM_PAGE_BITS_ALL) {
@@ -673,14 +675,14 @@ lookupvpg:
} else
vm_page_zero_invalid(tpg, TRUE);
}
- VM_OBJECT_UNLOCK(tobj);
+ VM_OBJECT_WUNLOCK(tobj);
if (vpg == NULL)
error = uiomove_fromphys(&tpg, offset, tlen, uio);
else {
KASSERT(vpg->valid == VM_PAGE_BITS_ALL, ("parts of vpg invalid"));
pmap_copy_page(vpg, tpg);
}
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
if (error == 0) {
KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
("parts of tpg invalid"));
@@ -691,11 +693,11 @@ lookupvpg:
vm_page_unlock(tpg);
vm_page_wakeup(tpg);
out:
- VM_OBJECT_UNLOCK(tobj);
+ VM_OBJECT_WUNLOCK(tobj);
if (vpg != NULL) {
- VM_OBJECT_LOCK(vobj);
+ VM_OBJECT_WLOCK(vobj);
vm_page_wakeup(vpg);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
}
return (error);