author     attilio <attilio@FreeBSD.org>	2013-02-20 12:03:20 +0000
committer  attilio <attilio@FreeBSD.org>	2013-02-20 12:03:20 +0000
commit     15bf891afe5ecb096114725fc8e6dc1cc3ef70d6 (patch)
tree       430a1525becbd2674f05fbcf81b84fccc4aa7865 /sys/fs/nfsclient
parent     1f1e13ca0304c5d3cab0d4c118678ec546f935bc (diff)
Rename VM_OBJECT_LOCK(), VM_OBJECT_UNLOCK() and VM_OBJECT_TRYLOCK() to
their "write" versions.

Sponsored by:	EMC / Isilon storage division
Diffstat (limited to 'sys/fs/nfsclient')
-rw-r--r--  sys/fs/nfsclient/nfs_clbio.c    18
-rw-r--r--  sys/fs/nfsclient/nfs_clnode.c    4
-rw-r--r--  sys/fs/nfsclient/nfs_clvnops.c   4
3 files changed, 13 insertions, 13 deletions
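
The change below is mechanical: every VM_OBJECT_LOCK()/VM_OBJECT_UNLOCK() pair in
the NFS client becomes VM_OBJECT_WLOCK()/VM_OBJECT_WUNLOCK(), with no behavioral
change. As a reader's sketch (not part of this commit; flush_vnode_pages is a
hypothetical helper and the header list is approximate), the resulting pattern in
callers such as ncl_vinvalbuf() and ncl_inactive() looks like this:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_object.h>

/*
 * Hypothetical helper, for illustration only: flush a vnode's dirty
 * pages while holding the VM object's exclusive ("write") lock.  After
 * this commit the lock is taken with VM_OBJECT_WLOCK() and released
 * with VM_OBJECT_WUNLOCK(); the old VM_OBJECT_LOCK()/VM_OBJECT_UNLOCK()
 * names go away, but the locking discipline is identical.
 */
static void
flush_vnode_pages(struct vnode *vp)
{

	if (vp->v_object == NULL)
		return;
	VM_OBJECT_WLOCK(vp->v_object);
	vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
	VM_OBJECT_WUNLOCK(vp->v_object);
}

Only the names change here; every site that previously took VM_OBJECT_LOCK() now
takes the object's write lock explicitly.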
diff --git a/sys/fs/nfsclient/nfs_clbio.c b/sys/fs/nfsclient/nfs_clbio.c
index 64627f1..f0a44a4 100644
--- a/sys/fs/nfsclient/nfs_clbio.c
+++ b/sys/fs/nfsclient/nfs_clbio.c
@@ -135,7 +135,7 @@ ncl_getpages(struct vop_getpages_args *ap)
* allow the pager to zero-out the blanks. Partially valid pages
* can only occur at the file EOF.
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (pages[ap->a_reqpage]->valid != 0) {
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
@@ -144,10 +144,10 @@ ncl_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* We use only the kva address for the buffer, but this is extremely
@@ -177,7 +177,7 @@ ncl_getpages(struct vop_getpages_args *ap)
if (error && (uio.uio_resid == count)) {
ncl_printf("nfs_getpages: error %d\n", error);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
vm_page_lock(pages[i]);
@@ -185,7 +185,7 @@ ncl_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
}
@@ -196,7 +196,7 @@ ncl_getpages(struct vop_getpages_args *ap)
*/
size = count - uio.uio_resid;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
@@ -232,7 +232,7 @@ ncl_getpages(struct vop_getpages_args *ap)
if (i != ap->a_reqpage)
vm_page_readahead_finish(m);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
@@ -1354,9 +1354,9 @@ ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
* Now, flush as required.
*/
if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
- VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
+ VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
+ VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
/*
* If the page clean was interrupted, fail the invalidation.
* Not doing so, we run the risk of losing dirty pages in the
diff --git a/sys/fs/nfsclient/nfs_clnode.c b/sys/fs/nfsclient/nfs_clnode.c
index eaaec9c..0cd503c 100644
--- a/sys/fs/nfsclient/nfs_clnode.c
+++ b/sys/fs/nfsclient/nfs_clnode.c
@@ -216,10 +216,10 @@ ncl_inactive(struct vop_inactive_args *ap)
* stateid is available for the writes.
*/
if (vp->v_object != NULL) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
retv = vm_object_page_clean(vp->v_object, 0, 0,
OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
} else
retv = TRUE;
if (retv == TRUE) {
diff --git a/sys/fs/nfsclient/nfs_clvnops.c b/sys/fs/nfsclient/nfs_clvnops.c
index f778009..2e105f8 100644
--- a/sys/fs/nfsclient/nfs_clvnops.c
+++ b/sys/fs/nfsclient/nfs_clvnops.c
@@ -697,9 +697,9 @@ nfs_close(struct vop_close_args *ap)
* mmap'ed writes or via write().
*/
if (nfs_clean_pages_on_close && vp->v_object) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
mtx_lock(&np->n_mtx);
if (np->n_flag & NMODIFIED) {