author    attilio <attilio@FreeBSD.org>  2013-02-20 12:03:20 +0000
committer attilio <attilio@FreeBSD.org>  2013-02-20 12:03:20 +0000
commit    15bf891afe5ecb096114725fc8e6dc1cc3ef70d6 (patch)
tree      430a1525becbd2674f05fbcf81b84fccc4aa7865 /sys/kern
parent    1f1e13ca0304c5d3cab0d4c118678ec546f935bc (diff)
Rename VM_OBJECT_LOCK(), VM_OBJECT_UNLOCK() and VM_OBJECT_TRYLOCK() to
their "write" versions.

Sponsored by:	EMC / Isilon storage division
Diffstat (limited to 'sys/kern')
 -rw-r--r--  sys/kern/imgact_elf.c        8
 -rw-r--r--  sys/kern/kern_exec.c         6
 -rw-r--r--  sys/kern/kern_proc.c        20
 -rw-r--r--  sys/kern/kern_sharedpage.c   4
 -rw-r--r--  sys/kern/subr_uio.c          4
 -rw-r--r--  sys/kern/sys_process.c      10
 -rw-r--r--  sys/kern/sysv_shm.c          4
 -rw-r--r--  sys/kern/uipc_shm.c         34
 -rw-r--r--  sys/kern/uipc_syscalls.c    20
 -rw-r--r--  sys/kern/vfs_aio.c           4
 -rw-r--r--  sys/kern/vfs_bio.c          46
 -rw-r--r--  sys/kern/vfs_cluster.c      18
 -rw-r--r--  sys/kern/vfs_default.c       4
 -rw-r--r--  sys/kern/vfs_subr.c         20
 -rw-r--r--  sys/kern/vfs_syscalls.c      4
 -rw-r--r--  sys/kern/vfs_vnops.c         4
16 files changed, 105 insertions, 105 deletions
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 2658411..8e16ca0 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -1278,15 +1278,15 @@ each_writable_segment(td, func, closure)
continue;
/* Ignore memory-mapped devices and such things. */
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
while ((backing_object = object->backing_object) != NULL) {
- VM_OBJECT_LOCK(backing_object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(object);
object = backing_object;
}
ignore_entry = object->type != OBJT_DEFAULT &&
object->type != OBJT_SWAP && object->type != OBJT_VNODE;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (ignore_entry)
continue;
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 5f0160f..6f9ff4f 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -933,7 +933,7 @@ exec_map_first_page(imgp)
object = imgp->vp->v_object;
if (object == NULL)
return (EACCES);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
#if VM_NRESERVLEVEL > 0
if ((object->flags & OBJ_COLORED) == 0) {
object->flags |= OBJ_COLORED;
@@ -968,7 +968,7 @@ exec_map_first_page(imgp)
vm_page_free(ma[0]);
vm_page_unlock(ma[0]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (EIO);
}
}
@@ -976,7 +976,7 @@ exec_map_first_page(imgp)
vm_page_hold(ma[0]);
vm_page_unlock(ma[0]);
vm_page_wakeup(ma[0]);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
imgp->firstpage = sf_buf_alloc(ma[0], 0);
imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 974eeb7..6c46801 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -1995,7 +1995,7 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
kve->kve_private_resident = 0;
obj = entry->object.vm_object;
if (obj != NULL) {
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
if (obj->shadow_count == 1)
kve->kve_private_resident =
obj->resident_page_count;
@@ -2010,9 +2010,9 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
if (tobj != obj)
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
@@ -2072,11 +2072,11 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
break;
}
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
kve->kve_ref_count = obj->ref_count;
kve->kve_shadow_count = obj->shadow_count;
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (vp != NULL) {
vn_fullpath(curthread, vp, &fullpath,
&freepath);
@@ -2162,7 +2162,7 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
kve->kve_private_resident = 0;
obj = entry->object.vm_object;
if (obj != NULL) {
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
if (obj->shadow_count == 1)
kve->kve_private_resident =
obj->resident_page_count;
@@ -2183,9 +2183,9 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
if (tobj != obj)
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
@@ -2247,11 +2247,11 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
break;
}
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
kve->kve_ref_count = obj->ref_count;
kve->kve_shadow_count = obj->shadow_count;
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (vp != NULL) {
vn_fullpath(curthread, vp, &fullpath,
&freepath);
diff --git a/sys/kern/kern_sharedpage.c b/sys/kern/kern_sharedpage.c
index 35bb36a..20b9038 100644
--- a/sys/kern/kern_sharedpage.c
+++ b/sys/kern/kern_sharedpage.c
@@ -107,11 +107,11 @@ shared_page_init(void *dummy __unused)
sx_init(&shared_page_alloc_sx, "shpsx");
shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
VM_PROT_DEFAULT, 0, NULL);
- VM_OBJECT_LOCK(shared_page_obj);
+ VM_OBJECT_WLOCK(shared_page_obj);
m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_RETRY | VM_ALLOC_NOBUSY |
VM_ALLOC_ZERO);
m->valid = VM_PAGE_BITS_ALL;
- VM_OBJECT_UNLOCK(shared_page_obj);
+ VM_OBJECT_WUNLOCK(shared_page_obj);
addr = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
pmap_qenter(addr, &m, 1);
shared_page_mapping = (char *)addr;
diff --git a/sys/kern/subr_uio.c b/sys/kern/subr_uio.c
index 26ea52d..1ee265c 100644
--- a/sys/kern/subr_uio.c
+++ b/sys/kern/subr_uio.c
@@ -104,7 +104,7 @@ vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
&upindex, &prot, &wired)) != KERN_SUCCESS) {
return(EFAULT);
}
- VM_OBJECT_LOCK(uobject);
+ VM_OBJECT_WLOCK(uobject);
retry:
if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
@@ -124,7 +124,7 @@ retry:
}
vm_page_insert(kern_pg, uobject, upindex);
vm_page_dirty(kern_pg);
- VM_OBJECT_UNLOCK(uobject);
+ VM_OBJECT_WUNLOCK(uobject);
vm_map_lookup_done(map, entry);
return(KERN_SUCCESS);
}
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index bad48ba..7dc43c4 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -382,7 +382,7 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
obj = entry->object.vm_object;
if (obj != NULL)
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
} while (0);
vm_map_unlock_read(map);
@@ -395,9 +395,9 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
lobj = obj;
for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
if (tobj != obj)
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
pve->pve_offset += tobj->backing_object_offset;
}
@@ -405,8 +405,8 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
if (vp != NULL)
vref(vp);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(obj);
if (vp != NULL) {
freepath = NULL;
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index ba419f3..a1c6b34 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -708,10 +708,10 @@ shmget_allocate_segment(td, uap, mode)
#endif
return (ENOMEM);
}
- VM_OBJECT_LOCK(shm_object);
+ VM_OBJECT_WLOCK(shm_object);
vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
vm_object_set_flag(shm_object, OBJ_NOSPLIT);
- VM_OBJECT_UNLOCK(shm_object);
+ VM_OBJECT_WUNLOCK(shm_object);
shmseg->object = shm_object;
shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index a7dec24..3b43f8f 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -254,9 +254,9 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
int base, rv;
object = shmfd->shm_object;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (length == shmfd->shm_size) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
nobjsize = OFF_TO_IDX(length + PAGE_MASK);
@@ -268,7 +268,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
* object is mapped into the kernel.
*/
if (shmfd->shm_kmappings > 0) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (EBUSY);
}
@@ -289,9 +289,9 @@ retry:
} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
if (m == NULL) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
VM_WAIT;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto retry;
} else if (m->valid != VM_PAGE_BITS_ALL) {
ma[0] = m;
@@ -309,7 +309,7 @@ retry:
} else {
vm_page_free(m);
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (EIO);
}
}
@@ -339,7 +339,7 @@ retry:
/* Attempt to reserve the swap */
delta = ptoa(nobjsize - object->size);
if (!swap_reserve_by_cred(delta, object->cred)) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (ENOMEM);
}
object->charge += delta;
@@ -350,7 +350,7 @@ retry:
shmfd->shm_mtime = shmfd->shm_ctime;
mtx_unlock(&shm_timestamp_lock);
object->size = nobjsize;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
@@ -371,10 +371,10 @@ shm_alloc(struct ucred *ucred, mode_t mode)
shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
- VM_OBJECT_LOCK(shmfd->shm_object);
+ VM_OBJECT_WLOCK(shmfd->shm_object);
vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
- VM_OBJECT_UNLOCK(shmfd->shm_object);
+ VM_OBJECT_WUNLOCK(shmfd->shm_object);
vfs_timestamp(&shmfd->shm_birthtime);
shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
shmfd->shm_birthtime;
@@ -762,20 +762,20 @@ shm_map(struct file *fp, size_t size, off_t offset, void **memp)
return (EINVAL);
shmfd = fp->f_data;
obj = shmfd->shm_object;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
/*
* XXXRW: This validation is probably insufficient, and subject to
* sign errors. It should be fixed.
*/
if (offset >= shmfd->shm_size ||
offset + size > round_page(shmfd->shm_size)) {
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
return (EINVAL);
}
shmfd->shm_kmappings++;
vm_object_reference_locked(obj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
/* Map the object into the kernel_map and wire it. */
kva = vm_map_min(kernel_map);
@@ -797,9 +797,9 @@ shm_map(struct file *fp, size_t size, off_t offset, void **memp)
vm_object_deallocate(obj);
/* On failure, drop our mapping reference. */
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
shmfd->shm_kmappings--;
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
return (vm_mmap_to_errno(rv));
}
@@ -841,10 +841,10 @@ shm_unmap(struct file *fp, void *mem, size_t size)
if (obj != shmfd->shm_object)
return (EINVAL);
vm_map_remove(map, kva, kva + size);
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
shmfd->shm_kmappings--;
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
return (0);
}
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 0ec26e9..ba05d0e 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1863,12 +1863,12 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
* reclamation of its vnode does not
* immediately destroy it.
*/
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
if ((obj->flags & OBJ_DEAD) == 0) {
vm_object_reference_locked(obj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
} else {
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
obj = NULL;
}
}
@@ -2045,7 +2045,7 @@ retry_space:
vm_offset_t pgoff;
struct mbuf *m0;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
/*
* Calculate the amount to transfer.
* Not to exceed a page, the EOF,
@@ -2063,7 +2063,7 @@ retry_space:
xfsize = omin(rem, xfsize);
xfsize = omin(space - loopbytes, xfsize);
if (xfsize <= 0) {
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
done = 1; /* all data sent */
break;
}
@@ -2084,7 +2084,7 @@ retry_space:
* block.
*/
if (pg->valid && vm_page_is_valid(pg, pgoff, xfsize))
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
else if (m != NULL)
error = EAGAIN; /* send what we already got */
else if (uap->flags & SF_NODISKIO)
@@ -2098,7 +2098,7 @@ retry_space:
* when the I/O completes.
*/
vm_page_io_start(pg);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
/*
* Get the page from backing store.
@@ -2120,10 +2120,10 @@ retry_space:
td->td_ucred, NOCRED, &resid, td);
VOP_UNLOCK(vp, 0);
after_read:
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vm_page_io_finish(pg);
if (!error)
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
mbstat.sf_iocnt++;
}
if (error) {
@@ -2138,7 +2138,7 @@ retry_space:
pg->busy == 0 && !(pg->oflags & VPO_BUSY))
vm_page_free(pg);
vm_page_unlock(pg);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (error == EAGAIN)
error = 0; /* not a real error */
break;
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index bae419d..28b3175 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -842,9 +842,9 @@ aio_fsync_vnode(struct thread *td, struct vnode *vp)
goto drop;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (vp->v_object != NULL) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index d8ed908..12217e4 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1380,7 +1380,7 @@ brelse(struct buf *bp)
*/
resid = bp->b_bufsize;
foff = bp->b_offset;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
for (i = 0; i < bp->b_npages; i++) {
int had_bogus = 0;
@@ -1428,7 +1428,7 @@ brelse(struct buf *bp)
resid -= PAGE_SIZE - (foff & PAGE_MASK);
foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
}
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (bp->b_flags & (B_INVAL | B_RELBUF))
vfs_vmio_release(bp);
@@ -1656,7 +1656,7 @@ vfs_vmio_release(struct buf *bp)
vm_page_t m;
pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
- VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
bp->b_pages[i] = NULL;
@@ -1688,7 +1688,7 @@ vfs_vmio_release(struct buf *bp)
}
vm_page_unlock(m);
}
- VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
if (bp->b_bufsize) {
bufspacewakeup();
@@ -2465,7 +2465,7 @@ inmem(struct vnode * vp, daddr_t blkno)
size = vp->v_mount->mnt_stat.f_iosize;
off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
if (!m)
@@ -2477,11 +2477,11 @@ inmem(struct vnode * vp, daddr_t blkno)
(vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
goto notinmem;
}
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
return 1;
notinmem:
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
return (0);
}
@@ -2511,7 +2511,7 @@ vfs_clean_pages_dirty_buf(struct buf *bp)
KASSERT(bp->b_offset != NOOFFSET,
("vfs_clean_pages_dirty_buf: no buffer offset"));
- VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
vfs_drain_busy_pages(bp);
vfs_setdirty_locked_object(bp);
for (i = 0; i < bp->b_npages; i++) {
@@ -2524,7 +2524,7 @@ vfs_clean_pages_dirty_buf(struct buf *bp)
/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
foff = noff;
}
- VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
}
static void
@@ -3036,7 +3036,7 @@ allocbuf(struct buf *bp, int size)
(vm_offset_t)bp->b_data) +
(desiredpages << PAGE_SHIFT),
(bp->b_npages - desiredpages));
- VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (i = desiredpages; i < bp->b_npages; i++) {
/*
* the page is not freed here -- it
@@ -3055,7 +3055,7 @@ allocbuf(struct buf *bp, int size)
vm_page_unwire(m, 0);
vm_page_unlock(m);
}
- VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
bp->b_npages = desiredpages;
}
} else if (size > bp->b_bcount) {
@@ -3076,7 +3076,7 @@ allocbuf(struct buf *bp, int size)
obj = bp->b_bufobj->bo_object;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
while (bp->b_npages < desiredpages) {
vm_page_t m;
@@ -3138,7 +3138,7 @@ allocbuf(struct buf *bp, int size)
toff += tinc;
tinc = PAGE_SIZE;
}
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
/*
* Step 3, fixup the KVM pmap. Remember that
@@ -3393,7 +3393,7 @@ bufdone_finish(struct buf *bp)
bp->b_flags |= B_CACHE;
}
bogus = 0;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
for (i = 0; i < bp->b_npages; i++) {
int bogusflag = 0;
int resid;
@@ -3435,7 +3435,7 @@ bufdone_finish(struct buf *bp)
iosize -= resid;
}
vm_object_pip_wakeupn(obj, 0);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (bogus)
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
bp->b_pages, bp->b_npages);
@@ -3473,7 +3473,7 @@ vfs_unbusy_pages(struct buf *bp)
return;
obj = bp->b_bufobj->bo_object;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
if (m == bogus_page) {
@@ -3488,7 +3488,7 @@ vfs_unbusy_pages(struct buf *bp)
vm_page_io_finish(m);
}
vm_object_pip_wakeupn(obj, 0);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
/*
@@ -3609,7 +3609,7 @@ vfs_busy_pages(struct buf *bp, int clear_modify)
foff = bp->b_offset;
KASSERT(bp->b_offset != NOOFFSET,
("vfs_busy_pages: no buffer offset"));
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vfs_drain_busy_pages(bp);
if (bp->b_bufsize != 0)
vfs_setdirty_locked_object(bp);
@@ -3646,7 +3646,7 @@ vfs_busy_pages(struct buf *bp, int clear_modify)
}
foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
}
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (bogus)
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
bp->b_pages, bp->b_npages);
@@ -3677,7 +3677,7 @@ vfs_bio_set_valid(struct buf *bp, int base, int size)
base += (bp->b_offset & PAGE_MASK);
n = PAGE_SIZE - (base & PAGE_MASK);
- VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
m = bp->b_pages[i];
if (n > size)
@@ -3687,7 +3687,7 @@ vfs_bio_set_valid(struct buf *bp, int base, int size)
size -= n;
n = PAGE_SIZE;
}
- VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
}
/*
@@ -3714,7 +3714,7 @@ vfs_bio_clrbuf(struct buf *bp)
}
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
- VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
(bp->b_offset & PAGE_MASK) == 0) {
if (bp->b_pages[0] == bogus_page)
@@ -3753,7 +3753,7 @@ vfs_bio_clrbuf(struct buf *bp)
bp->b_pages[i]->valid |= mask;
}
unlock:
- VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
bp->b_resid = 0;
}
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 0f22be9..1660cb4 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -407,7 +407,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
*/
off = tbp->b_offset;
tsize = size;
- VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
for (j = 0; tsize > 0; j++) {
toff = off & PAGE_MASK;
tinc = tsize;
@@ -421,7 +421,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
off += tinc;
tsize -= tinc;
}
- VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
if (tsize > 0) {
bqrelse(tbp);
break;
@@ -456,7 +456,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
BUF_KERNPROC(tbp);
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
- VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
@@ -470,7 +470,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
if (m->valid == VM_PAGE_BITS_ALL)
tbp->b_pages[j] = bogus_page;
}
- VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
/*
* Don't inherit tbp->b_bufsize as it may be larger due to
* a non-page-aligned size. Instead just aggregate using
@@ -488,13 +488,13 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
* Fully valid pages in the cluster are already good and do not need
* to be re-read from disk. Replace the page with bogus_page
*/
- VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (j = 0; j < bp->b_npages; j++) {
VM_OBJECT_LOCK_ASSERT(bp->b_pages[j]->object, RA_WLOCKED);
if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
bp->b_pages[j] = bogus_page;
}
- VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
if (bp->b_bufsize > bp->b_kvasize)
panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
bp->b_bufsize, bp->b_kvasize);
@@ -919,12 +919,12 @@ cluster_wbuild(vp, size, start_lbn, len)
if (tbp->b_flags & B_VMIO) {
vm_page_t m;
- VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
if (i != 0) { /* if not first buffer */
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
if (m->oflags & VPO_BUSY) {
- VM_OBJECT_UNLOCK(
+ VM_OBJECT_WUNLOCK(
tbp->b_object);
bqrelse(tbp);
goto finishcluster;
@@ -941,7 +941,7 @@ cluster_wbuild(vp, size, start_lbn, len)
bp->b_npages++;
}
}
- VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
}
bp->b_bcount += size;
bp->b_bufsize += size;
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index abfc2c8..72a4afb 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -1043,10 +1043,10 @@ vop_stdadvise(struct vop_advise_args *ap)
if (vp->v_object != NULL) {
start = trunc_page(ap->a_start);
end = round_page(ap->a_end);
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_cache(vp->v_object, OFF_TO_IDX(start),
OFF_TO_IDX(end));
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
VOP_UNLOCK(vp, 0);
break;
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 0fac234..d0c6bb8 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1245,9 +1245,9 @@ bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
bufobj_wwait(bo, 0, 0);
BO_UNLOCK(bo);
if (bo->bo_object != NULL) {
- VM_OBJECT_LOCK(bo->bo_object);
+ VM_OBJECT_WLOCK(bo->bo_object);
vm_object_pip_wait(bo->bo_object, "bovlbx");
- VM_OBJECT_UNLOCK(bo->bo_object);
+ VM_OBJECT_WUNLOCK(bo->bo_object);
}
BO_LOCK(bo);
} while (bo->bo_numoutput > 0);
@@ -1258,10 +1258,10 @@ bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
*/
if (bo->bo_object != NULL &&
(flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) {
- VM_OBJECT_LOCK(bo->bo_object);
+ VM_OBJECT_WLOCK(bo->bo_object);
vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
OBJPR_CLEANONLY : 0);
- VM_OBJECT_UNLOCK(bo->bo_object);
+ VM_OBJECT_WUNLOCK(bo->bo_object);
}
#ifdef INVARIANTS
@@ -2521,9 +2521,9 @@ vinactive(struct vnode *vp, struct thread *td)
*/
obj = vp->v_object;
if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
VOP_INACTIVE(vp, td);
VI_LOCK(vp);
@@ -2604,9 +2604,9 @@ loop:
*/
if (flags & WRITECLOSE) {
if (vp->v_object != NULL) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);
if (error != 0) {
@@ -3504,11 +3504,11 @@ vfs_msync(struct mount *mp, int flags)
obj = vp->v_object;
if (obj != NULL) {
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0,
flags == MNT_WAIT ?
OBJPC_SYNC : OBJPC_NOSYNC);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
vput(vp);
}
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index be1a70f..4232935 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -3433,9 +3433,9 @@ sys_fsync(td, uap)
vn_lock(vp, lock_flags | LK_RETRY);
AUDIT_ARG_VNODE1(vp);
if (vp->v_object != NULL) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 5105a48..271e022 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -1897,9 +1897,9 @@ vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
if ((object = vp->v_object) == NULL)
return;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_object_page_remove(object, start, end, 0);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
int