author     phk <phk@FreeBSD.org>  2004-10-28 08:38:46 +0000
committer  phk <phk@FreeBSD.org>  2004-10-28 08:38:46 +0000
commit     08ed0626b77ca31e7c6730b872680b8b21a3f24c (patch)
tree       e4d6a7c6bb12f79d7de8ddf70a6de577fd6146e8
parent     169dc919b8eccc8395d3b89e0bccdb8dfc61b13c (diff)
download   FreeBSD-src-08ed0626b77ca31e7c6730b872680b8b21a3f24c.zip
           FreeBSD-src-08ed0626b77ca31e7c6730b872680b8b21a3f24c.tar.gz
Lock bp->b_bufobj->bo_object instead of bp->b_object
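The commit replaces every direct use of the buffer's cached VM object pointer (bp->b_object) with an indirection through the buffer's owning bufobj (bp->b_bufobj->bo_object), so the VM object is always taken from the bufobj. A minimal sketch of the before/after locking pattern follows; the struct declarations are simplified placeholders inferred from this diff (the real definitions carry many more fields), and touch_pages() is a hypothetical illustration, not a function in the tree.

/*
 * Sketch only: partial, assumed declarations. The real structs live in
 * the kernel headers and are far larger; only the fields this commit
 * touches are shown.
 */
struct bufobj {
	vm_object_t	 bo_object;	/* VM object backing the buffers */
	/* ... */
};

struct buf {
	struct bufobj	*b_bufobj;	/* owning buffer object */
	/* ... */
};

static void
touch_pages(struct buf *bp)	/* hypothetical example */
{
	int i;

	/* Old pattern (removed by this commit):
	 *	VM_OBJECT_LOCK(bp->b_object);
	 */

	/* New pattern: reach the VM object through the bufobj. */
	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
	vm_page_lock_queues();
	for (i = 0; i < bp->b_npages; i++) {
		/* manipulate bp->b_pages[i] under both locks */
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
}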
-rw-r--r--  sys/kern/vfs_bio.c      20
-rw-r--r--  sys/kern/vfs_cluster.c  16
2 files changed, 18 insertions, 18 deletions
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 64231f2..222852a 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1555,7 +1555,7 @@ vfs_vmio_release(struct buf *bp)
vm_page_t m;
GIANT_REQUIRED;
- VM_OBJECT_LOCK(bp->b_object);
+ VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
vm_page_lock_queues();
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
@@ -1592,7 +1592,7 @@ vfs_vmio_release(struct buf *bp)
}
}
vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(bp->b_object);
+ VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
if (bp->b_bufsize) {
@@ -2813,7 +2813,7 @@ allocbuf(struct buf *bp, int size)
if (desiredpages < bp->b_npages) {
vm_page_t m;
- VM_OBJECT_LOCK(bp->b_object);
+ VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
vm_page_lock_queues();
for (i = desiredpages; i < bp->b_npages; i++) {
/*
@@ -2831,7 +2831,7 @@ allocbuf(struct buf *bp, int size)
vm_page_unwire(m, 0);
}
vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(bp->b_object);
+ VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
(desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
bp->b_npages = desiredpages;
@@ -3452,7 +3452,7 @@ vfs_clean_pages(struct buf *bp)
foff = bp->b_offset;
KASSERT(bp->b_offset != NOOFFSET,
("vfs_clean_pages: no buffer offset"));
- VM_OBJECT_LOCK(bp->b_object);
+ VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
vm_page_lock_queues();
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
@@ -3466,7 +3466,7 @@ vfs_clean_pages(struct buf *bp)
foff = noff;
}
vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(bp->b_object);
+ VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
}
/*
@@ -3496,7 +3496,7 @@ vfs_bio_set_validclean(struct buf *bp, int base, int size)
base += (bp->b_offset & PAGE_MASK);
n = PAGE_SIZE - (base & PAGE_MASK);
- VM_OBJECT_LOCK(bp->b_object);
+ VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
vm_page_lock_queues();
for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
m = bp->b_pages[i];
@@ -3510,7 +3510,7 @@ vfs_bio_set_validclean(struct buf *bp, int base, int size)
n = PAGE_SIZE;
}
vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(bp->b_object);
+ VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
}
/*
@@ -3537,7 +3537,7 @@ vfs_bio_clrbuf(struct buf *bp)
}
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
- VM_OBJECT_LOCK(bp->b_object);
+ VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
(bp->b_offset & PAGE_MASK) == 0) {
if (bp->b_pages[0] == bogus_page)
@@ -3579,7 +3579,7 @@ vfs_bio_clrbuf(struct buf *bp)
bp->b_pages[i]->valid |= mask;
}
unlock:
- VM_OBJECT_UNLOCK(bp->b_object);
+ VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
bp->b_resid = 0;
}
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index f866925..530f885 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -410,14 +410,14 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
* take part in the cluster. If it is partially valid
* then we stop.
*/
- VM_OBJECT_LOCK(tbp->b_object);
+ VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
for (j = 0;j < tbp->b_npages; j++) {
VM_OBJECT_LOCK_ASSERT(tbp->b_pages[j]->object,
MA_OWNED);
if (tbp->b_pages[j]->valid)
break;
}
- VM_OBJECT_UNLOCK(tbp->b_object);
+ VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
if (j != tbp->b_npages) {
bqrelse(tbp);
break;
@@ -452,7 +452,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
BUF_KERNPROC(tbp);
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
- VM_OBJECT_LOCK(tbp->b_object);
+ VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
vm_page_lock_queues();
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
@@ -468,7 +468,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
tbp->b_pages[j] = bogus_page;
}
vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(tbp->b_object);
+ VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
/*
* XXX shouldn't this be += size for both, like in
* cluster_wbuild()?
@@ -489,7 +489,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
* Fully valid pages in the cluster are already good and do not need
* to be re-read from disk. Replace the page with bogus_page
*/
- VM_OBJECT_LOCK(bp->b_object);
+ VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
for (j = 0; j < bp->b_npages; j++) {
VM_OBJECT_LOCK_ASSERT(bp->b_pages[j]->object, MA_OWNED);
if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
@@ -497,7 +497,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
bp->b_pages[j] = bogus_page;
}
}
- VM_OBJECT_UNLOCK(bp->b_object);
+ VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
if (bp->b_bufsize > bp->b_kvasize)
panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
bp->b_bufsize, bp->b_kvasize);
@@ -927,7 +927,7 @@ cluster_wbuild(vp, size, start_lbn, len)
if (tbp->b_flags & B_VMIO) {
vm_page_t m;
- VM_OBJECT_LOCK(tbp->b_object);
+ VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
if (i != 0) { /* if not first buffer */
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
@@ -951,7 +951,7 @@ cluster_wbuild(vp, size, start_lbn, len)
}
}
vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(tbp->b_object);
+ VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
}
bp->b_bcount += size;
bp->b_bufsize += size;