path: root/sys/kern/vfs_bio.c
author		kmacy <kmacy@FreeBSD.org>	2010-04-30 00:46:43 +0000
committer	kmacy <kmacy@FreeBSD.org>	2010-04-30 00:46:43 +0000
commit		1dc1263413262d13f45f42d89c554d0ddc167ca2 (patch)
tree		250705d42bcb9b364f4fd2233c38faaadadf7ae0 /sys/kern/vfs_bio.c
parent		a43425e8835983e7c93a15a2d6a42fdb292c1676 (diff)
download	FreeBSD-src-1dc1263413262d13f45f42d89c554d0ddc167ca2.zip
		FreeBSD-src-1dc1263413262d13f45f42d89c554d0ddc167ca2.tar.gz
On Alan's advice, rather than do a wholesale conversion on a single
architecture from page queue lock to a hashed array of page locks
(based on a patch by Jeff Roberson), I've implemented page lock
support in the MI code and have only moved vm_page's hold_count
out from under page queue mutex to page lock. This changes
pmap_extract_and_hold on all pmaps.

Supported by:	Bitgravity Inc.
Discussed with:	alc, jeffr, and kib
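[Editorial note: the following is an illustrative sketch of the "hashed
array of page locks" idea the message refers to, not FreeBSD's actual
implementation; every identifier below (pa_lock, PA_LOCK_COUNT,
page_lockptr, page_lock, page_unlock) is invented for the example, and
userspace pthread mutexes stand in for kernel mtx(9) locks.]

	#include <pthread.h>
	#include <stdint.h>

	#define	PAGE_SHIFT	12	/* assume 4 KB pages */
	#define	PA_LOCK_COUNT	32	/* fixed pool of locks */

	static pthread_mutex_t pa_lock[PA_LOCK_COUNT];

	static void
	pa_lock_init(void)
	{
		int i;

		for (i = 0; i < PA_LOCK_COUNT; i++)
			pthread_mutex_init(&pa_lock[i], NULL);
	}

	/* Hash a page's physical address to one lock in the pool. */
	static pthread_mutex_t *
	page_lockptr(uint64_t pa)
	{
		return (&pa_lock[(pa >> PAGE_SHIFT) % PA_LOCK_COUNT]);
	}

	static void
	page_lock(uint64_t pa)
	{
		pthread_mutex_lock(page_lockptr(pa));
	}

	static void
	page_unlock(uint64_t pa)
	{
		pthread_mutex_unlock(page_lockptr(pa));
	}

Two pages that hash to the same slot simply share a lock, which is
harmless for correctness; the point is that unrelated pages no longer
contend on a single global page queue mutex.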
Diffstat (limited to 'sys/kern/vfs_bio.c')
-rw-r--r--	sys/kern/vfs_bio.c	13
1 file changed, 7 insertions, 6 deletions
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 156b676..ea846a5 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -3860,12 +3860,12 @@ vmapbuf(struct buf *bp)
 retry:
 	if (vm_fault_quick(addr >= bp->b_data ? addr : bp->b_data,
 	    prot) < 0) {
-		vm_page_lock_queues();
 		for (i = 0; i < pidx; ++i) {
+			vm_page_lock(bp->b_pages[i]);
 			vm_page_unhold(bp->b_pages[i]);
+			vm_page_unlock(bp->b_pages[i]);
 			bp->b_pages[i] = NULL;
 		}
-		vm_page_unlock_queues();
 		return(-1);
 	}
 	m = pmap_extract_and_hold(pmap, (vm_offset_t)addr, prot);
@@ -3896,11 +3896,12 @@ vunmapbuf(struct buf *bp)
 	npages = bp->b_npages;
 	pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
-	vm_page_lock_queues();
-	for (pidx = 0; pidx < npages; pidx++)
+	for (pidx = 0; pidx < npages; pidx++) {
+		vm_page_lock(bp->b_pages[pidx]);
 		vm_page_unhold(bp->b_pages[pidx]);
-	vm_page_unlock_queues();
-
+		vm_page_unlock(bp->b_pages[pidx]);
+	}
+
 	bp->b_data = bp->b_saveaddr;
 }
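[Editorial note: the hunks above show the mechanical shape of the
change: the single vm_page_lock_queues()/vm_page_unlock_queues() pair
bracketing the loop is replaced by a vm_page_lock()/vm_page_unlock()
pair around each individual unhold. A minimal userspace sketch of that
pattern follows; struct page, its fields, and unhold_pages() are
invented names for illustration, not kernel API.]

	#include <pthread.h>

	/* Invented stand-in for vm_page: a per-page lock guards hold_count. */
	struct page {
		pthread_mutex_t	lock;
		int		hold_count;	/* nonzero keeps the page resident */
	};

	/*
	 * Drop the hold on each page, taking only that page's own lock,
	 * as in the new vunmapbuf() loop above.  The old code instead
	 * held one global queues lock across the entire loop.
	 */
	static void
	unhold_pages(struct page **pages, int npages)
	{
		int i;

		for (i = 0; i < npages; i++) {
			pthread_mutex_lock(&pages[i]->lock);
			pages[i]->hold_count--;
			pthread_mutex_unlock(&pages[i]->lock);
		}
	}

Per-page locking narrows each critical section to a single counter
update, so concurrent completions on different buffers stop
serializing on one mutex.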