summaryrefslogtreecommitdiffstats
path: root/sys/vm/vnode_pager.c
diff options
context:
space:
mode:
authorkmacy <kmacy@FreeBSD.org>2010-04-30 00:46:43 +0000
committerkmacy <kmacy@FreeBSD.org>2010-04-30 00:46:43 +0000
commit1dc1263413262d13f45f42d89c554d0ddc167ca2 (patch)
tree250705d42bcb9b364f4fd2233c38faaadadf7ae0 /sys/vm/vnode_pager.c
parenta43425e8835983e7c93a15a2d6a42fdb292c1676 (diff)
downloadFreeBSD-src-1dc1263413262d13f45f42d89c554d0ddc167ca2.zip
FreeBSD-src-1dc1263413262d13f45f42d89c554d0ddc167ca2.tar.gz
On Alan's advice, rather than do a wholesale conversion on a single
architecture from page queue lock to a hashed array of page locks (based on a patch by Jeff Roberson), I've implemented page lock support in the MI code and have only moved vm_page's hold_count out from under the page queue mutex to the page lock. This changes pmap_extract_and_hold on all pmaps.

Supported by: Bitgravity Inc.
Discussed with: alc, jeffr, and kib
Diffstat (limited to 'sys/vm/vnode_pager.c')
-rw-r--r--sys/vm/vnode_pager.c85
1 file changed, 59 insertions(+), 26 deletions(-)
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index aedc794..eb21c60 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -429,9 +429,11 @@ vnode_pager_setsize(vp, nsize)
* bits. This would prevent bogus_page
* replacement from working properly.
*/
+ vm_page_lock(m);
vm_page_lock_queues();
vm_page_clear_dirty(m, base, PAGE_SIZE - base);
vm_page_unlock_queues();
+ vm_page_unlock(m);
} else if ((nsize & PAGE_MASK) &&
__predict_false(object->cache != NULL)) {
vm_page_cache_free(object, OFF_TO_IDX(nsize),
@@ -719,11 +721,15 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL);
if (error == EOPNOTSUPP) {
VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
+
for (i = 0; i < count; i++)
- if (i != reqpage)
+ if (i != reqpage) {
+ vm_page_lock(m[i]);
+ vm_page_lock_queues();
vm_page_free(m[i]);
- vm_page_unlock_queues();
+ vm_page_unlock_queues();
+ vm_page_unlock(m[i]);
+ }
PCPU_INC(cnt.v_vnodein);
PCPU_INC(cnt.v_vnodepgsin);
error = vnode_pager_input_old(object, m[reqpage]);
@@ -731,11 +737,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
return (error);
} else if (error != 0) {
VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
for (i = 0; i < count; i++)
- if (i != reqpage)
+ if (i != reqpage) {
+ vm_page_lock(m[i]);
+ vm_page_lock_queues();
vm_page_free(m[i]);
- vm_page_unlock_queues();
+ vm_page_unlock_queues();
+ vm_page_unlock(m[i]);
+ }
VM_OBJECT_UNLOCK(object);
return (VM_PAGER_ERROR);
@@ -747,11 +756,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
} else if ((PAGE_SIZE / bsize) > 1 &&
(vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
for (i = 0; i < count; i++)
- if (i != reqpage)
+ if (i != reqpage) {
+ vm_page_lock(m[i]);
+ vm_page_lock_queues();
vm_page_free(m[i]);
- vm_page_unlock_queues();
+ vm_page_unlock_queues();
+ vm_page_unlock(m[i]);
+ }
VM_OBJECT_UNLOCK(object);
PCPU_INC(cnt.v_vnodein);
PCPU_INC(cnt.v_vnodepgsin);
@@ -765,11 +777,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
VM_OBJECT_LOCK(object);
if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
- vm_page_lock_queues();
for (i = 0; i < count; i++)
- if (i != reqpage)
+ if (i != reqpage) {
+ vm_page_lock(m[i]);
+ vm_page_lock_queues();
vm_page_free(m[i]);
- vm_page_unlock_queues();
+ vm_page_unlock_queues();
+ vm_page_unlock(m[i]);
+ }
VM_OBJECT_UNLOCK(object);
return VM_PAGER_OK;
} else if (reqblock == -1) {
@@ -777,11 +792,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
KASSERT(m[reqpage]->dirty == 0,
("vnode_pager_generic_getpages: page %p is dirty", m));
m[reqpage]->valid = VM_PAGE_BITS_ALL;
- vm_page_lock_queues();
for (i = 0; i < count; i++)
- if (i != reqpage)
+ if (i != reqpage) {
+ vm_page_lock(m[i]);
+ vm_page_lock_queues();
vm_page_free(m[i]);
- vm_page_unlock_queues();
+ vm_page_unlock_queues();
+ vm_page_unlock(m[i]);
+ }
VM_OBJECT_UNLOCK(object);
return (VM_PAGER_OK);
}
@@ -800,11 +818,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
&runpg) != 0) {
VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
for (; i < count; i++)
- if (i != reqpage)
+ if (i != reqpage) {
+ vm_page_lock(m[i]);
+ vm_page_lock_queues();
vm_page_free(m[i]);
- vm_page_unlock_queues();
+ vm_page_unlock_queues();
+ vm_page_unlock(m[i]);
+ }
VM_OBJECT_UNLOCK(object);
return (VM_PAGER_ERROR);
}
@@ -818,9 +839,11 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
(object->un_pager.vnp.vnp_size >> 32),
(uintmax_t)object->un_pager.vnp.vnp_size);
}
+ vm_page_lock(m[i]);
vm_page_lock_queues();
vm_page_free(m[i]);
vm_page_unlock_queues();
+ vm_page_unlock(m[i]);
VM_OBJECT_UNLOCK(object);
runend = i + 1;
first = runend;
@@ -829,18 +852,24 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
runend = i + runpg;
if (runend <= reqpage) {
VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
- for (j = i; j < runend; j++)
+ for (j = i; j < runend; j++) {
+ vm_page_lock(m[j]);
+ vm_page_lock_queues();
vm_page_free(m[j]);
- vm_page_unlock_queues();
+ vm_page_unlock_queues();
+ vm_page_unlock(m[j]);
+ }
VM_OBJECT_UNLOCK(object);
} else {
if (runpg < (count - first)) {
VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
- for (i = first + runpg; i < count; i++)
+ for (i = first + runpg; i < count; i++) {
+ vm_page_lock(m[i]);
+ vm_page_lock_queues();
vm_page_free(m[i]);
- vm_page_unlock_queues();
+ vm_page_unlock_queues();
+ vm_page_unlock(m[i]);
+ }
VM_OBJECT_UNLOCK(object);
count = first + runpg;
}
@@ -931,13 +960,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
relpbuf(bp, &vnode_pbuf_freecnt);
VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
vm_page_t mt;
nextoff = tfoff + PAGE_SIZE;
mt = m[i];
+ vm_page_lock(mt);
+ vm_page_lock_queues();
if (nextoff <= object->un_pager.vnp.vnp_size) {
/*
* Read filled up entire page.
@@ -989,8 +1019,9 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(mt);
}
}
+ vm_page_unlock_queues();
+ vm_page_unlock(mt);
}
- vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
if (error) {
printf("vnode_pager_getpages: I/O read error\n");
@@ -1113,10 +1144,12 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
maxsize = object->un_pager.vnp.vnp_size - poffset;
ncount = btoc(maxsize);
if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
+ vm_page_lock(m[ncount - 1]);
vm_page_lock_queues();
vm_page_clear_dirty(m[ncount - 1], pgoff,
PAGE_SIZE - pgoff);
vm_page_unlock_queues();
+ vm_page_unlock(m[ncount - 1]);
}
} else {
maxsize = 0;
OpenPOWER on IntegriCloud