author    kmacy <kmacy@FreeBSD.org>  2010-04-30 00:46:43 +0000
committer kmacy <kmacy@FreeBSD.org>  2010-04-30 00:46:43 +0000
commit    1dc1263413262d13f45f42d89c554d0ddc167ca2 (patch)
tree      250705d42bcb9b364f4fd2233c38faaadadf7ae0 /sys/vm/swap_pager.c
parent    a43425e8835983e7c93a15a2d6a42fdb292c1676 (diff)
On Alan's advice, rather than do a wholesale conversion on a single
architecture from page queue lock to a hashed array of page locks
(based on a patch by Jeff Roberson), I've implemented page lock
support in the MI code and have only moved vm_page's hold_count
out from under page queue mutex to page lock.  This changes
pmap_extract_and_hold on all pmaps.

Supported by:	Bitgravity Inc.

Discussed with:	alc, jeffr, and kib
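Illustratively, the pmap_extract_and_hold change the message mentions reduces
to the pattern sketched below.  This is a minimal sketch, not any particular
pmap's actual code: real implementations inline the machine-dependent page
table walk and honor prot, while pmap_extract() merely stands in for that
walk here.

#include <sys/param.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	vm_paddr_t pa;
	vm_page_t m;

	m = NULL;
	/* Stand-in for the real MD walk; prot checking is elided. */
	pa = pmap_extract(pmap, va);
	if (pa != 0) {
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_lock(m);	/* was: vm_page_lock_queues() */
		vm_page_hold(m);	/* hold_count now under the page lock */
		vm_page_unlock(m);	/* was: vm_page_unlock_queues() */
	}
	return (m);
}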
Diffstat (limited to 'sys/vm/swap_pager.c')
 -rw-r--r--	sys/vm/swap_pager.c | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index f47719b..6017a52 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1137,12 +1137,21 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 	if (0 < i || j < count) {
 		int k;
-		vm_page_lock_queues();
-		for (k = 0; k < i; ++k)
+
+		for (k = 0; k < i; ++k) {
+			vm_page_lock(m[k]);
+			vm_page_lock_queues();
 			swp_pager_free_nrpage(m[k]);
-		for (k = j; k < count; ++k)
+			vm_page_unlock_queues();
+			vm_page_unlock(m[k]);
+		}
+		for (k = j; k < count; ++k) {
+			vm_page_lock(m[k]);
+			vm_page_lock_queues();
 			swp_pager_free_nrpage(m[k]);
-		vm_page_unlock_queues();
+			vm_page_unlock_queues();
+			vm_page_unlock(m[k]);
+		}
 	}
 
 	/*
@@ -1497,7 +1506,7 @@ swp_pager_async_iodone(struct buf *bp)
 		object = bp->b_pages[0]->object;
 		VM_OBJECT_LOCK(object);
 	}
-	vm_page_lock_queues();
+
 	/*
 	 * cleanup pages.  If an error occurs writing to swap, we are in
 	 * very serious trouble.  If it happens to be a disk error, though,
@@ -1509,6 +1518,8 @@ swp_pager_async_iodone(struct buf *bp)
 	for (i = 0; i < bp->b_npages; ++i) {
 		vm_page_t m = bp->b_pages[i];
 
+		vm_page_lock(m);
+		vm_page_lock_queues();
 		m->oflags &= ~VPO_SWAPINPROG;
 		if (bp->b_ioflags & BIO_ERROR) {
@@ -1605,8 +1616,9 @@ swp_pager_async_iodone(struct buf *bp)
 			if (vm_page_count_severe())
 				vm_page_try_to_cache(m);
 		}
+		vm_page_unlock_queues();
+		vm_page_unlock(m);
 	}
-	vm_page_unlock_queues();
 
 	/*
 	 * adjust pip.  NOTE: the original parent may still have its own
@@ -1702,10 +1714,12 @@ swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
 	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
 	if (m->valid == VM_PAGE_BITS_ALL) {
 		vm_object_pip_subtract(object, 1);
+		vm_page_lock(m);
 		vm_page_lock_queues();
 		vm_page_activate(m);
 		vm_page_dirty(m);
 		vm_page_unlock_queues();
+		vm_page_unlock(m);
 		vm_page_wakeup(m);
 		vm_pager_page_unswapped(m);
 		return;
@@ -1714,10 +1728,12 @@ swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
 	if (swap_pager_getpages(object, &m, 1, 0) != VM_PAGER_OK)
 		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
 	vm_object_pip_subtract(object, 1);
+	vm_page_lock(m);
 	vm_page_lock_queues();
 	vm_page_dirty(m);
 	vm_page_dontneed(m);
 	vm_page_unlock_queues();
+	vm_page_unlock(m);
 	vm_page_wakeup(m);
 	vm_pager_page_unswapped(m);
 }
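Every hunk above follows the same ordering rule: the per-page lock is
acquired before the page queues lock and released after it, so the queue
lock stays innermost.  A minimal sketch of that discipline, using a
hypothetical helper that is not in the tree:

#include <sys/param.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical helper illustrating the lock order used throughout
 * this change: page lock outermost, page queues lock innermost,
 * released in reverse order of acquisition.
 */
static void
example_activate_page(vm_page_t m)
{
	vm_page_lock(m);
	vm_page_lock_queues();
	vm_page_activate(m);	/* any queue manipulation of m goes here */
	vm_page_unlock_queues();
	vm_page_unlock(m);
}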