author		attilio <attilio@FreeBSD.org>	2013-08-09 11:11:11 +0000
committer	attilio <attilio@FreeBSD.org>	2013-08-09 11:11:11 +0000
commit		16c7563cf4ffce21e633bfb33a25ab16072fc75e (patch)
tree		e2e9382aea2fd37ac6a9eec19380892618e99f37 /sys/vm/swap_pager.c
parent		fcb31f05a939358e510816e46ff7729b2ffd94da (diff)
The soft and hard busy mechanisms rely on the vm object lock to work.
Unify the two concepts into a real, minimal sxlock where the shared
acquisition represents the soft busy and the exclusive acquisition
represents the hard busy.
The old VPO_WANTED mechanism becomes the hard path for this new lock
and it becomes per-page rather than per-object.
The vm_object lock becomes an interlock for this functionality: it
can be held in either read or write mode.  However, if the vm_object
lock is held in read mode while acquiring or releasing the busy
state, the thread owner cannot make any assumption about the busy
state unless it is also busying the page.

Also:
- Add a new flag to directly share-busy pages while vm_page_alloc
  and vm_page_grab are being executed.  This will be very helpful
  once these functions happen under a read object lock.
- Move the swapping sleep into its own per-object flag.

The KPI is heavily changed; this is why the version is bumped.  It
is very likely that some VM KPI users will need to change their own
code.

Sponsored by:	EMC / Isilon storage division
Discussed with:	alc
Reviewed by:	jeff, kib
Tested by:	gavin, bapt (older version)
Tested by:	pho, scottl
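Read as a whole, the change replaces the old busy entry points with
shared/exclusive acquisitions of the new per-page lock.  The minimal
sketch below pairs old and new calls; vm_page_sunbusy() and
vm_page_xunbusy() are taken from the diff that follows, while
vm_page_sbusy() as the shared-acquire side is an assumption inferred
from the naming introduced here, not something this hunk set shows.

/*
 * Minimal sketch of the unified busy KPI after this change.  Only the
 * unbusy side (vm_page_sunbusy(), vm_page_xunbusy()) is confirmed by
 * the diff below; vm_page_sbusy() as the shared-acquire entry point
 * is an assumption based on the naming above.
 */
static void
busy_sketch(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);

	/* Hard busy == exclusive acquisition; grab returns the page busied. */
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	/* ... safe to change the page's identity/validity here ... */
	vm_page_xunbusy(m);		/* replaces vm_page_wakeup() */

	/* Soft busy == shared acquisition; identity stays stable for I/O. */
	vm_page_sbusy(m);		/* assumed shared-acquire side */
	VM_OBJECT_WUNLOCK(object);
	/* ... page may be under I/O here without the object lock ... */
	VM_OBJECT_WLOCK(object);
	vm_page_sunbusy(m);		/* replaces vm_page_io_finish() */
	VM_OBJECT_WUNLOCK(object);
}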
Diffstat (limited to 'sys/vm/swap_pager.c')
-rw-r--r--	sys/vm/swap_pager.c	40
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 319811a..f7ba736 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1219,9 +1219,10 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 	 */
 	VM_OBJECT_WLOCK(object);
 	while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
-		mreq->oflags |= VPO_WANTED;
+		mreq->oflags |= VPO_SWAPSLEEP;
 		PCPU_INC(cnt.v_intrans);
-		if (VM_OBJECT_SLEEP(object, mreq, PSWP, "swread", hz * 20)) {
+		if (VM_OBJECT_SLEEP(object, &object->paging_in_progress, PSWP,
+		    "swread", hz * 20)) {
 			printf(
 "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
 			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
@@ -1459,12 +1460,6 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
  *	Completion routine for asynchronous reads and writes from/to swap.
  *	Also called manually by synchronous code to finish up a bp.
  *
- *	For READ operations, the pages are VPO_BUSY'd.  For WRITE operations,
- *	the pages are vm_page_t->busy'd.  For READ operations, we VPO_BUSY
- *	unbusy all pages except the 'main' request page.  For WRITE
- *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
- *	because we marked them all VM_PAGER_PEND on return from putpages ).
- *
  *	This routine may not sleep.
  */
 static void
@@ -1514,6 +1509,10 @@ swp_pager_async_iodone(struct buf *bp)
 		vm_page_t m = bp->b_pages[i];
 
 		m->oflags &= ~VPO_SWAPINPROG;
+		if (m->oflags & VPO_SWAPSLEEP) {
+			m->oflags &= ~VPO_SWAPSLEEP;
+			wakeup(&object->paging_in_progress);
+		}
 
 		if (bp->b_ioflags & BIO_ERROR) {
 			/*
@@ -1542,8 +1541,11 @@ swp_pager_async_iodone(struct buf *bp)
 				m->valid = 0;
 				if (i != bp->b_pager.pg_reqpage)
 					swp_pager_free_nrpage(m);
-				else
+				else {
+					vm_page_lock(m);
 					vm_page_flash(m);
+					vm_page_unlock(m);
+				}
 				/*
 				 * If i == bp->b_pager.pg_reqpage, do not wake
 				 * the page up.  The caller needs to.
@@ -1558,7 +1560,7 @@ swp_pager_async_iodone(struct buf *bp)
 				vm_page_lock(m);
 				vm_page_activate(m);
 				vm_page_unlock(m);
-				vm_page_io_finish(m);
+				vm_page_sunbusy(m);
 			}
 		} else if (bp->b_iocmd == BIO_READ) {
 			/*
@@ -1575,7 +1577,7 @@ swp_pager_async_iodone(struct buf *bp)
 			 * Note that the requested page, reqpage, is left
 			 * busied, but we still have to wake it up.  The
 			 * other pages are released (unbusied) by
-			 * vm_page_wakeup().
+			 * vm_page_xunbusy().
 			 */
 			KASSERT(!pmap_page_is_mapped(m),
 			    ("swp_pager_async_iodone: page %p is mapped", m));
@@ -1595,9 +1597,12 @@ swp_pager_async_iodone(struct buf *bp)
 				vm_page_lock(m);
 				vm_page_deactivate(m);
 				vm_page_unlock(m);
-				vm_page_wakeup(m);
-			} else
+				vm_page_xunbusy(m);
+			} else {
+				vm_page_lock(m);
 				vm_page_flash(m);
+				vm_page_unlock(m);
+			}
 		} else {
 			/*
 			 * For write success, clear the dirty
@@ -1608,7 +1613,7 @@ swp_pager_async_iodone(struct buf *bp)
 			    ("swp_pager_async_iodone: page %p is not write"
 			    " protected", m));
 			vm_page_undirty(m);
-			vm_page_io_finish(m);
+			vm_page_sunbusy(m);
 			if (vm_page_count_severe()) {
 				vm_page_lock(m);
 				vm_page_try_to_cache(m);
@@ -1706,19 +1711,18 @@ swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
 	vm_page_t m;
 
 	vm_object_pip_add(object, 1);
-	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
-	    VM_ALLOC_NOBUSY);
+	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 	if (m->valid == VM_PAGE_BITS_ALL) {
 		vm_object_pip_subtract(object, 1);
 		vm_page_dirty(m);
 		vm_page_lock(m);
 		vm_page_activate(m);
 		vm_page_unlock(m);
+		vm_page_xunbusy(m);
 		vm_pager_page_unswapped(m);
 		return;
 	}
 
-	vm_page_busy(m);
 	if (swap_pager_getpages(object, &m, 1, 0) != VM_PAGER_OK)
 		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
 	vm_object_pip_subtract(object, 1);
@@ -1726,7 +1730,7 @@ swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
 	vm_page_lock(m);
 	vm_page_deactivate(m);
 	vm_page_unlock(m);
-	vm_page_wakeup(m);
+	vm_page_xunbusy(m);
 	vm_pager_page_unswapped(m);
 }
 
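The swapping-sleep move reads as a single handshake once the
getpages and iodone hunks are put side by side.  The condensed sketch
below uses only names that appear in the diff above; the vm_object
lock is the interlock between testing the flag and delivering the
wakeup, and the sleep channel is the object's paging_in_progress
counter rather than the page itself.

/*
 * Waiter side (swap_pager_getpages): sleep until the in-flight swap
 * read on mreq completes.  The object lock interlocks VPO_SWAPSLEEP
 * against the wakeup; the channel is per-object, not per-page.
 */
VM_OBJECT_WLOCK(object);
while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
	mreq->oflags |= VPO_SWAPSLEEP;
	VM_OBJECT_SLEEP(object, &object->paging_in_progress, PSWP,
	    "swread", hz * 20);
}

/*
 * Waker side (swp_pager_async_iodone), with the object lock held:
 * clear VPO_SWAPINPROG and wake anyone who set VPO_SWAPSLEEP.
 */
m->oflags &= ~VPO_SWAPINPROG;
if (m->oflags & VPO_SWAPSLEEP) {
	m->oflags &= ~VPO_SWAPSLEEP;
	wakeup(&object->paging_in_progress);
}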