diff options
author | dyson <dyson@FreeBSD.org> | 1996-01-19 04:00:31 +0000 |
---|---|---|
committer | dyson <dyson@FreeBSD.org> | 1996-01-19 04:00:31 +0000 |
commit | 8fc8a772af22f6e03233d248fa2dbd9b5c2bdd7d (patch) | |
tree | 3c31fd95ea745005a9cd6733db5a16f31bd828a6 /sys/vm/swap_pager.c | |
parent | 6755beedbf0ddaa9e66e91c8e74f620ede6bfad5 (diff) | |
download | FreeBSD-src-8fc8a772af22f6e03233d248fa2dbd9b5c2bdd7d.zip FreeBSD-src-8fc8a772af22f6e03233d248fa2dbd9b5c2bdd7d.tar.gz |
Eliminated many redundant vm_map_lookup operations for vm_mmap.
Speed up for vfs_bio -- addition of a routine bqrelse to greatly diminish
overhead for merged cache.
Efficiency improvement for vfs_cluster. It used to do a lot of redundant
calls to cluster_rbuild.
Correct the ordering for vrele of .text and release of credentials.
Use the selective tlb update for 486/586/P6.
Numerous fixes to the size of objects allocated for files. Additionally,
fixes in the various pagers.
Fixes for proper positioning of vnode_pager_setsize in msdosfs and ext2fs.
Fixes in the swap pager for exhausted resources. The pageout code
will not as readily thrash.
Change the page queue flags (PG_ACTIVE, PG_INACTIVE, PG_FREE, PG_CACHE) into
page queue indices (PQ_ACTIVE, PQ_INACTIVE, PQ_FREE, PQ_CACHE),
thereby improving efficiency of several routines.
Eliminate even more unnecessary vm_page_protect operations.
Significantly speed up process forks.
Make vm_object_page_clean more efficient, thereby eliminating the pause
that happens every 30 seconds.
Make sequential clustered writes B_ASYNC instead of B_DELWRI even in the
case of filesystems mounted async.
Fix a panic with busy pages when write clustering is done for non-VMIO
buffers.
Diffstat (limited to 'sys/vm/swap_pager.c')
-rw-r--r-- | sys/vm/swap_pager.c | 66 |
1 files changed, 38 insertions, 28 deletions
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c index 1fc7f85..25b608d 100644 --- a/sys/vm/swap_pager.c +++ b/sys/vm/swap_pager.c @@ -39,7 +39,7 @@ * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$ * * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94 - * $Id: swap_pager.c,v 1.57 1995/12/14 09:54:52 phk Exp $ + * $Id: swap_pager.c,v 1.58 1995/12/17 07:19:55 bde Exp $ */ /* @@ -302,13 +302,13 @@ swap_pager_alloc(handle, size, prot, offset) * rip support of "named anonymous regions" out altogether. */ object = vm_object_allocate(OBJT_SWAP, - OFF_TO_IDX(offset+ PAGE_SIZE - 1 + size)); + OFF_TO_IDX(offset + PAGE_SIZE - 1) + size); object->handle = handle; (void) swap_pager_swp_alloc(object, M_WAITOK); } } else { object = vm_object_allocate(OBJT_SWAP, - OFF_TO_IDX(offset + PAGE_SIZE - 1 + size)); + OFF_TO_IDX(offset + PAGE_SIZE - 1) + size); (void) swap_pager_swp_alloc(object, M_WAITOK); } @@ -1266,6 +1266,7 @@ swap_pager_putpages(object, m, count, sync, rtvals) swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) { s = splbio(); if (curproc == pageproc) { +retryfree: /* * pageout daemon needs a swap control block */ @@ -1273,33 +1274,42 @@ swap_pager_putpages(object, m, count, sync, rtvals) /* * if it does not get one within a short time, then * there is a potential deadlock, so we go-on trying - * to free pages. + * to free pages. It is important to block here as opposed + * to returning, thereby allowing the pageout daemon to continue. + * It is likely that pageout daemon will start suboptimally + * reclaiming vnode backed pages if we don't block. Since the + * I/O subsystem is probably already fully utilized, might as + * well wait. 
*/ - tsleep(&swap_pager_free, PVM, "swpfre", hz/10); - swap_pager_sync(); - if (swap_pager_free.tqh_first == NULL || - swap_pager_free.tqh_first->spc_list.tqe_next == NULL || - swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) { - splx(s); - return VM_PAGER_AGAIN; + if (tsleep(&swap_pager_free, PVM, "swpfre", hz/5)) { + swap_pager_sync(); + if (swap_pager_free.tqh_first == NULL || + swap_pager_free.tqh_first->spc_list.tqe_next == NULL || + swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) { + splx(s); + return VM_PAGER_AGAIN; + } + } else { + /* + * we make sure that pageouts aren't taking up all of + * the free swap control blocks. + */ + swap_pager_sync(); + if (swap_pager_free.tqh_first == NULL || + swap_pager_free.tqh_first->spc_list.tqe_next == NULL || + swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) { + goto retryfree; + } } - } else + } else { pagedaemon_wakeup(); - while (swap_pager_free.tqh_first == NULL || - swap_pager_free.tqh_first->spc_list.tqe_next == NULL || - swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) { - if (curproc == pageproc) { - swap_pager_needflags |= SWAP_FREE_NEEDED_BY_PAGEOUT; - if((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_reserved) - wakeup(&cnt.v_free_count); - } - - swap_pager_needflags |= SWAP_FREE_NEEDED; - tsleep(&swap_pager_free, PVM, "swpfre", 0); - if (curproc == pageproc) - swap_pager_sync(); - else + while (swap_pager_free.tqh_first == NULL || + swap_pager_free.tqh_first->spc_list.tqe_next == NULL || + swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) { + swap_pager_needflags |= SWAP_FREE_NEEDED; + tsleep(&swap_pager_free, PVM, "swpfre", 0); pagedaemon_wakeup(); + } } splx(s); } @@ -1436,7 +1446,7 @@ swap_pager_putpages(object, m, count, sync, rtvals) * optimization, if a page has been read * during the pageout process, we activate it. 
*/ - if ((m[i]->flags & PG_ACTIVE) == 0 && + if ((m[i]->queue != PQ_ACTIVE) && ((m[i]->flags & (PG_WANTED|PG_REFERENCED)) || pmap_is_referenced(VM_PAGE_TO_PHYS(m[i])))) { vm_page_activate(m[i]); @@ -1542,7 +1552,7 @@ swap_pager_finish(spc) for (i = 0; i < spc->spc_count; i++) { pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m[i])); spc->spc_m[i]->dirty = 0; - if ((spc->spc_m[i]->flags & PG_ACTIVE) == 0 && + if ((spc->spc_m[i]->queue != PQ_ACTIVE) && ((spc->spc_m[i]->flags & PG_WANTED) || pmap_is_referenced(VM_PAGE_TO_PHYS(spc->spc_m[i])))) vm_page_activate(spc->spc_m[i]); } |