diff options
author:    alc <alc@FreeBSD.org>  2012-11-01 16:20:02 +0000
committer: alc <alc@FreeBSD.org>  2012-11-01 16:20:02 +0000
commit:    60d5a532fb33437f307e213f69f73216ec168117 (patch)
tree:      daeafe3bfb9a6a453aa49264b3b8944b9f574e7a
parent:    888a8bb7701dcdb784bfaeb4cf3753264a679a60 (diff)
download:  FreeBSD-src-60d5a532fb33437f307e213f69f73216ec168117.zip
           FreeBSD-src-60d5a532fb33437f307e213f69f73216ec168117.tar.gz
In general, we call pmap_remove_all() before calling vm_page_cache(). So,
the call to pmap_remove_all() within vm_page_cache() is usually redundant.
This change eliminates that call to pmap_remove_all() and introduces a
call to pmap_remove_all() before vm_page_cache() in the one place where
it didn't already exist.
When iterating over a paging queue, if the object containing the current
page has a zero reference count, then the page can't have any managed
mappings. So, a call to pmap_remove_all() is pointless.
Change a panic() call in vm_page_cache() to a KASSERT().
MFC after: 6 weeks
 sys/vm/vm_page.c    |  6 +++---
 sys/vm/vm_pageout.c | 35 ++++++++-------------------------
 2 files changed, 13 insertions(+), 28 deletions(-)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index b2d2a69..ef9fb85 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2277,9 +2277,9 @@ vm_page_cache(vm_page_t m)
 	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
 	    m->hold_count || m->wire_count)
 		panic("vm_page_cache: attempting to cache busy page");
-	pmap_remove_all(m);
-	if (m->dirty != 0)
-		panic("vm_page_cache: page %p is dirty", m);
+	KASSERT(!pmap_page_is_mapped(m),
+	    ("vm_page_cache: page %p is mapped", m));
+	KASSERT(m->dirty == 0, ("vm_page_cache: page %p is dirty", m));
 	if (m->valid == 0 || object->type == OBJT_DEFAULT ||
 	    (object->type == OBJT_SWAP &&
 	    !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 2878c93..9f3ea6c 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -594,7 +594,7 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
 			continue;
 		}
 		vm_page_test_dirty(m);
-		if (m->dirty == 0)
+		if (m->dirty == 0 && object->ref_count != 0)
 			pmap_remove_all(m);
 		if (m->dirty != 0) {
 			vm_page_unlock(m);
@@ -1059,31 +1059,16 @@ vm_pageout_scan(int pass)
 		}

 		/*
-		 * If the upper level VM system does not believe that the page
-		 * is fully dirty, but it is mapped for write access, then we
-		 * consult the pmap to see if the page's dirty status should
-		 * be updated.
+		 * If the page appears to be clean at the machine-independent
+		 * layer, then remove all of its mappings from the pmap in
+		 * anticipation of placing it onto the cache queue.  If,
+		 * however, any of the page's mappings allow write access,
+		 * then the page may still be modified until the last of those
+		 * mappings are removed.
 		 */
-		if (m->dirty != VM_PAGE_BITS_ALL &&
-		    pmap_page_is_write_mapped(m)) {
-			/*
-			 * Avoid a race condition: Unless write access is
-			 * removed from the page, another processor could
-			 * modify it before all access is removed by the call
-			 * to vm_page_cache() below.  If vm_page_cache() finds
-			 * that the page has been modified when it removes all
-			 * access, it panics because it cannot cache dirty
-			 * pages.  In principle, we could eliminate just write
-			 * access here rather than all access.  In the expected
-			 * case, when there are no last instant modifications
-			 * to the page, removing all access will be cheaper
-			 * overall.
-			 */
-			if (pmap_is_modified(m))
-				vm_page_dirty(m);
-			else if (m->dirty == 0)
-				pmap_remove_all(m);
-		}
+		vm_page_test_dirty(m);
+		if (m->dirty == 0 && object->ref_count != 0)
+			pmap_remove_all(m);

 		if (m->valid == 0) {
 			/*