diff options
author | alc <alc@FreeBSD.org> | 2009-06-02 08:02:27 +0000 |
---|---|---|
committer | alc <alc@FreeBSD.org> | 2009-06-02 08:02:27 +0000 |
commit | 4a004094863695f228584bd2dd78759f39280f3d (patch) | |
tree | 093f5d6b6fd20e3168e8b72f1bec881789e2f8e2 /sys | |
parent | b741ad3e96345042455577273b4346af99e4296a (diff) | |
download | FreeBSD-src-4a004094863695f228584bd2dd78759f39280f3d.zip FreeBSD-src-4a004094863695f228584bd2dd78759f39280f3d.tar.gz |
Correct a boundary case error in the management of a page's dirty bits by
shm_dotruncate() and vnode_pager_setsize(). Specifically, if the length of
a shared memory object or a file is truncated such that the length modulo
the page size is between 1 and 511, then all of the page's dirty bits were
cleared. Now, a dirty bit is cleared only if the corresponding block is
truncated in its entirety.
Diffstat (limited to 'sys')
-rw-r--r-- | sys/kern/uipc_shm.c | 21 | ||||
-rw-r--r-- | sys/vm/vnode_pager.c | 26 |
2 files changed, 33 insertions(+), 14 deletions(-)
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c index a12c731..00fb438 100644 --- a/sys/kern/uipc_shm.c +++ b/sys/kern/uipc_shm.c @@ -274,7 +274,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length) /* * If the last page is partially mapped, then zero out * the garbage at the end of the page. See comments - * in vnode_page_setsize() for more details. + * in vnode_pager_setsize() for more details. * * XXXJHB: This handles in memory pages, but what about * a page swapped out to disk? @@ -286,10 +286,23 @@ shm_dotruncate(struct shmfd *shmfd, off_t length) int size = PAGE_SIZE - base; pmap_zero_page_area(m, base, size); + + /* + * Update the valid bits to reflect the blocks that + * have been zeroed. Some of these valid bits may + * have already been set. + */ + vm_page_set_valid(m, base, size); + + /* + * Round "base" to the next block boundary so that the + * dirty bit for a partially zeroed block is not + * cleared. + */ + base = roundup2(base, DEV_BSIZE); + vm_page_lock_queues(); - vm_page_set_validclean(m, base, size); - if (m->dirty != 0) - m->dirty = VM_PAGE_BITS_ALL; + vm_page_clear_dirty(m, base, PAGE_SIZE - base); vm_page_unlock_queues(); } else if ((length & PAGE_MASK) && __predict_false(object->cache != NULL)) { diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c index ed09044..47177e3 100644 --- a/sys/vm/vnode_pager.c +++ b/sys/vm/vnode_pager.c @@ -403,22 +403,28 @@ vnode_pager_setsize(vp, nsize) pmap_zero_page_area(m, base, size); /* - * Clear out partial-page dirty bits. This - * has the side effect of setting the valid - * bits, but that is ok. There are a bunch - * of places in the VM system where we expected - * m->dirty == VM_PAGE_BITS_ALL. The file EOF - * case is one of them. If the page is still - * partially dirty, make it fully dirty. + * Update the valid bits to reflect the blocks that + * have been zeroed. Some of these valid bits may + * have already been set. 
+ */ + vm_page_set_valid(m, base, size); + + /* + * Round "base" to the next block boundary so that the + * dirty bit for a partially zeroed block is not + * cleared. + */ + base = roundup2(base, DEV_BSIZE); + + /* + * Clear out partial-page dirty bits. * * note that we do not clear out the valid * bits. This would prevent bogus_page * replacement from working properly. */ vm_page_lock_queues(); - vm_page_set_validclean(m, base, size); - if (m->dirty != 0) - m->dirty = VM_PAGE_BITS_ALL; + vm_page_clear_dirty(m, base, PAGE_SIZE - base); vm_page_unlock_queues(); } else if ((nsize & PAGE_MASK) && __predict_false(object->cache != NULL)) { |