author:    alc <alc@FreeBSD.org>  2011-06-29 16:40:41 +0000
committer: alc <alc@FreeBSD.org>  2011-06-29 16:40:41 +0000
commit:    21902be08cad124037a2152459b485a54308e5ca (patch)
tree:      bffb0499b364b344b9d5ddbae978932a240d3045 /sys
parent:    7b0555e88a9f208857e299f3e8380d8c8f67119b (diff)
Add a new option, OBJPR_NOTMAPPED, to vm_object_page_remove(). Passing this
option to vm_object_page_remove() asserts that the specified range of pages
is not mapped, or more precisely that none of these pages have any managed
mappings. Thus, vm_object_page_remove() need not call pmap_remove_all() on
the pages.
This change not only saves time by eliminating pointless calls to
pmap_remove_all(), but it also removes an inconsistency in the use of
pmap_remove_all() versus related functions, like pmap_remove_write().
In particular, it does away with harmless but pointless calls to
pmap_remove_all() on PG_UNMANAGED pages.
Update all of the existing assertions in the pmap_remove_all()
implementations to reflect this change.
Reviewed by: kib
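
To make the interface change concrete, the sketch below shows a caller before
and after this commit. It is a minimal, non-standalone illustration:
example_truncate() and its surrounding context are hypothetical, while
vm_object_page_remove(), the OBJPR_* options, and the VM_OBJECT_LOCK()/
VM_OBJECT_UNLOCK() macros are the identifiers actually touched by this change.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_object.h>

/*
 * Hypothetical caller that discards every page from "newpages" to the
 * end of the object, as a truncation path would.
 */
static void
example_truncate(vm_object_t object, vm_pindex_t newpages)
{

	VM_OBJECT_LOCK(object);
	/*
	 * Before this commit the fourth parameter was the Boolean
	 * "clean_only", and FALSE meant "free dirty pages as well":
	 *
	 *	vm_object_page_remove(object, newpages, 0, FALSE);
	 *
	 * After it, the fourth parameter is a bit mask of OBJPR_*
	 * options; 0 requests the old FALSE behavior, and "end" == 0
	 * still extends the range to the end of the object.
	 */
	vm_object_page_remove(object, newpages, 0, 0);
	VM_OBJECT_UNLOCK(object);
}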
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/pmap.c     |   4
-rw-r--r--  sys/arm/arm/pmap.c         |   4
-rw-r--r--  sys/fs/tmpfs/tmpfs_subr.c  |   2
-rw-r--r--  sys/i386/i386/pmap.c       |   4
-rw-r--r--  sys/i386/xen/pmap.c        |   4
-rw-r--r--  sys/ia64/ia64/pmap.c       |   4
-rw-r--r--  sys/kern/uipc_shm.c        |   2
-rw-r--r--  sys/kern/vfs_subr.c        |   4
-rw-r--r--  sys/mips/mips/pmap.c       |   4
-rw-r--r--  sys/ufs/ffs/ffs_inode.c    |   2
-rw-r--r--  sys/vm/vm_map.c            |  10
-rw-r--r--  sys/vm/vm_object.c         | 123
-rw-r--r--  sys/vm/vm_object.h         |   9
-rw-r--r--  sys/vm/vnode_pager.c       |   2
14 files changed, 100 insertions(+), 78 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 025ca5f..941e85e 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2847,8 +2847,8 @@ pmap_remove_all(vm_page_t m)
 	vm_offset_t va;
 	vm_page_t free;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
 	vm_page_lock_queues();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index cecf363..f684402 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -3120,8 +3120,8 @@ pmap_remove_all(vm_page_t m)
 	pmap_t curpm;
 	int flags = 0;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
 	if (TAILQ_EMPTY(&m->md.pv_list))
 		return;
 	vm_page_lock_queues();
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 5a07a45..e9324cf 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -926,7 +926,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize)
 
 	if (newpages < oldpages) {
 		swap_pager_freespace(uobj, newpages, oldpages - newpages);
-		vm_object_page_remove(uobj, newpages, 0, FALSE);
+		vm_object_page_remove(uobj, newpages, 0, 0);
 	}
 
 	/*
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 3f9248d..45d19b2 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2927,8 +2927,8 @@ pmap_remove_all(vm_page_t m)
 	vm_offset_t va;
 	vm_page_t free;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
 	vm_page_lock_queues();
 	sched_pin();
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 3efa4f1..a23ae6d 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -2421,8 +2421,8 @@ pmap_remove_all(vm_page_t m)
 	pt_entry_t *pte, tpte;
 	vm_page_t free;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
 	vm_page_lock_queues();
 	sched_pin();
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 625d0af7..b36f813 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1424,8 +1424,8 @@ pmap_remove_all(vm_page_t m)
 	pmap_t oldpmap;
 	pv_entry_t pv;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
 	vm_page_lock_queues();
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		struct ia64_lpte *pte;
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 00496af..cea70c7 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -264,7 +264,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
 		/* Toss in memory pages. */
 		if (nobjsize < object->size)
 			vm_object_page_remove(object, nobjsize, object->size,
-			    FALSE);
+			    0);
 
 		/* Toss pages from swap. */
 		if (object->type == OBJT_SWAP)
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index c5ae83c..934745b 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1190,8 +1190,8 @@ bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
 	 */
 	if (bo->bo_object != NULL && (flags & (V_ALT | V_NORMAL)) == 0) {
 		VM_OBJECT_LOCK(bo->bo_object);
-		vm_object_page_remove(bo->bo_object, 0, 0,
-		    (flags & V_SAVE) ? TRUE : FALSE);
+		vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
+		    OBJPR_CLEANONLY : 0);
 		VM_OBJECT_UNLOCK(bo->bo_object);
 	}
 
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index f7ea660..1d4d411 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -1711,8 +1711,8 @@ pmap_remove_all(vm_page_t m)
 	pv_entry_t pv;
 	pt_entry_t *pte, tpte;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
 	vm_page_lock_queues();
 
 	if (m->md.pv_flags & PV_TABLE_REF)
diff --git a/sys/ufs/ffs/ffs_inode.c b/sys/ufs/ffs/ffs_inode.c
index a7b43e2..62fa8e0 100644
--- a/sys/ufs/ffs/ffs_inode.c
+++ b/sys/ufs/ffs/ffs_inode.c
@@ -128,7 +128,7 @@ ffs_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
 	if ((object = vp->v_object) == NULL)
 		return;
 	VM_OBJECT_LOCK(object);
-	vm_object_page_remove(object, start, end, FALSE);
+	vm_object_page_remove(object, start, end, 0);
 	VM_OBJECT_UNLOCK(object);
 }
 
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 752354a..31886af 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2708,7 +2708,15 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 	    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
 	    object == kernel_object || object == kmem_object)) {
 		vm_object_collapse(object);
-		vm_object_page_remove(object, offidxstart, offidxend, FALSE);
+
+		/*
+		 * The option OBJPR_NOTMAPPED can be passed here
+		 * because vm_map_delete() already performed
+		 * pmap_remove() on the only mapping to this range
+		 * of pages.
+		 */
+		vm_object_page_remove(object, offidxstart, offidxend,
+		    OBJPR_NOTMAPPED);
 		if (object->type == OBJT_SWAP)
 			swap_pager_freespace(object, offidxstart, count);
 		if (offidxend >= object->size &&
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 1a3d398..b5788f5 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -923,6 +923,10 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
  *	We invalidate (remove) all pages from the address space
  *	for semantic correctness.
  *
+ *	If the backing object is a device object with unmanaged pages, then any
+ *	mappings to the specified range of pages must be removed before this
+ *	function is called.
+ *
  *	Note: certain anonymous maps, such as MAP_NOSYNC maps,
  *	may start out with a NULL object.
  */
@@ -978,12 +982,19 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
 	}
 	if ((object->type == OBJT_VNODE || object->type == OBJT_DEVICE) &&
 	    invalidate) {
-		boolean_t purge;
-		purge = old_msync || (object->type == OBJT_DEVICE);
-		vm_object_page_remove(object,
-		    OFF_TO_IDX(offset),
-		    OFF_TO_IDX(offset + size + PAGE_MASK),
-		    purge ? FALSE : TRUE);
+		if (object->type == OBJT_DEVICE)
+			/*
+			 * The option OBJPR_NOTMAPPED must be passed here
+			 * because vm_object_page_remove() cannot remove
+			 * unmanaged mappings.
+			 */
+			flags = OBJPR_NOTMAPPED;
+		else if (old_msync)
+			flags = 0;
+		else
+			flags = OBJPR_CLEANONLY;
+		vm_object_page_remove(object, OFF_TO_IDX(offset),
+		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
 	}
 	VM_OBJECT_UNLOCK(object);
 }
@@ -1754,76 +1765,70 @@ vm_object_collapse(vm_object_t object)
  *	vm_object_page_remove:
  *
  *	For the given object, either frees or invalidates each of the
- *	specified pages.  In general, a page is freed.  However, if a
- *	page is wired for any reason other than the existence of a
- *	managed, wired mapping, then it may be invalidated but not
- *	removed from the object.  Pages are specified by the given
- *	range ["start", "end") and Boolean "clean_only".  As a
- *	special case, if "end" is zero, then the range extends from
- *	"start" to the end of the object.  If "clean_only" is TRUE,
- *	then only the non-dirty pages within the specified range are
- *	affected.
+ *	specified pages.  In general, a page is freed.  However, if a page is
+ *	wired for any reason other than the existence of a managed, wired
+ *	mapping, then it may be invalidated but not removed from the object.
+ *	Pages are specified by the given range ["start", "end") and the option
+ *	OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
+ *	extends from "start" to the end of the object.  If the option
+ *	OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
+ *	specified range are affected.  If the option OBJPR_NOTMAPPED is
+ *	specified, then the pages within the specified range must have no
+ *	mappings.  Otherwise, if this option is not specified, any mappings to
+ *	the specified pages are removed before the pages are freed or
+ *	invalidated.
  *
- *	In general, this operation should only be performed on objects
- *	that contain managed pages.  There are two exceptions.  First,
- *	it may be performed on the kernel and kmem objects.  Second,
- *	it may be used by msync(..., MS_INVALIDATE) to invalidate
- *	device-backed pages.  In both of these cases, "clean_only"
- *	must be FALSE.
+ *	In general, this operation should only be performed on objects that
+ *	contain managed pages.  There are, however, two exceptions.  First, it
+ *	is performed on the kernel and kmem objects by vm_map_entry_delete().
+ *	Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
+ *	backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
+ *	not be specified and the option OBJPR_NOTMAPPED must be specified.
 *
 *	The object must be locked.
 */
 void
 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
-    boolean_t clean_only)
+    int options)
 {
 	vm_page_t p, next;
 	int wirings;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	KASSERT((object->type != OBJT_DEVICE && object->type != OBJT_PHYS) ||
+	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
+	    ("vm_object_page_remove: illegal options for object %p", object));
 	if (object->resident_page_count == 0)
 		goto skipmemq;
-
-	/*
-	 * Since physically-backed objects do not use managed pages, we can't
-	 * remove pages from the object (we must instead remove the page
-	 * references, and then destroy the object).
-	 */
-	KASSERT(object->type != OBJT_PHYS || object == kernel_object ||
-	    object == kmem_object,
-	    ("attempt to remove pages from a physical object"));
-
 	vm_object_pip_add(object, 1);
 again:
 	p = vm_page_find_least(object, start);
 
 	/*
-	 * Assert: the variable p is either (1) the page with the
-	 * least pindex greater than or equal to the parameter pindex
-	 * or (2) NULL.
+	 * Here, the variable "p" is either (1) the page with the least pindex
+	 * greater than or equal to the parameter "start" or (2) NULL.
 	 */
-	for (;
-	    p != NULL && (p->pindex < end || end == 0);
-	    p = next) {
+	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
 		next = TAILQ_NEXT(p, listq);
 
 		/*
-		 * If the page is wired for any reason besides the
-		 * existence of managed, wired mappings, then it cannot
-		 * be freed.  For example, fictitious pages, which
-		 * represent device memory, are inherently wired and
-		 * cannot be freed.  They can, however, be invalidated
-		 * if "clean_only" is FALSE.
+		 * If the page is wired for any reason besides the existence
+		 * of managed, wired mappings, then it cannot be freed.  For
+		 * example, fictitious pages, which represent device memory,
+		 * are inherently wired and cannot be freed.  They can,
+		 * however, be invalidated if the option OBJPR_CLEANONLY is
+		 * not specified.
		 */
 		vm_page_lock(p);
 		if ((wirings = p->wire_count) != 0 &&
 		    (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
-			/* Fictitious pages do not have managed mappings. */
-			if ((p->flags & PG_FICTITIOUS) == 0)
+			if ((options & OBJPR_NOTMAPPED) == 0) {
 				pmap_remove_all(p);
-			/* Account for removal of managed, wired mappings. */
-			p->wire_count -= wirings;
-			if (!clean_only) {
+				/* Account for removal of wired mappings. */
+				if (wirings != 0)
+					p->wire_count -= wirings;
+			}
+			if ((options & OBJPR_CLEANONLY) == 0) {
 				p->valid = 0;
 				vm_page_undirty(p);
 			}
@@ -1834,17 +1839,20 @@ again:
 			goto again;
 		KASSERT((p->flags & PG_FICTITIOUS) == 0,
 		    ("vm_object_page_remove: page %p is fictitious", p));
-		if (clean_only && p->valid) {
-			pmap_remove_write(p);
+		if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) {
+			if ((options & OBJPR_NOTMAPPED) == 0)
+				pmap_remove_write(p);
 			if (p->dirty) {
 				vm_page_unlock(p);
 				continue;
 			}
 		}
-		pmap_remove_all(p);
-		/* Account for removal of managed, wired mappings. */
-		if (wirings != 0)
-			p->wire_count -= wirings;
+		if ((options & OBJPR_NOTMAPPED) == 0) {
+			pmap_remove_all(p);
+			/* Account for removal of wired mappings. */
+			if (wirings != 0)
+				p->wire_count -= wirings;
+		}
 		vm_page_free(p);
 		vm_page_unlock(p);
 	}
@@ -1991,9 +1999,8 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
 	 * deallocation.
 	 */
 	if (next_pindex < prev_object->size) {
-		vm_object_page_remove(prev_object,
-		    next_pindex,
-		    next_pindex + next_size, FALSE);
+		vm_object_page_remove(prev_object, next_pindex, next_pindex +
+		    next_size, 0);
 		if (prev_object->type == OBJT_SWAP)
 			swap_pager_freespace(prev_object,
 			    next_pindex, next_size);
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 0f4f47d..a11f144 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -168,6 +168,12 @@ struct vm_object {
 #define	OBJPC_INVAL	0x2			/* invalidate */
 #define	OBJPC_NOSYNC	0x4			/* skip if PG_NOSYNC */
 
+/*
+ * The following options are supported by vm_object_page_remove().
+ */
+#define	OBJPR_CLEANONLY	0x1		/* Don't remove dirty pages. */
+#define	OBJPR_NOTMAPPED	0x2		/* Don't unmap pages. */
+
 TAILQ_HEAD(object_q, vm_object);
 
 extern struct object_q vm_object_list;	/* list of allocated objects */
@@ -219,7 +225,8 @@ void vm_object_set_writeable_dirty (vm_object_t);
 void vm_object_init (void);
 void vm_object_page_clean(vm_object_t object, vm_ooffset_t start,
     vm_ooffset_t end, int flags);
-void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
+void vm_object_page_remove(vm_object_t object, vm_pindex_t start,
+    vm_pindex_t end, int options);
 boolean_t vm_object_populate(vm_object_t, vm_pindex_t, vm_pindex_t);
 void vm_object_print(long addr, boolean_t have_addr, long count, char *modif);
 void vm_object_reference (vm_object_t);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index a8eca20..23ade63 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -387,7 +387,7 @@ vnode_pager_setsize(vp, nsize)
 		 */
 		if (nobjsize < object->size)
 			vm_object_page_remove(object, nobjsize, object->size,
-			    FALSE);
+			    0);
 		/*
 		 * this gets rid of garbage at the end of a page that is now
 		 * only partially backed by the vnode.
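
As a usage note, the new options compose as in the following sketch, which is
modeled on the vm_object_sync() and bufobj_invalbuf() changes above;
example_invalidate() and its "save_dirty" parameter are hypothetical:

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_object.h>

static void
example_invalidate(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    int save_dirty)
{
	int options;

	if (object->type == OBJT_DEVICE)
		/*
		 * vm_object_page_remove() cannot remove unmanaged
		 * mappings, so a caller working on a device object is
		 * asserting that none remain.
		 */
		options = OBJPR_NOTMAPPED;
	else if (save_dirty)
		options = OBJPR_CLEANONLY;	/* leave dirty pages alone */
	else
		options = 0;	/* unmap, then free or invalidate all pages */
	VM_OBJECT_LOCK(object);
	vm_object_page_remove(object, start, end, options);
	VM_OBJECT_UNLOCK(object);
}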