diff options
author | dillon <dillon@FreeBSD.org> | 1999-02-12 09:51:43 +0000 |
---|---|---|
committer | dillon <dillon@FreeBSD.org> | 1999-02-12 09:51:43 +0000 |
commit | e38d19126b1d9c854c2d84785f6a15b2770fde83 (patch) | |
tree | 9eac3e8e3f738df866a250bb544495c61cb1e07c /sys/vm | |
parent | 22876d91387e8dfd0214a5d6be980a2bbcc8523d (diff) | |
download | FreeBSD-src-e38d19126b1d9c854c2d84785f6a15b2770fde83.zip FreeBSD-src-e38d19126b1d9c854c2d84785f6a15b2770fde83.tar.gz |
Fix non-fatal bug in vm_map_insert() which improperly cleared
OBJ_ONEMAPPING in the case where an object is extended and an
additional vm_map_entry must be allocated.
In vm_object_madvise(), remove call to vm_page_cache() in MADV_FREE
case in order to avoid a page fault on page reuse. However, we still
mark the page as clean and destroy any swap backing store.
Submitted by: Alan Cox <alc@cs.rice.edu>
Diffstat (limited to 'sys/vm')
-rw-r--r-- | sys/vm/vm_map.c | 79 | ||||
-rw-r--r-- | sys/vm/vm_object.c | 30 |
2 files changed, 49 insertions, 60 deletions
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index a44124d..f43c38d 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -61,7 +61,7 @@ * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * - * $Id: vm_map.c,v 1.147 1999/02/03 01:57:16 dillon Exp $ + * $Id: vm_map.c,v 1.148 1999/02/07 21:48:22 dillon Exp $ */ /* @@ -429,6 +429,9 @@ vm_map_lookup_entry(map, address, entry) * size should match that of the address range. * * Requires that the map be locked, and leaves it so. + * + * If object is non-NULL, ref count must be bumped by caller + * prior to making call to account for the new entry. */ int vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, @@ -438,9 +441,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_map_entry_t new_entry; vm_map_entry_t prev_entry; vm_map_entry_t temp_entry; -#if 0 - vm_object_t prev_object; -#endif u_char protoeflags; if ((object != NULL) && (cow & MAP_NOFAULT)) { @@ -483,13 +483,18 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, if (cow & MAP_NOFAULT) protoeflags |= MAP_ENTRY_NOFAULT; - /* - * See if we can avoid creating a new entry by extending one of our - * neighbors. Or at least extend the object. - */ - - if ( - (object == NULL) && + if (object) { + /* + * When object is non-NULL, it could be shared with another + * process. We have to set or clear OBJ_ONEMAPPING + * appropriately. + */ + if ((object->ref_count > 1) || (object->shadow_count != 0)) { + vm_object_clear_flag(object, OBJ_ONEMAPPING); + } else { + vm_object_set_flag(object, OBJ_ONEMAPPING); + } + } else if ( (prev_entry != &map->header) && ((prev_entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) && ((prev_entry->object.vm_object == NULL) || @@ -506,8 +511,9 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, (vm_size_t) (end - prev_entry->end)))) { /* - * Coalesced the two objects. 
Can we extend the - * previous map entry to include the new range? + * We were able to extend the object. Determine if we + * can extend the previous map entry to include the + * new range as well. */ if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && (prev_entry->protection == prot) && @@ -515,28 +521,29 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, map->size += (end - prev_entry->end); prev_entry->end = end; -#if 0 - /* - * (no longer applies) - */ - if ((cow & MAP_NOFAULT) == 0) { - prev_object = prev_entry->object.vm_object; - default_pager_convert_to_swapq(prev_object); - } -#endif return (KERN_SUCCESS); } - else { - object = prev_entry->object.vm_object; - offset = prev_entry->offset + (prev_entry->end - - prev_entry->start); - vm_object_reference(object); - } + /* + * If we can extend the object but cannot extend the + * map entry, we have to create a new map entry. We + * must bump the ref count on the extended object to + * account for it. + */ + object = prev_entry->object.vm_object; + offset = prev_entry->offset + + (prev_entry->end - prev_entry->start); + vm_object_reference(object); } } /* + * NOTE: if conditionals fail, object can be NULL here. This occurs + * in things like the buffer map where we manage kva but do not manage + * backing objects. 
+ */ + + /* * Create a new entry */ @@ -549,14 +556,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, new_entry->offset = offset; new_entry->avail_ssize = 0; - if (object) { - if ((object->ref_count > 1) || (object->shadow_count != 0)) { - vm_object_clear_flag(object, OBJ_ONEMAPPING); - } else { - vm_object_set_flag(object, OBJ_ONEMAPPING); - } - } - if (map->is_main_map) { new_entry->inheritance = VM_INHERIT_DEFAULT; new_entry->protection = prot; @@ -577,12 +576,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, (prev_entry->end >= new_entry->start)) map->first_free = new_entry; -#if 0 - /* - * (no longer applies) - */ - default_pager_convert_to_swapq(object); -#endif return (KERN_SUCCESS); } @@ -853,6 +846,8 @@ vm_map_findspace(map, start, length, addr) * first-fit from the specified address; the region found is * returned in the same parameter. * + * If object is non-NULL, ref count must be bumped by caller + * prior to making call to account for the new entry. */ int vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index bb95ec2..250f727 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -61,7 +61,7 @@ * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * - * $Id: vm_object.c,v 1.147 1999/02/08 05:15:54 dillon Exp $ + * $Id: vm_object.c,v 1.148 1999/02/08 19:00:15 dillon Exp $ */ /* @@ -762,8 +762,8 @@ vm_object_madvise(object, pindex, count, advise) end = pindex + count; /* - * MADV_FREE special case - free any swap backing store (as well - * as resident pages later on). + * MADV_FREE special case - free any swap backing store now, + * whether or not resident pages can be found later. 
*/ if (advise == MADV_FREE) { @@ -835,24 +835,18 @@ shadowlookup: vm_page_deactivate(m); } else if (advise == MADV_FREE) { /* - * If MADV_FREE_FORCE_FREE is defined, we attempt to - * immediately free the page. Otherwise we just - * destroy any swap backing store, mark it clean, - * and stuff it into the cache. + * Mark the page clean. This will allow the page + * to be freed up by the system. However, such pages + * are often reused quickly by malloc()/free() + * so we do not do anything that would cause + * a page fault if we can help it. + * + * Specifically, we do not try to actually free + * the page now nor do we try to put it in the + * cache (which would cause a page fault on reuse). */ pmap_clear_modify(VM_PAGE_TO_PHYS(m)); m->dirty = 0; - -#ifdef MADV_FREE_FORCE_FREE - if (tobject->resident_page_count > 1) { - vm_page_busy(m); - vm_page_protect(m, VM_PROT_NONE); - vm_page_free(m); - } else -#endif - { - vm_page_cache(m); - } } } } |