author     dyson <dyson@FreeBSD.org>  1998-01-12 01:46:33 +0000
committer  dyson <dyson@FreeBSD.org>  1998-01-12 01:46:33 +0000
commit     d9d8bf6d30a04d79ca9d60f80000053eb3f8b46d (patch)
tree       10eadac88cb07ecf3dc243b80c17784560678271 /sys/vm
parent     e790f0b45536af6e666eb06f1c410cb84ee8733c (diff)
Fix some vnode management problems, and improve management of the vnode
free list.

Fix the UIO optimization code.  Fix an assumption in vm_map_insert
regarding the allocation of swap pagers.  Fix an spl problem in the
collapse handling in vm_object_deallocate.  When pages are freed from
vnode objects, and the criteria for putting the associated vnode onto the
free list are reached, either put the vnode onto the list directly, or put
it onto an interrupt-safe version of the list, for later transfer onto the
actual free list.  Some minor syntax changes, converting pre-decrements
and pre-increments to their post forms.  Remove a bogus timeout (added for
debugging) from vn_lock.  PHK will likely still have problems with the
vnode list management, and so do I, but it is better than it was.
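As a rough sketch of the two-stage hand-off described above (the names
vnode_tobefree_list, vnode_mark_free, and vnode_free_list_drain are
illustrative assumptions, not necessarily the identifiers this commit uses):

/*
 * Sketch only: a vnode whose last page goes away in an interrupt-unsafe
 * context is parked on a side list; a process-context pass later moves
 * it onto the real free list.
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/vnode.h>

static TAILQ_HEAD(, vnode) vnode_free_list =
	TAILQ_HEAD_INITIALIZER(vnode_free_list);	/* the actual free list */
static TAILQ_HEAD(, vnode) vnode_tobefree_list =
	TAILQ_HEAD_INITIALIZER(vnode_tobefree_list);	/* interrupt-safe side list */

static void
vnode_mark_free(struct vnode *vp, int in_interrupt)
{
	int s = splvm();

	if (in_interrupt)	/* cannot touch the real list safely here */
		TAILQ_INSERT_TAIL(&vnode_tobefree_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	splx(s);
}

/* Run from process context: drain the side list onto the real one. */
static void
vnode_free_list_drain(void)
{
	struct vnode *vp;
	int s = splvm();

	while ((vp = TAILQ_FIRST(&vnode_tobefree_list)) != NULL) {
		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	splx(s);
}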
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_fault.c   |   4
-rw-r--r--  sys/vm/vm_map.c     | 154
-rw-r--r--  sys/vm/vm_object.c  |   4
-rw-r--r--  sys/vm/vm_page.c    |  20
-rw-r--r--  sys/vm/vm_pageout.c |  40
-rw-r--r--  sys/vm/vm_pageout.h |   3
6 files changed, 170 insertions, 55 deletions
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index f45d377..2185e33 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.72 1997/12/19 09:03:10 dyson Exp $
+ * $Id: vm_fault.c,v 1.73 1998/01/06 05:25:54 dyson Exp $
*/
/*
@@ -523,8 +523,10 @@ readrest:
}
}
+#if defined(DIAGNOSTIC)
if ((m->flags & PG_BUSY) == 0)
panic("vm_fault: not busy after main loop");
+#endif
/*
* PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 17b0e75..67039d4 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.103 1997/12/29 01:03:34 dyson Exp $
+ * $Id: vm_map.c,v 1.104 1998/01/06 05:25:58 dyson Exp $
*/
/*
@@ -2405,7 +2405,7 @@ RetryLookup:;
vm_map_lock_downgrade(share_map);
}
- if (entry->object.vm_object != NULL)
+ if (entry->object.vm_object->type == OBJT_DEFAULT)
default_pager_convert_to_swapq(entry->object.vm_object);
/*
* Return the object/offset from this entry. If the entry was
@@ -2479,16 +2479,20 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
vm_offset_t uaddr, start, end;
vm_pindex_t first_pindex, osize, oindex;
off_t ooffset;
+ int skipinit, allremoved;
if (npages)
*npages = 0;
+ allremoved = 0;
+
while (cnt > 0) {
map = mapa;
uaddr = uaddra;
+ skipinit = 0;
if ((vm_map_lookup(&map, uaddr,
- VM_PROT_READ|VM_PROT_WRITE, &first_entry, &first_object,
+ VM_PROT_READ, &first_entry, &first_object,
&first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
return EFAULT;
}
@@ -2506,17 +2510,16 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
osize = atop(tcnt);
+ oindex = OFF_TO_IDX(cp);
if (npages) {
- vm_pindex_t src_index, idx;
- src_index = OFF_TO_IDX(cp);
+ vm_pindex_t idx;
for (idx = 0; idx < osize; idx++) {
vm_page_t m;
- if ((m = vm_page_lookup(srcobject, src_index + idx)) == NULL) {
+ if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
vm_map_lookup_done(map, first_entry);
return 0;
}
- if ((m->flags & PG_BUSY) || m->busy ||
- m->hold_count || m->wire_count ||
+ if ((m->flags & PG_BUSY) ||
((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
vm_map_lookup_done(map, first_entry);
return 0;
@@ -2524,46 +2527,113 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
}
}
- oindex = OFF_TO_IDX(first_entry->offset);
-
/*
* If we are changing an existing map entry, just redirect
* the object, and change mappings.
*/
- if ((first_object->ref_count == 1) &&
- (first_object->backing_object == srcobject) &&
+ if (first_object->type == OBJT_VNODE) {
+
+ if (first_object != srcobject) {
+
+ vm_object_deallocate(first_object);
+ srcobject->flags |= OBJ_OPT;
+ vm_object_reference(srcobject);
+
+ first_entry->object.vm_object = srcobject;
+ first_entry->offset = cp;
+
+ } else if (first_entry->offset != cp) {
+
+ first_entry->offset = cp;
+
+ } else {
+
+ skipinit = 1;
+
+ }
+
+ if (skipinit == 0) {
+ /*
+ * Remove old window into the file
+ */
+ if (!allremoved) {
+ pmap_remove (map->pmap, uaddra, uaddra + cnt);
+ allremoved = 1;
+ }
+
+ /*
+ * Force copy on write for mmaped regions
+ */
+ vm_object_pmap_copy_1 (srcobject,
+ oindex, oindex + osize);
+ }
+
+ } else if ((first_object->ref_count == 1) &&
(first_object->size == osize) &&
(first_object->resident_page_count == 0)) {
+ vm_object_t oldobject;
- /*
- * Remove old window into the file
- */
- pmap_remove (map->pmap, start, end);
+ oldobject = first_object->backing_object;
- /*
- * Force copy on write for mmaped regions
- */
- vm_object_pmap_copy_1 (first_object,
- oindex, oindex + osize);
+ if ((first_object->backing_object_offset != cp) ||
+ (oldobject != srcobject)) {
+ /*
+ * Remove old window into the file
+ */
+ if (!allremoved) {
+ pmap_remove (map->pmap, uaddra, uaddra + cnt);
+ allremoved = 1;
+ }
- /*
- * Point the object appropriately
- */
- first_object->backing_object_offset = cp;
+ /*
+ * Force copy on write for mmaped regions
+ */
+ vm_object_pmap_copy_1 (srcobject,
+ oindex, oindex + osize);
+
+ /*
+ * Point the object appropriately
+ */
+ if (oldobject != srcobject) {
+ /*
+ * Set the object optimization hint flag
+ */
+ srcobject->flags |= OBJ_OPT;
+ vm_object_reference(srcobject);
+
+ if (oldobject) {
+ TAILQ_REMOVE(&oldobject->shadow_head,
+ first_object, shadow_list);
+ oldobject->shadow_count--;
+ if (oldobject->shadow_count == 0)
+ oldobject->flags &= ~OBJ_OPT;
+ vm_object_deallocate(oldobject);
+ }
+
+ TAILQ_INSERT_TAIL(&srcobject->shadow_head,
+ first_object, shadow_list);
+ srcobject->shadow_count++;
+
+ first_object->backing_object = srcobject;
+ }
+
+ first_object->backing_object_offset = cp;
+ } else {
+ skipinit = 1;
+ }
/*
* Otherwise, we have to do a logical mmap.
*/
} else {
- object = srcobject;
- object->flags |= OBJ_OPT;
- vm_object_reference(object);
- ooffset = cp;
-
- vm_object_shadow(&object, &ooffset, osize);
+ srcobject->flags |= OBJ_OPT;
+ vm_object_reference(srcobject);
- pmap_remove (map->pmap, start, end);
- vm_object_pmap_copy_1 (first_object,
+ if (!allremoved) {
+ pmap_remove (map->pmap, uaddra, uaddra + cnt);
+ allremoved = 1;
+ }
+ vm_object_pmap_copy_1 (srcobject,
oindex, oindex + osize);
vm_map_lookup_done(map, first_entry);
@@ -2578,8 +2648,8 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
SAVE_HINT(map, first_entry->prev);
vm_map_entry_delete(map, first_entry);
- rv = vm_map_insert(map, object, 0, start, end,
- VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
+ rv = vm_map_insert(map, srcobject, cp, start, end,
+ VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE | MAP_COPY_NEEDED);
if (rv != KERN_SUCCESS)
panic("vm_uiomove: could not insert new entry: %d", rv);
@@ -2588,8 +2658,9 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
/*
* Map the window directly, if it is already in memory
*/
- pmap_object_init_pt(map->pmap, start,
- srcobject, (vm_pindex_t) OFF_TO_IDX(cp), end - start, 1);
+ if (!skipinit)
+ pmap_object_init_pt(map->pmap, start,
+ srcobject, (vm_pindex_t) OFF_TO_IDX(cp), end - start, 0);
vm_map_unlock(map);
@@ -2663,10 +2734,14 @@ vm_freeze_copyopts(object, froma, toa)
continue;
vm_object_reference(robject);
+
+ s = splvm();
while (robject->paging_in_progress) {
robject->flags |= OBJ_PIPWNT;
tsleep(robject, PVM, "objfrz", 0);
}
+ splx(s);
+
if (robject->ref_count == 1) {
vm_object_deallocate(robject);
continue;
@@ -2690,7 +2765,7 @@ vm_freeze_copyopts(object, froma, toa)
continue;
if( m_in->flags & PG_BUSY) {
- s = splhigh();
+ s = splvm();
while (m_in && (m_in->flags & PG_BUSY)) {
m_in->flags |= PG_WANTED;
tsleep(m_in, PVM, "pwtfrz", 0);
@@ -2705,7 +2780,7 @@ vm_freeze_copyopts(object, froma, toa)
retryout:
m_out = vm_page_lookup(robject, dstpindex);
if( m_out && (m_out->flags & PG_BUSY)) {
- s = splhigh();
+ s = splvm();
while (m_out && (m_out->flags & PG_BUSY)) {
m_out->flags |= PG_WANTED;
tsleep(m_out, PVM, "pwtfrz", 0);
@@ -2733,6 +2808,7 @@ retryout:
vm_object_pip_wakeup(robject);
if (((from - bo_pindex) == 0) && ((to - bo_pindex) == robject->size)) {
+
object->shadow_count--;
TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
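The vm_freeze_copyopts() hunks above share one idiom: raise to splvm()
(instead of the heavier splhigh()), re-test the busy condition, and sleep
with PG_WANTED set so PAGE_WAKEUP() will rouse the sleeper.  A minimal
sketch, with a hypothetical wrapper function around the loop from the diff:

static void
wait_while_busy(vm_page_t m)
{
	int s = splvm();	/* block the VM interrupt class only */

	while (m && (m->flags & PG_BUSY)) {
		m->flags |= PG_WANTED;		/* ask to be woken */
		tsleep(m, PVM, "pwtfrz", 0);	/* until PAGE_WAKEUP(m) */
	}
	splx(s);
}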
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 7885534..33ec0a1 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.104 1998/01/06 05:26:04 dyson Exp $
+ * $Id: vm_object.c,v 1.105 1998/01/07 03:12:19 dyson Exp $
*/
/*
@@ -332,12 +332,14 @@ vm_object_deallocate(object)
if (robject->paging_in_progress) {
robject->flags |= OBJ_PIPWNT;
tsleep(robject, PVM, "objde1", 0);
+ splx(s);
goto retry;
}
if (object->paging_in_progress) {
object->flags |= OBJ_PIPWNT;
tsleep(object, PVM, "objde2", 0);
+ splx(s);
goto retry;
}
splx(s);
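The two splx() calls added here fix an spl leak: each pass through the
retry loop re-executes s = splvm(), overwriting the saved level with the
already-raised one, so without lowering the level before the goto, the
final splx(s) could leave the CPU stuck at splvm.  A condensed view of the
corrected flow (the placement of the retry label is inferred from the
surrounding context lines):

retry:
	s = splvm();		/* overwrites s on every retry */
	if (robject->paging_in_progress) {
		robject->flags |= OBJ_PIPWNT;
		tsleep(robject, PVM, "objde1", 0);
		splx(s);	/* restore the level before looping */
		goto retry;
	}
	if (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		tsleep(object, PVM, "objde2", 0);
		splx(s);	/* likewise */
		goto retry;
	}
	splx(s);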
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 3b36526..ff824d6 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.83 1997/11/06 08:35:50 dyson Exp $
+ * $Id: vm_page.c,v 1.84 1997/12/29 00:24:58 dyson Exp $
*/
/*
@@ -753,6 +753,7 @@ vm_page_alloc(object, pindex, page_req)
{
register vm_page_t m;
struct vpgqueues *pq;
+ vm_object_t oldobject;
int queue, qtype;
int s;
@@ -861,9 +862,11 @@ vm_page_alloc(object, pindex, page_req)
TAILQ_REMOVE(pq->pl, m, pageq);
--(*pq->cnt);
--(*pq->lcnt);
+ oldobject = NULL;
if (qtype == PQ_ZERO) {
m->flags = PG_ZERO|PG_BUSY;
} else if (qtype == PQ_CACHE) {
+ oldobject = m->object;
vm_page_remove(m);
m->flags = PG_BUSY;
} else {
@@ -891,6 +894,19 @@ vm_page_alloc(object, pindex, page_req)
(cnt.v_free_count < cnt.v_pageout_free_min))
pagedaemon_wakeup();
+ if (((page_req == VM_ALLOC_NORMAL) || (page_req == VM_ALLOC_ZERO)) &&
+ oldobject &&
+ ((oldobject->type == OBJT_VNODE) &&
+ (oldobject->ref_count == 0) &&
+ (oldobject->resident_page_count == 0))) {
+ struct vnode *vp;
+ vp = (struct vnode *) oldobject->handle;
+ if (VSHOULDFREE(vp)) {
+ vm_object_reference(oldobject);
+ vm_object_vndeallocate(oldobject);
+ }
+ }
+
return (m);
}
@@ -954,6 +970,7 @@ static int
vm_page_freechk_and_unqueue(m)
vm_page_t m;
{
+#if !defined(MAX_PERF)
if (m->busy ||
(m->flags & PG_BUSY) ||
((m->queue - m->pc) == PQ_FREE) ||
@@ -966,6 +983,7 @@ vm_page_freechk_and_unqueue(m)
else
panic("vm_page_free: freeing busy page");
}
+#endif
vm_page_remove(m);
vm_page_unqueue_nowakeup(m);
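The new block in vm_page_alloc() is the allocator-side feed into the
free-list machinery: reusing a cached page can leave an unreferenced vnode
object with no resident pages, and the transient reference is taken only
so that dropping it goes through vm_object_vndeallocate(), whose
final-release path is what queues the vnode.  The same excerpt with
explanatory comments added (logic as in the diff):

	struct vnode *vp;

	vp = (struct vnode *) oldobject->handle;
	if (VSHOULDFREE(vp)) {
		vm_object_reference(oldobject);		/* a ref we may legally drop */
		vm_object_vndeallocate(oldobject);	/* drop it via the vnode path,
							 * pushing vp toward the free list */
	}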
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index eda7f30..b5669fd 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.105 1997/12/29 00:25:03 dyson Exp $
+ * $Id: vm_pageout.c,v 1.106 1998/01/06 05:26:11 dyson Exp $
*/
/*
@@ -382,10 +382,10 @@ vm_pageout_flush(mc, count, sync)
switch (pageout_status[i]) {
case VM_PAGER_OK:
- ++anyok;
+ anyok++;
break;
case VM_PAGER_PEND:
- ++anyok;
+ anyok++;
break;
case VM_PAGER_BAD:
/*
@@ -592,6 +592,23 @@ vm_pageout_map_deactivate_pages(map, desired)
}
#endif
+void
+vm_pageout_page_free(vm_page_t m) {
+ vm_object_t objref = NULL;
+
+ m->flags |= PG_BUSY;
+ if (m->object->type == OBJT_VNODE) {
+ objref = m->object;
+ vm_object_reference(objref);
+ }
+ vm_page_protect(m, VM_PROT_NONE);
+ PAGE_WAKEUP(m);
+ vm_page_free(m);
+ if (objref) {
+ vm_object_vndeallocate(objref);
+ }
+}
+
/*
* vm_pageout_scan does the dirty work for the pageout daemon.
*/
@@ -716,17 +733,16 @@ rescan0:
* Invalid pages can be easily freed
*/
if (m->valid == 0) {
- vm_page_protect(m, VM_PROT_NONE);
- vm_page_free(m);
+ vm_pageout_page_free(m);
cnt.v_dfree++;
- ++pages_freed;
+ pages_freed++;
/*
* Clean pages can be placed onto the cache queue.
*/
} else if (m->dirty == 0) {
vm_page_cache(m);
- ++pages_freed;
+ pages_freed++;
/*
* Dirty pages need to be paged out. Note that we clean
@@ -774,7 +790,7 @@ rescan0:
splx(s);
}
if (object->flags & OBJ_MIGHTBEDIRTY)
- ++vnodes_skipped;
+ vnodes_skipped++;
continue;
}
@@ -784,7 +800,7 @@ rescan0:
*/
if (m->queue != PQ_INACTIVE) {
if (object->flags & OBJ_MIGHTBEDIRTY)
- ++vnodes_skipped;
+ vnodes_skipped++;
vput(vp);
continue;
}
@@ -808,7 +824,7 @@ rescan0:
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
splx(s);
if (object->flags & OBJ_MIGHTBEDIRTY)
- ++vnodes_skipped;
+ vnodes_skipped++;
vput(vp);
continue;
}
@@ -922,7 +938,7 @@ rescan0:
m->act_count -= min(m->act_count, ACT_DECLINE);
if (vm_pageout_algorithm_lru ||
(m->object->ref_count == 0) || (m->act_count == 0)) {
- --page_shortage;
+ page_shortage--;
if (m->object->ref_count == 0) {
vm_page_protect(m, VM_PROT_NONE);
if (m->dirty == 0)
@@ -953,7 +969,7 @@ rescan0:
if (!m)
break;
cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
- vm_page_free(m);
+ vm_pageout_page_free(m);
cnt.v_dfree++;
}
splx(s);
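vm_pageout_page_free(), added above, replaces the open-coded
vm_page_protect()/vm_page_free() pairs in the scan.  The object reference
is the point of the helper: freeing the last page of a vnode-backed object
can make its vnode a free-list candidate, and holding a reference defers
that transition to the explicit vm_object_vndeallocate() at the end.  The
function again, with comments added (code otherwise as in the diff):

void
vm_pageout_page_free(vm_page_t m) {
	vm_object_t objref = NULL;

	m->flags |= PG_BUSY;			/* claim the page for ourselves */
	if (m->object->type == OBJT_VNODE) {
		objref = m->object;
		vm_object_reference(objref);	/* pin the object across the free */
	}
	vm_page_protect(m, VM_PROT_NONE);	/* strip all pmap mappings */
	PAGE_WAKEUP(m);
	vm_page_free(m);
	if (objref) {
		/* drop the pin; may move the vnode toward the free list */
		vm_object_vndeallocate(objref);
	}
}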
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index ab2608a..4a61d12 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.h,v 1.20 1997/02/22 09:48:34 peter Exp $
+ * $Id: vm_pageout.h,v 1.21 1997/12/06 02:23:36 dyson Exp $
*/
#ifndef _VM_VM_PAGEOUT_H_
@@ -105,6 +105,7 @@ extern void vm_wait __P((void));
void vm_pageout_page __P((vm_page_t, vm_object_t));
void vm_pageout_cluster __P((vm_page_t, vm_object_t));
int vm_pageout_flush __P((vm_page_t *, int, int));
+void vm_pageout_page_free __P((vm_page_t));
#endif