summaryrefslogtreecommitdiffstats
path: root/sys/vm
diff options
context:
space:
mode:
authordyson <dyson@FreeBSD.org>1998-01-06 05:26:17 +0000
committerdyson <dyson@FreeBSD.org>1998-01-06 05:26:17 +0000
commitcb2800cd94015c1a5a07a78ac1299961c8cbfee8 (patch)
tree458fd90f400f25f9120e71fd368963d5190181bb /sys/vm
parent082257799eb016d17f2ea10684dc8c250c8dce19 (diff)
downloadFreeBSD-src-cb2800cd94015c1a5a07a78ac1299961c8cbfee8.zip
FreeBSD-src-cb2800cd94015c1a5a07a78ac1299961c8cbfee8.tar.gz
Make our v_usecount vnode reference count work identically to the
original BSD code. The association between the vnode and the vm_object no longer includes reference counts. The major difference is that vm_object's are no longer freed gratuitously from the vnode, and so once an object is created for the vnode, it will last as long as the vnode does. When a vnode object reference count is incremented, then the underlying vnode reference count is incremented also. The two "objects" are now more intimately related, and so the interactions are now much less complex. Vnodes are now normally placed onto the free queue with an object still attached. The rundown of the object happens at vnode rundown time, and happens with exactly the same filesystem semantics of the original VFS code. There is absolutely no need for vnode_pager_uncache and other travesties like that anymore. A side-effect of these changes is that SMP locking should be much simpler, the I/O copyin/copyout optimizations work, NFS should be more ponderable, and further work on layered filesystems should be less frustrating, because of the totally coherent management of the vnode objects and vnodes. Please be careful with your system while running this code, but I would greatly appreciate feedback as soon as reasonably possible.
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_fault.c21
-rw-r--r--sys/vm/vm_map.c114
-rw-r--r--sys/vm/vm_map.h4
-rw-r--r--sys/vm/vm_object.c246
-rw-r--r--sys/vm/vm_object.h13
-rw-r--r--sys/vm/vm_pageout.c6
-rw-r--r--sys/vm/vnode_pager.c31
7 files changed, 245 insertions, 190 deletions
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index a14512d..f45d377 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.71 1997/09/01 03:17:15 bde Exp $
+ * $Id: vm_fault.c,v 1.72 1997/12/19 09:03:10 dyson Exp $
*/
/*
@@ -222,6 +222,15 @@ RetryFault:;
}
}
+ /*
+ * Make a reference to this object to prevent its disposal while we
+ * are messing with it. Once we have the reference, the map is free
+ * to be diddled. Since objects reference their shadows (and copies),
+ * they will stay around as well.
+ */
+ vm_object_reference(first_object);
+ first_object->paging_in_progress++;
+
vp = vnode_pager_lock(first_object);
if ((fault_type & VM_PROT_WRITE) &&
(first_object->type == OBJT_VNODE)) {
@@ -236,16 +245,6 @@ RetryFault:;
first_m = NULL;
/*
- * Make a reference to this object to prevent its disposal while we
- * are messing with it. Once we have the reference, the map is free
- * to be diddled. Since objects reference their shadows (and copies),
- * they will stay around as well.
- */
-
- first_object->ref_count++;
- first_object->paging_in_progress++;
-
- /*
* INVARIANTS (through entire routine):
*
* 1) At all times, we must either have the object lock or a busy
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 11604ce..17b0e75 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.102 1997/12/29 00:24:43 dyson Exp $
+ * $Id: vm_map.c,v 1.103 1997/12/29 01:03:34 dyson Exp $
*/
/*
@@ -558,6 +558,8 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
if ((object == NULL) &&
(prev_entry != &map->header) &&
(( prev_entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) == 0) &&
+ ((prev_entry->object.vm_object == NULL) ||
+ (prev_entry->object.vm_object->type == OBJT_DEFAULT)) &&
(prev_entry->end == start) &&
(prev_entry->wired_count == 0)) {
@@ -757,7 +759,8 @@ vm_map_simplify_entry(map, entry)
prevsize = prev->end - prev->start;
if ( (prev->end == entry->start) &&
(prev->object.vm_object == entry->object.vm_object) &&
- (!prev->object.vm_object || (prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
+ (!prev->object.vm_object ||
+ (prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
(!prev->object.vm_object ||
(prev->offset + prevsize == entry->offset)) &&
(prev->eflags == entry->eflags) &&
@@ -783,7 +786,8 @@ vm_map_simplify_entry(map, entry)
esize = entry->end - entry->start;
if ((entry->end == next->start) &&
(next->object.vm_object == entry->object.vm_object) &&
- (!next->object.vm_object || (next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
+ (!next->object.vm_object ||
+ (next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
(!entry->object.vm_object ||
(entry->offset + esize == next->offset)) &&
(next->eflags == entry->eflags) &&
@@ -2012,7 +2016,7 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
(src_entry->object.vm_object->type == OBJT_DEFAULT ||
src_entry->object.vm_object->type == OBJT_SWAP))
vm_object_collapse(src_entry->object.vm_object);
- ++src_entry->object.vm_object->ref_count;
+ vm_object_reference(src_entry->object.vm_object);
src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
dst_entry->object.vm_object =
@@ -2099,7 +2103,7 @@ vmspace_fork(vm1)
new_entry = vm_map_entry_create(new_map);
*new_entry = *old_entry;
new_entry->wired_count = 0;
- ++object->ref_count;
+ vm_object_reference(object);
/*
* Insert the entry into the new map -- we know we're
@@ -2458,12 +2462,13 @@ vm_map_lookup_done(map, entry)
* operations.
*/
int
-vm_uiomove(mapa, srcobject, cp, cnt, uaddra)
+vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
vm_map_t mapa;
vm_object_t srcobject;
off_t cp;
int cnt;
vm_offset_t uaddra;
+ int *npages;
{
vm_map_t map;
vm_object_t first_object, object;
@@ -2475,6 +2480,9 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra)
vm_pindex_t first_pindex, osize, oindex;
off_t ooffset;
+ if (npages)
+ *npages = 0;
+
while (cnt > 0) {
map = mapa;
uaddr = uaddra;
@@ -2485,11 +2493,6 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra)
return EFAULT;
}
-#if 0
- printf("foff: 0x%x, uaddr: 0x%x\norig entry: (0x%x, 0x%x), ",
- (int) cp, uaddr, first_entry->start, first_entry->end);
-#endif
-
vm_map_clip_start(map, first_entry, uaddr);
tcnt = cnt;
@@ -2500,11 +2503,27 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra)
start = first_entry->start;
end = first_entry->end;
-#if 0
- printf("new entry: (0x%x, 0x%x)\n", start, end);
-#endif
osize = atop(tcnt);
+
+ if (npages) {
+ vm_pindex_t src_index, idx;
+ src_index = OFF_TO_IDX(cp);
+ for (idx = 0; idx < osize; idx++) {
+ vm_page_t m;
+ if ((m = vm_page_lookup(srcobject, src_index + idx)) == NULL) {
+ vm_map_lookup_done(map, first_entry);
+ return 0;
+ }
+ if ((m->flags & PG_BUSY) || m->busy ||
+ m->hold_count || m->wire_count ||
+ ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
+ vm_map_lookup_done(map, first_entry);
+ return 0;
+ }
+ }
+ }
+
oindex = OFF_TO_IDX(first_entry->offset);
/*
@@ -2538,7 +2557,7 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra)
object = srcobject;
object->flags |= OBJ_OPT;
- object->ref_count++;
+ vm_object_reference(object);
ooffset = cp;
vm_object_shadow(&object, &ooffset, osize);
@@ -2577,6 +2596,8 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra)
cnt -= tcnt;
uaddra += tcnt;
cp += tcnt;
+ if (npages)
+ *npages += osize;
}
return 0;
}
@@ -2616,14 +2637,12 @@ vm_freeze_copyopts(object, froma, toa)
int s;
vm_object_t robject, robjectn;
vm_pindex_t idx, from, to;
+ return;
- if (vfs_ioopt == 0 || (object == NULL) || ((object->flags & OBJ_OPT) == 0))
+ if ((vfs_ioopt == 0) || (object == NULL) ||
+ ((object->flags & OBJ_OPT) == 0))
return;
-#if 0
- printf("sc: %d, rc: %d\n", object->shadow_count, object->ref_count);
-#endif
-
if (object->shadow_count > object->ref_count)
panic("vm_freeze_copyopts: sc > rc");
@@ -2643,7 +2662,7 @@ vm_freeze_copyopts(object, froma, toa)
if ((bo_pindex + robject->size) < froma)
continue;
- robject->ref_count++;
+ vm_object_reference(robject);
while (robject->paging_in_progress) {
robject->flags |= OBJ_PIPWNT;
tsleep(robject, PVM, "objfrz", 0);
@@ -2714,9 +2733,6 @@ retryout:
vm_object_pip_wakeup(robject);
if (((from - bo_pindex) == 0) && ((to - bo_pindex) == robject->size)) {
-#if 0
- printf("removing obj: %d, %d\n", object->shadow_count, object->ref_count);
-#endif
object->shadow_count--;
TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
@@ -2729,9 +2745,8 @@ retryout:
vm_object_deallocate(object);
vm_object_deallocate(robject);
return;
- } else {
- object->ref_count--;
}
+ vm_object_deallocate(object);
}
vm_object_deallocate(robject);
}
@@ -2750,16 +2765,18 @@ retryout:
*/
DB_SHOW_COMMAND(map, vm_map_print)
{
+ static int nlines;
/* XXX convert args. */
register vm_map_t map = (vm_map_t)addr;
boolean_t full = have_addr;
register vm_map_entry_t entry;
- db_iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
+ db_iprintf("%s map 0x%x: pmap=0x%x, ref=%d, nentries=%d, version=%d\n",
(map->is_main_map ? "Task" : "Share"),
(int) map, (int) (map->pmap), map->ref_count, map->nentries,
map->timestamp);
+ nlines++;
if (!full && db_indent)
return;
@@ -2767,23 +2784,34 @@ DB_SHOW_COMMAND(map, vm_map_print)
db_indent += 2;
for (entry = map->header.next; entry != &map->header;
entry = entry->next) {
- db_iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
+#if 0
+ if (nlines > 18) {
+ db_printf("--More--");
+ cngetc();
+ db_printf("\r");
+ nlines = 0;
+ }
+#endif
+
+ db_iprintf("map entry 0x%x: start=0x%x, end=0x%x\n",
(int) entry, (int) entry->start, (int) entry->end);
+ nlines++;
if (map->is_main_map) {
static char *inheritance_name[4] =
{"share", "copy", "none", "donate_copy"};
- db_printf("prot=%x/%x/%s, ",
+ db_iprintf(" prot=%x/%x/%s",
entry->protection,
entry->max_protection,
inheritance_name[entry->inheritance]);
if (entry->wired_count != 0)
- db_printf("wired, ");
+ db_printf(", wired");
}
if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
- db_printf("share=0x%x, offset=0x%x\n",
+ db_printf(", share=0x%x, offset=0x%x\n",
(int) entry->object.share_map,
(int) entry->offset);
+ nlines++;
if ((entry->prev == &map->header) ||
((entry->prev->eflags & MAP_ENTRY_IS_A_MAP) == 0) ||
(entry->prev->object.share_map !=
@@ -2794,13 +2822,14 @@ DB_SHOW_COMMAND(map, vm_map_print)
db_indent -= 2;
}
} else {
- db_printf("object=0x%x, offset=0x%x",
+ db_printf(", object=0x%x, offset=0x%x",
(int) entry->object.vm_object,
(int) entry->offset);
if (entry->eflags & MAP_ENTRY_COW)
db_printf(", copy (%s)",
(entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
db_printf("\n");
+ nlines++;
if ((entry->prev == &map->header) ||
(entry->prev->eflags & MAP_ENTRY_IS_A_MAP) ||
@@ -2809,10 +2838,31 @@ DB_SHOW_COMMAND(map, vm_map_print)
db_indent += 2;
vm_object_print((int)entry->object.vm_object,
full, 0, (char *)0);
+ nlines += 4;
db_indent -= 2;
}
}
}
db_indent -= 2;
+ if (db_indent == 0)
+ nlines = 0;
}
+
+
+DB_SHOW_COMMAND(procvm, procvm)
+{
+ struct proc *p;
+
+ if (have_addr) {
+ p = (struct proc *) addr;
+ } else {
+ p = curproc;
+ }
+
+ printf("p = 0x%x, vmspace = 0x%x, map = 0x%x, pmap = 0x%x\n",
+ p, p->p_vmspace, &p->p_vmspace->vm_map, &p->p_vmspace->vm_pmap);
+
+ vm_map_print ((int) &p->p_vmspace->vm_map, 1, 0, NULL);
+}
+
#endif /* DDB */
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 666205b..d70a2b1 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.h,v 1.28 1997/08/18 02:06:24 dyson Exp $
+ * $Id: vm_map.h,v 1.29 1997/12/19 09:03:12 dyson Exp $
*/
/*
@@ -336,7 +336,7 @@ int vm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
void vm_map_madvise __P((vm_map_t, pmap_t, vm_offset_t, vm_offset_t, int));
void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void vm_init2 __P((void));
-int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t));
+int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *));
void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t));
#endif
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 221d7fd..a279525 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.102 1997/12/19 09:03:14 dyson Exp $
+ * $Id: vm_object.c,v 1.103 1997/12/29 00:24:49 dyson Exp $
*/
/*
@@ -94,7 +94,6 @@ static void vm_object_qcollapse __P((vm_object_t object));
#ifdef not_used
static void vm_object_deactivate_pages __P((vm_object_t));
#endif
-static void vm_object_terminate __P((vm_object_t));
/*
* Virtual memory objects maintain the actual data
@@ -236,18 +235,36 @@ vm_object_reference(object)
{
if (object == NULL)
return;
+
+#if defined(DIAGNOSTIC)
+ if (object->flags & OBJ_DEAD)
+ panic("vm_object_reference: attempting to reference dead obj");
+#endif
+
+ object->ref_count++;
+ if (object->type == OBJT_VNODE)
+ vget((struct vnode *) object->handle, LK_NOOBJ, curproc);
+}
+
+inline void
+vm_object_vndeallocate(object)
+ vm_object_t object;
+{
+ struct vnode *vp = (struct vnode *) object->handle;
+#if defined(DIAGNOSTIC)
+ if (object->type != OBJT_VNODE)
+ panic("vm_object_vndeallocate: not a vnode object");
+ if (vp == NULL)
+ panic("vm_object_vndeallocate: missing vp");
if (object->ref_count == 0) {
- panic("vm_object_reference: attempting to reference deallocated obj");
+ vprint("vm_object_vndeallocate", vp);
+ panic("vm_object_vndeallocate: bad object reference count");
}
- object->ref_count++;
- if ((object->type == OBJT_VNODE) && (object->flags & OBJ_VFS_REF)) {
- struct vnode *vp;
- vp = (struct vnode *)object->handle;
- simple_lock(&vp->v_interlock);
- if (vp->v_flag & VOBJREF)
- vp->v_flag |= VOBJREF;
- ++vp->v_usecount;
- simple_unlock(&vp->v_interlock);
+#endif
+
+ object->ref_count--;
+ if (object->type == OBJT_VNODE) {
+ vrele(vp);
}
}
@@ -266,11 +283,16 @@ void
vm_object_deallocate(object)
vm_object_t object;
{
+ int s;
vm_object_t temp;
- struct vnode *vp;
while (object != NULL) {
+ if (object->type == OBJT_VNODE) {
+ vm_object_vndeallocate(object);
+ return;
+ }
+
if (object->ref_count == 0) {
panic("vm_object_deallocate: object deallocated too many times");
} else if (object->ref_count > 2) {
@@ -282,94 +304,68 @@ vm_object_deallocate(object)
* Here on ref_count of one or two, which are special cases for
* objects.
*/
- vp = NULL;
- if (object->type == OBJT_VNODE) {
- vp = (struct vnode *)object->handle;
- if (vp->v_flag & VOBJREF) {
- if (object->ref_count < 2) {
- panic("vm_object_deallocate: "
- "not enough references for OBJT_VNODE: %d",
- object->ref_count);
- } else {
+ if ((object->ref_count == 2) && (object->shadow_count == 1)) {
- /*
- * Freeze optimized copies.
- */
- vm_freeze_copyopts(object, 0, object->size);
-
- /*
- * Loose our reference to the vnode.
- */
- vp->v_flag &= ~VOBJREF;
- vrele(vp);
- }
- }
- }
-
- /*
- * Lose the reference
- */
- if (object->ref_count == 2) {
object->ref_count--;
if ((object->handle == NULL) &&
(object->type == OBJT_DEFAULT ||
object->type == OBJT_SWAP)) {
vm_object_t robject;
+
robject = TAILQ_FIRST(&object->shadow_head);
- if ((robject != NULL) &&
- (robject->handle == NULL) &&
+#if defined(DIAGNOSTIC)
+ if (robject == NULL)
+ panic("vm_object_deallocate: ref_count: %d,"
+ " shadow_count: %d",
+ object->ref_count, object->shadow_count);
+#endif
+ if ((robject->handle == NULL) &&
(robject->type == OBJT_DEFAULT ||
robject->type == OBJT_SWAP)) {
- int s;
- robject->ref_count += 2;
- object->ref_count += 2;
-
- do {
- s = splvm();
- while (robject->paging_in_progress) {
- robject->flags |= OBJ_PIPWNT;
- tsleep(robject, PVM, "objde1", 0);
- }
-
- while (object->paging_in_progress) {
- object->flags |= OBJ_PIPWNT;
- tsleep(object, PVM, "objde2", 0);
- }
- splx(s);
- } while( object->paging_in_progress || robject->paging_in_progress);
+ robject->ref_count++;
+
+ retry:
+ s = splvm();
+ if (robject->paging_in_progress) {
+ robject->flags |= OBJ_PIPWNT;
+ tsleep(robject, PVM, "objde1", 0);
+ goto retry;
+ }
+
+ if (object->paging_in_progress) {
+ object->flags |= OBJ_PIPWNT;
+ tsleep(object, PVM, "objde2", 0);
+ goto retry;
+ }
+ splx(s);
- object->ref_count -= 2;
- robject->ref_count -= 2;
- if( robject->ref_count == 0) {
- robject->ref_count += 1;
+ if( robject->ref_count == 1) {
+ robject->ref_count--;
object = robject;
- continue;
+ goto doterm;
}
- vm_object_collapse(robject);
- return;
+
+ object = robject;
+ vm_object_collapse(object);
+ continue;
}
}
- /*
- * If there are still references, then we are done.
- */
- return;
- }
- /*
- * Make sure no one uses us.
- */
- object->flags |= OBJ_DEAD;
+ return;
- if (vp)
- vp->v_flag &= ~VTEXT;
+ } else {
+ object->ref_count--;
+ if (object->ref_count != 0)
+ return;
+ }
- object->ref_count--;
+doterm:
temp = object->backing_object;
if (temp) {
TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
- --temp->shadow_count;
+ temp->shadow_count--;
}
vm_object_terminate(object);
/* unlocks and deallocates object */
@@ -383,15 +379,17 @@ vm_object_deallocate(object)
*
* The object must be locked.
*/
-static void
+void
vm_object_terminate(object)
register vm_object_t object;
{
register vm_page_t p;
int s;
- if (object->flags & OBJ_VFS_REF)
- panic("vm_object_deallocate: freeing VFS_REF'ed object");
+ /*
+ * Make sure no one uses us.
+ */
+ object->flags |= OBJ_DEAD;
/*
* wait for the pageout daemon to be done with the object
@@ -403,29 +401,44 @@ vm_object_terminate(object)
}
splx(s);
+#if defined(DIAGNOSTIC)
if (object->paging_in_progress != 0)
panic("vm_object_deallocate: pageout in progress");
+#endif
/*
* Clean and free the pages, as appropriate. All references to the
* object are gone, so we don't need to lock it.
*/
if (object->type == OBJT_VNODE) {
- struct vnode *vp = object->handle;
+ struct vnode *vp;
+
+ /*
+ * Freeze optimized copies.
+ */
+ vm_freeze_copyopts(object, 0, object->size);
+
+ /*
+ * Clean pages and flush buffers.
+ */
vm_object_page_clean(object, 0, 0, TRUE);
+
+ vp = (struct vnode *) object->handle;
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
- }
- /*
- * Now free the pages. For internal objects, this also removes them
- * from paging queues.
- */
- while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
- if (p->busy || (p->flags & PG_BUSY))
- printf("vm_object_terminate: freeing busy page\n");
- PAGE_WAKEUP(p);
- vm_page_free(p);
- cnt.v_pfree++;
+ } else {
+
+ /*
+ * Now free the pages. For internal objects, this also removes them
+ * from paging queues.
+ */
+ while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
+ if (p->busy || (p->flags & PG_BUSY))
+ printf("vm_object_terminate: freeing busy page\n");
+ PAGE_WAKEUP(p);
+ vm_page_free(p);
+ cnt.v_pfree++;
+ }
}
/*
@@ -1122,6 +1135,7 @@ vm_object_collapse(object)
object_collapses++;
} else {
+ vm_object_t new_backing_object;
/*
* If all of the pages in the backing object are
* shadowed by the parent object, the parent object no
@@ -1173,25 +1187,26 @@ vm_object_collapse(object)
* it, since its reference count is at least 2.
*/
- TAILQ_REMOVE(&object->backing_object->shadow_head,
+ TAILQ_REMOVE(&backing_object->shadow_head,
object, shadow_list);
- --object->backing_object->shadow_count;
- vm_object_reference(object->backing_object = backing_object->backing_object);
- if (object->backing_object) {
- TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
+ --backing_object->shadow_count;
+
+ new_backing_object = backing_object->backing_object;
+ if (object->backing_object = new_backing_object) {
+ vm_object_reference(new_backing_object);
+ TAILQ_INSERT_TAIL(&new_backing_object->shadow_head,
object, shadow_list);
- ++object->backing_object->shadow_count;
+ ++new_backing_object->shadow_count;
+ object->backing_object_offset +=
+ backing_object->backing_object_offset;
}
- object->backing_object_offset += backing_object->backing_object_offset;
/*
* Drop the reference count on backing_object. Since
* its ref_count was at least 2, it will not vanish;
* so we don't need to call vm_object_deallocate.
*/
- if (backing_object->ref_count == 1)
- printf("should have called obj deallocate\n");
- backing_object->ref_count--;
+ vm_object_deallocate(backing_object);
object_bypasses++;
@@ -1220,18 +1235,20 @@ vm_object_page_remove(object, start, end, clean_only)
{
register vm_page_t p, next;
unsigned int size;
- int s;
+ int s, all;
if (object == NULL)
return;
+ all = ((end == 0) && (start == 0));
+
object->paging_in_progress++;
again:
size = end - start;
- if (size > 4 || size >= object->size / 4) {
+ if (all || size > 4 || size >= object->size / 4) {
for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
next = TAILQ_NEXT(p, listq);
- if ((start <= p->pindex) && (p->pindex < end)) {
+ if (all || ((start <= p->pindex) && (p->pindex < end))) {
if (p->wire_count != 0) {
vm_page_protect(p, VM_PROT_NONE);
p->valid = 0;
@@ -1516,12 +1533,17 @@ DB_SHOW_COMMAND(object, vm_object_print_static)
if (object == NULL)
return;
- db_iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
- (int) object, (int) object->size,
- object->resident_page_count, object->ref_count);
- db_printf("offset=0x%x, backing_object=(0x%x)+0x%x\n",
+ db_iprintf("Object 0x%x: type=%d, size=0x%x, res=%d, ref=%d, flags=0x%x\n",
+ (int) object, (int) object->type, (int) object->size,
+ object->resident_page_count,
+ object->ref_count,
+ object->flags);
+ db_iprintf(" sref=%d, offset=0x%x, backing_object(%d)=(0x%x)+0x%x\n",
+ object->shadow_count,
(int) object->paging_offset,
- (int) object->backing_object, (int) object->backing_object_offset);
+ (((int)object->backing_object)?object->backing_object->ref_count:0),
+ (int) object->backing_object,
+ (int) object->backing_object_offset);
if (!full)
return;
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index a13a5bf..ac86c6c 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.h,v 1.39 1997/12/19 09:03:16 dyson Exp $
+ * $Id: vm_object.h,v 1.40 1997/12/29 00:24:55 dyson Exp $
*/
/*
@@ -122,16 +122,13 @@ struct vm_object {
/*
* Flags
*/
-#define OBJ_CANPERSIST 0x0001 /* allow to persist */
#define OBJ_ACTIVE 0x0004 /* active objects */
#define OBJ_DEAD 0x0008 /* dead objects (during rundown) */
-#define OBJ_PIPWNT 0x0040 /* paging in progress wanted */
+#define OBJ_PIPWNT 0x0040 /* paging in progress wanted */
#define OBJ_WRITEABLE 0x0080 /* object has been made writable */
-#define OBJ_MIGHTBEDIRTY 0x0100 /* object might be dirty */
+#define OBJ_MIGHTBEDIRTY 0x0100 /* object might be dirty */
#define OBJ_CLEANING 0x0200
-#define OBJ_VFS_REF 0x0400 /* object is refed by vfs layer */
-#define OBJ_VNODE_GONE 0x0800 /* vnode is gone */
-#define OBJ_OPT 0x1000 /* I/O optimization */
+#define OBJ_OPT 0x1000 /* I/O optimization */
#define OBJ_NORMAL 0x0 /* default behavior */
#define OBJ_SEQUENTIAL 0x1 /* expect sequential accesses */
@@ -170,6 +167,8 @@ boolean_t vm_object_coalesce __P((vm_object_t, vm_pindex_t, vm_size_t, vm_size_t
void vm_object_collapse __P((vm_object_t));
void vm_object_copy __P((vm_object_t, vm_pindex_t, vm_object_t *, vm_pindex_t *, boolean_t *));
void vm_object_deallocate __P((vm_object_t));
+void vm_object_terminate __P((vm_object_t));
+void vm_object_vndeallocate __P((vm_object_t));
void vm_object_init __P((void));
void vm_object_page_clean __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t));
void vm_object_page_remove __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t));
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 99ee5a4..eda7f30 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.104 1997/12/24 15:05:25 dyson Exp $
+ * $Id: vm_pageout.c,v 1.105 1997/12/29 00:25:03 dyson Exp $
*/
/*
@@ -695,11 +695,7 @@ rescan0:
*/
if ((m->flags & PG_REFERENCED) != 0) {
m->flags &= ~PG_REFERENCED;
-#if 0
- pmap_clear_reference(VM_PAGE_TO_PHYS(m));
-#else
actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
-#endif
vm_page_activate(m);
m->act_count += (actcount + ACT_ADVANCE + 1);
continue;
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 360188a..f3ed776 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.77 1997/12/19 09:03:17 dyson Exp $
+ * $Id: vnode_pager.c,v 1.78 1997/12/29 00:25:11 dyson Exp $
*/
/*
@@ -140,26 +140,18 @@ vnode_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot,
* And an object of the appropriate size
*/
object = vm_object_allocate(OBJT_VNODE, size);
- if (vp->v_type == VREG)
- object->flags = OBJ_CANPERSIST;
- else
- object->flags = 0;
+ object->flags = 0;
object->un_pager.vnp.vnp_size = (vm_ooffset_t) size * PAGE_SIZE;
object->handle = handle;
vp->v_object = object;
+ vp->v_usecount++;
} else {
- /*
- * vm_object_reference() will remove the object from the cache if
- * found and gain a reference to the object.
- */
- vm_object_reference(object);
+ object->ref_count++;
+ vp->v_usecount++;
}
- if (vp->v_type == VREG)
- vp->v_flag |= VVMIO;
-
vp->v_flag &= ~VOLOCK;
if (vp->v_flag & VOWANT) {
vp->v_flag &= ~VOWANT;
@@ -186,10 +178,11 @@ vnode_pager_dealloc(object)
splx(s);
}
+ object->flags |= OBJ_DEAD;
object->handle = NULL;
-
+ object->type = OBJT_DEFAULT;
vp->v_object = NULL;
- vp->v_flag &= ~(VTEXT | VVMIO);
+ vp->v_flag &= ~(VTEXT|VOBJBUF);
}
static boolean_t
@@ -541,8 +534,7 @@ vnode_pager_getpages(object, m, count, reqpage)
{
int rtval;
struct vnode *vp;
- if (object->flags & OBJ_VNODE_GONE)
- return VM_PAGER_ERROR;
+
vp = object->handle;
rtval = VOP_GETPAGES(vp, m, count*PAGE_SIZE, reqpage, 0);
if (rtval == EOPNOTSUPP)
@@ -643,7 +635,7 @@ vnode_pager_leaf_getpages(object, m, count, reqpage)
IDX_TO_OFF(m[i]->pindex), &runpg);
if (firstaddr == -1) {
if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
- panic("vnode_pager_putpages: unexpected missing page: firstaddr: %d, foff: %ld, vnp_size: %d",
+ panic("vnode_pager_getpages: unexpected missing page: firstaddr: %d, foff: %ld, vnp_size: %d",
firstaddr, foff, object->un_pager.vnp.vnp_size);
}
vnode_pager_freepage(m[i]);
@@ -792,9 +784,6 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
int rtval;
struct vnode *vp;
- if (object->flags & OBJ_VNODE_GONE)
- return VM_PAGER_ERROR;
-
vp = object->handle;
rtval = VOP_PUTPAGES(vp, m, count*PAGE_SIZE, sync, rtvals, 0);
if (rtval == EOPNOTSUPP)
OpenPOWER on IntegriCloud