-rw-r--r--  sys/conf/NOTES           |  15 -
-rw-r--r--  sys/conf/options         |   1 -
-rw-r--r--  sys/kern/kern_subr.c     | 108 +-
-rw-r--r--  sys/kern/vfs_subr.c      |   6 -
-rw-r--r--  sys/sys/uio.h            |   1 -
-rw-r--r--  sys/ufs/ffs/ffs_vnops.c  | 108 -
-rw-r--r--  sys/vm/vm_fault.c        |   7 -
-rw-r--r--  sys/vm/vm_map.c          | 249 -
-rw-r--r--  sys/vm/vm_map.h          |   1 -
-rw-r--r--  sys/vm/vm_object.c       | 134 -
-rw-r--r--  sys/vm/vm_object.h       |   4 -
-rw-r--r--  sys/vm/vnode_pager.c     |   3 -

12 files changed, 2 insertions(+), 635 deletions(-)
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 3d85184..d4112dc 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -756,21 +756,6 @@ options EXT2FS
 # unsuitable for inclusion on machines with untrusted local users.
 options 	VFS_AIO
 
-# Enable the code UFS IO optimization through the VM system. This allows
-# use VM operations instead of copying operations when possible.
-#
-# Even with this enabled, actual use of the code is still controlled by the
-# sysctl vfs.ioopt. 0 gives no optimization, 1 gives normal (use VM
-# operations if a request happens to fit), 2 gives agressive optimization
-# (the operations are split to do as much as possible through the VM system.)
-#
-# Enabling this will probably not give an overall speedup except for
-# special workloads.
-#
-# WARNING: Do not enable this, it is known to be broken, and will result
-# in system instability, as well as possible data loss.
-options 	ENABLE_VFS_IOOPT
-
 # Cryptographically secure random number generator; /dev/[u]random
 device		random
 
diff --git a/sys/conf/options b/sys/conf/options
index 5047d70..f5868fe 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -460,7 +460,6 @@ DEBUG_LOCKS	opt_global.h
 DEBUG_VFS_LOCKS	opt_global.h
 LOOKUP_SHARED	opt_global.h
 DIAGNOSTIC	opt_global.h
-ENABLE_VFS_IOOPT	opt_global.h
 INVARIANT_SUPPORT	opt_global.h
 INVARIANTS	opt_global.h
 MCLSHIFT	opt_global.h
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index a4bbcee..6728e8d 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -59,8 +59,6 @@
 #include <vm/vm_map.h>
 #ifdef ZERO_COPY_SOCKETS
 #include <vm/vm_param.h>
-#endif
-#if defined(ZERO_COPY_SOCKETS) || defined(ENABLE_VFS_IOOPT)
 #include <vm/vm_object.h>
 #endif
 
@@ -197,7 +195,7 @@ out:
     return (error);
 }
 
-#if defined(ENABLE_VFS_IOOPT) || defined(ZERO_COPY_SOCKETS)
+#ifdef ZERO_COPY_SOCKETS
 /*
  * Experimental support for zero-copy I/O
  */
@@ -209,9 +207,6 @@ userspaceco(void *cp, u_int cnt, struct uio *uio, struct vm_object *obj,
     int error;
 
     iov = uio->uio_iov;
-
-#ifdef ZERO_COPY_SOCKETS
-
     if (uio->uio_rw == UIO_READ) {
         if ((so_zero_copy_receive != 0)
          && (obj != NULL)
@@ -240,43 +235,12 @@ userspaceco(void *cp, u_int cnt, struct uio *uio, struct vm_object *obj,
              */
            if (error != 0)
                error = copyout(cp, iov->iov_base, cnt);
-#ifdef ENABLE_VFS_IOOPT
-       } else if ((vfs_ioopt != 0)
-        && ((cnt & PAGE_MASK) == 0)
-        && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
-        && ((uio->uio_offset & PAGE_MASK) == 0)
-        && ((((intptr_t) cp) & PAGE_MASK) == 0)) {
-           error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
-               uio->uio_offset, cnt,
-               (vm_offset_t) iov->iov_base, NULL);
-#endif /* ENABLE_VFS_IOOPT */
        } else {
            error = copyout(cp, iov->iov_base, cnt);
        }
    } else {
        error = copyin(iov->iov_base, cp, cnt);
    }
-#else /* ZERO_COPY_SOCKETS */
-   if (uio->uio_rw == UIO_READ) {
-#ifdef ENABLE_VFS_IOOPT
-       if ((vfs_ioopt != 0)
-        && ((cnt & PAGE_MASK) == 0)
-        && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
-        && ((uio->uio_offset & PAGE_MASK) == 0)
-        && ((((intptr_t) cp) & PAGE_MASK) == 0)) {
-           error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
-               uio->uio_offset, cnt,
-               (vm_offset_t) iov->iov_base, NULL);
-       } else
-#endif /* ENABLE_VFS_IOOPT */
-       {
-           error = copyout(cp, iov->iov_base, cnt);
-       }
-   } else {
-       error = copyin(iov->iov_base, cp, cnt);
-   }
-#endif /* ZERO_COPY_SOCKETS */
-
    return (error);
 }
 
@@ -334,75 +298,7 @@ uiomoveco(void *cp, int n, struct uio *uio, struct vm_object *obj,
    }
    return (0);
 }
-#endif /* ENABLE_VFS_IOOPT || ZERO_COPY_SOCKETS */
-
-#ifdef ENABLE_VFS_IOOPT
-
-/*
- * Experimental support for zero-copy I/O
- */
-int
-uioread(int n, struct uio *uio, struct vm_object *obj, int *nread)
-{
-   int npagesmoved;
-   struct iovec *iov;
-   u_int cnt, tcnt;
-   int error;
-
-   *nread = 0;
-   if (vfs_ioopt < 2)
-       return 0;
-
-   error = 0;
-
-   while (n > 0 && uio->uio_resid) {
-       iov = uio->uio_iov;
-       cnt = iov->iov_len;
-       if (cnt == 0) {
-           uio->uio_iov++;
-           uio->uio_iovcnt--;
-           continue;
-       }
-       if (cnt > n)
-           cnt = n;
-
-       if ((uio->uio_segflg == UIO_USERSPACE) &&
-           ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
-           ((uio->uio_offset & PAGE_MASK) == 0) ) {
-
-           if (cnt < PAGE_SIZE)
-               break;
-
-           cnt &= ~PAGE_MASK;
-
-           if (ticks - PCPU_GET(switchticks) >= hogticks)
-               uio_yield();
-           error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
-               uio->uio_offset, cnt,
-               (vm_offset_t) iov->iov_base, &npagesmoved);
-
-           if (npagesmoved == 0)
-               break;
-
-           tcnt = npagesmoved * PAGE_SIZE;
-           cnt = tcnt;
-
-           if (error)
-               break;
-
-           iov->iov_base = (char *)iov->iov_base + cnt;
-           iov->iov_len -= cnt;
-           uio->uio_resid -= cnt;
-           uio->uio_offset += cnt;
-           *nread += cnt;
-           n -= cnt;
-       } else {
-           break;
-       }
-   }
-   return error;
-}
-#endif /* ENABLE_VFS_IOOPT */
+#endif /* ZERO_COPY_SOCKETS */
 
 /*
  * Give next character to user as result of read.
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 7e21555..5f4b678 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -130,12 +130,6 @@ SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
 static int nameileafonly;
 SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");
 
-#ifdef ENABLE_VFS_IOOPT
-/* See NOTES for a description of this setting. */
-int vfs_ioopt;
-SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
-#endif
-
 /*
  * Cache for the mount type id assigned to NFS.  This is used for
  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
diff --git a/sys/sys/uio.h b/sys/sys/uio.h
index 8604360..e70e9f6 100644
--- a/sys/sys/uio.h
+++ b/sys/sys/uio.h
@@ -88,7 +88,6 @@ struct vm_object;
 void	uio_yield(void);
 int	uiomove(void *, int, struct uio *);
 int	uiomoveco(void *, int, struct uio *, struct vm_object *, int);
-int	uioread(int, struct uio *, struct vm_object *, int *);
 int	copyinfrom(const void *src, void *dst, size_t len, int seg);
 int	copyinstrfrom(const void *src, void *dst, size_t len, size_t *copied,
 	    int seg);
diff --git a/sys/ufs/ffs/ffs_vnops.c b/sys/ufs/ffs/ffs_vnops.c
index 9b39812..cc6debc 100644
--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -389,45 +389,6 @@ ffs_read(ap)
        vm_object_reference(object);
    }
 
-#ifdef ENABLE_VFS_IOOPT
-   /*
-    * If IO optimisation is turned on,
-    * and we are NOT a VM based IO request,
-    * (i.e. not headed for the buffer cache)
-    * but there IS a vm object associated with it.
-    */
-   if ((ioflag & IO_VMIO) == 0 && (vfs_ioopt > 1) && object) {
-       int nread, toread;
-
-       toread = uio->uio_resid;
-       if (toread > bytesinfile)
-           toread = bytesinfile;
-       if (toread >= PAGE_SIZE) {
-           /*
-            * Then if it's at least a page in size, try
-            * get the data from the object using vm tricks
-            */
-           error = uioread(toread, uio, object, &nread);
-           if ((uio->uio_resid == 0) || (error != 0)) {
-               /*
-                * If we finished or there was an error
-                * then finish up (the reference previously
-                * obtained on object must be released).
-                */
-               if ((error == 0 ||
-                   uio->uio_resid != orig_resid) &&
-                   (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
-                   ip->i_flag |= IN_ACCESS;
-
-               if (object) {
-                   vm_object_vndeallocate(object);
-               }
-               return error;
-           }
-       }
-   }
-#endif
-
    /*
     * Ok so we couldn't do it all in one vm trick...
     * so cycle around trying smaller bites..
@@ -435,52 +396,6 @@ ffs_read(ap)
    for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
        if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
            break;
-#ifdef ENABLE_VFS_IOOPT
-       if ((ioflag & IO_VMIO) == 0 && (vfs_ioopt > 1) && object) {
-           /*
-            * Obviously we didn't finish above, but we
-            * didn't get an error either. Try the same trick again.
-            * but this time we are looping.
-            */
-           int nread, toread;
-           toread = uio->uio_resid;
-           if (toread > bytesinfile)
-               toread = bytesinfile;
-
-           /*
-            * Once again, if there isn't enough for a
-            * whole page, don't try optimising.
-            */
-           if (toread >= PAGE_SIZE) {
-               error = uioread(toread, uio, object, &nread);
-               if ((uio->uio_resid == 0) || (error != 0)) {
-                   /*
-                    * If we finished or there was an
-                    * error then finish up (the reference
-                    * previously obtained on object must
-                    * be released).
-                    */
-                   if ((error == 0 ||
-                       uio->uio_resid != orig_resid) &&
-                       (vp->v_mount->mnt_flag &
-                       MNT_NOATIME) == 0)
-                       ip->i_flag |= IN_ACCESS;
-                   if (object) {
-                       vm_object_vndeallocate(object);
-                   }
-                   return error;
-               }
-               /*
-                * To get here we didnt't finish or err.
-                * If we did get some data,
-                * loop to try another bite.
-                */
-               if (nread > 0) {
-                   continue;
-               }
-           }
-       }
-#endif
 
        lbn = lblkno(fs, uio->uio_offset);
        nextlbn = lbn + 1;
@@ -575,22 +490,6 @@ ffs_read(ap)
            xfersize = size;
        }
 
-#ifdef ENABLE_VFS_IOOPT
-       if (vfs_ioopt && object &&
-           (bp->b_flags & B_VMIO) &&
-           ((blkoffset & PAGE_MASK) == 0) &&
-           ((xfersize & PAGE_MASK) == 0)) {
-           /*
-            * If VFS IO optimisation is turned on,
-            * and it's an exact page multiple
-            * And a normal VM based op,
-            * then use uiomiveco()
-            */
-           error =
-               uiomoveco((char *)bp->b_data + blkoffset,
-                   (int)xfersize, uio, object, 0);
-       } else
-#endif
        {
            /*
             * otherwise use the general form
@@ -755,13 +654,6 @@ ffs_write(ap)
    if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
        flags |= IO_SYNC;
 
-#ifdef ENABLE_VFS_IOOPT
-   if (object && (object->flags & OBJ_OPT)) {
-       vm_freeze_copyopts(object,
-           OFF_TO_IDX(uio->uio_offset),
-           OFF_TO_IDX(uio->uio_offset + uio->uio_resid + PAGE_MASK));
-   }
-#endif
    for (error = 0; uio->uio_resid > 0;) {
        lbn = lblkno(fs, uio->uio_offset);
        blkoffset = blkoff(fs, uio->uio_offset);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index abb9ef3..e443a00 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -280,13 +280,6 @@ RetryFault:;
    fs.vp = vnode_pager_lock(fs.first_object);
    vm_object_pip_add(fs.first_object, 1);
 
-#ifdef ENABLE_VFS_IOOPT
-   if ((fault_type & VM_PROT_WRITE) &&
-       (fs.first_object->type == OBJT_VNODE)) {
-       vm_freeze_copyopts(fs.first_object,
-           fs.first_pindex, fs.first_pindex + 1);
-   }
-#endif
    fs.lookup_still_valid = TRUE;
 
    if (wired)
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index cd3b2e7..33e819d 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2879,255 +2879,6 @@ vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
    vm_map_unlock_read(map);
 }
 
-#ifdef ENABLE_VFS_IOOPT
-/*
- * Experimental support for zero-copy I/O
- *
- * Implement uiomove with VM operations. This handles (and collateral changes)
- * support every combination of source object modification, and COW type
- * operations.
- */
-int
-vm_uiomove(
-   vm_map_t mapa,
-   vm_object_t srcobject,
-   off_t cp,
-   int cnta,
-   vm_offset_t uaddra,
-   int *npages)
-{
-   vm_map_t map;
-   vm_object_t first_object, oldobject, object;
-   vm_map_entry_t entry;
-   vm_prot_t prot;
-   boolean_t wired;
-   int tcnt, rv;
-   vm_offset_t uaddr, start, end, tend;
-   vm_pindex_t first_pindex, oindex;
-   vm_size_t osize;
-   off_t ooffset;
-   int cnt;
-
-   GIANT_REQUIRED;
-
-   if (npages)
-       *npages = 0;
-
-   cnt = cnta;
-   uaddr = uaddra;
-
-   while (cnt > 0) {
-       map = mapa;
-
-       if ((vm_map_lookup(&map, uaddr,
-           VM_PROT_READ, &entry, &first_object,
-           &first_pindex, &prot, &wired)) != KERN_SUCCESS) {
-           return EFAULT;
-       }
-
-       vm_map_clip_start(map, entry, uaddr);
-
-       tcnt = cnt;
-       tend = uaddr + tcnt;
-       if (tend > entry->end) {
-           tcnt = entry->end - uaddr;
-           tend = entry->end;
-       }
-
-       vm_map_clip_end(map, entry, tend);
-
-       start = entry->start;
-       end = entry->end;
-
-       osize = atop(tcnt);
-
-       oindex = OFF_TO_IDX(cp);
-       if (npages) {
-           vm_size_t idx;
-           for (idx = 0; idx < osize; idx++) {
-               vm_page_t m;
-               if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
-                   vm_map_lookup_done(map, entry);
-                   return 0;
-               }
-               /*
-                * disallow busy or invalid pages, but allow
-                * m->busy pages if they are entirely valid.
-                */
-               if ((m->flags & PG_BUSY) ||
-                   ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
-                   vm_map_lookup_done(map, entry);
-                   return 0;
-               }
-           }
-       }
-
-/*
- * If we are changing an existing map entry, just redirect
- * the object, and change mappings.
- */
-       if ((first_object->type == OBJT_VNODE) &&
-           ((oldobject = entry->object.vm_object) == first_object)) {
-
-           if ((entry->offset != cp) || (oldobject != srcobject)) {
-               /*
-                * Remove old window into the file
-                */
-               vm_page_lock_queues();
-               pmap_remove(map->pmap, uaddr, tend);
-               vm_page_unlock_queues();
-
-               /*
-                * Force copy on write for mmaped regions
-                */
-               vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
-
-               /*
-                * Point the object appropriately
-                */
-               if (oldobject != srcobject) {
-
-               /*
-                * Set the object optimization hint flag
-                */
-                   vm_object_set_flag(srcobject, OBJ_OPT);
-                   vm_object_reference(srcobject);
-                   entry->object.vm_object = srcobject;
-
-                   if (oldobject) {
-                       vm_object_deallocate(oldobject);
-                   }
-               }
-
-               entry->offset = cp;
-               map->timestamp++;
-           } else {
-               vm_page_lock_queues();
-               pmap_remove(map->pmap, uaddr, tend);
-               vm_page_unlock_queues();
-           }
-
-       } else if ((first_object->ref_count == 1) &&
-           (first_object->size == osize) &&
-           ((first_object->type == OBJT_DEFAULT) ||
-               (first_object->type == OBJT_SWAP)) ) {
-
-           oldobject = first_object->backing_object;
-
-           if ((first_object->backing_object_offset != cp) ||
-               (oldobject != srcobject)) {
-               /*
-                * Remove old window into the file
-                */
-               vm_page_lock_queues();
-               pmap_remove(map->pmap, uaddr, tend);
-               vm_page_unlock_queues();
-
-               /*
-                * Remove unneeded old pages
-                */
-               vm_object_lock(first_object);
-               vm_object_page_remove(first_object, 0, 0, 0);
-               vm_object_unlock(first_object);
-
-               /*
-                * Invalidate swap space
-                */
-               if (first_object->type == OBJT_SWAP) {
-                   swap_pager_freespace(first_object,
-                       0,
-                       first_object->size);
-               }
-
-               /*
-                * Force copy on write for mmaped regions
-                */
-               vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
-
-               /*
-                * Point the object appropriately
-                */
-               if (oldobject != srcobject) {
-
-               /*
-                * Set the object optimization hint flag
-                */
-                   vm_object_set_flag(srcobject, OBJ_OPT);
-                   vm_object_reference(srcobject);
-
-                   if (oldobject) {
-                       TAILQ_REMOVE(&oldobject->shadow_head,
-                           first_object, shadow_list);
-                       oldobject->shadow_count--;
-                       /* XXX bump generation? */
-                       vm_object_deallocate(oldobject);
-                   }
-
-                   TAILQ_INSERT_TAIL(&srcobject->shadow_head,
-                       first_object, shadow_list);
-                   srcobject->shadow_count++;
-                   /* XXX bump generation? */
-
-                   first_object->backing_object = srcobject;
-               }
-               first_object->backing_object_offset = cp;
-               map->timestamp++;
-           } else {
-               vm_page_lock_queues();
-               pmap_remove(map->pmap, uaddr, tend);
-               vm_page_unlock_queues();
-           }
-/*
- * Otherwise, we have to do a logical mmap.
- */
-       } else {
-
-           vm_object_set_flag(srcobject, OBJ_OPT);
-           vm_object_reference(srcobject);
-
-           vm_page_lock_queues();
-           pmap_remove(map->pmap, uaddr, tend);
-           vm_page_unlock_queues();
-
-           vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
-           vm_map_lock_upgrade(map);
-
-           if (entry == &map->header) {
-               map->first_free = &map->header;
-           } else if (map->first_free->start >= start) {
-               map->first_free = entry->prev;
-           }
-
-           vm_map_entry_delete(map, entry);
-
-           object = srcobject;
-           ooffset = cp;
-
-           rv = vm_map_insert(map, object, ooffset, start, tend,
-               VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
-
-           if (rv != KERN_SUCCESS)
-               panic("vm_uiomove: could not insert new entry: %d", rv);
-       }
-
-/*
- * Map the window directly, if it is already in memory
- */
-       pmap_object_init_pt(map->pmap, uaddr,
-           srcobject, oindex, tcnt, 0);
-
-       map->timestamp++;
-       vm_map_unlock(map);
-
-       cnt -= tcnt;
-       uaddr += tcnt;
-       cp += tcnt;
-       if (npages)
-           *npages += osize;
-   }
-   return 0;
-}
-#endif
-
 #include "opt_ddb.h"
 #ifdef DDB
 #include <sys/kernel.h>
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 3809d06..eef9a48 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -322,6 +322,5 @@ int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 int vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
     boolean_t user_wire);
 int vmspace_swap_count (struct vmspace *vmspace);
-int vm_uiomove(vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
 #endif				/* _KERNEL */
 #endif				/* _VM_MAP_ */
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 2fe605d..b51987b 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -407,9 +407,6 @@ vm_object_vndeallocate(vm_object_t object)
    if (object->ref_count == 0) {
        mp_fixme("Unlocked vflag access.");
        vp->v_vflag &= ~VV_TEXT;
-#ifdef ENABLE_VFS_IOOPT
-       vm_object_clear_flag(object, OBJ_OPT);
-#endif
    }
    /*
     * vrele may need a vop lock
@@ -502,10 +499,6 @@ doterm:
        if (temp) {
            TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
            temp->shadow_count--;
-#ifdef ENABLE_VFS_IOOPT
-           if (temp->ref_count == 0)
-               vm_object_clear_flag(temp, OBJ_OPT);
-#endif
            temp->generation++;
            object->backing_object = NULL;
        }
@@ -556,12 +549,6 @@ vm_object_terminate(vm_object_t object)
    if (object->type == OBJT_VNODE) {
        struct vnode *vp;
 
-#ifdef ENABLE_VFS_IOOPT
-       /*
-        * Freeze optimized copies.
-        */
-       vm_freeze_copyopts(object, 0, object->size);
-#endif
        /*
         * Clean pages and flush buffers.
         */
@@ -937,39 +924,6 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
    return(maxf + 1);
 }
 
-#ifdef ENABLE_VFS_IOOPT
-/*
- * Same as vm_object_pmap_copy, except range checking really
- * works, and is meant for small sections of an object.
- *
- * This code protects resident pages by making them read-only
- * and is typically called on a fork or split when a page
- * is converted to copy-on-write.
- *
- * NOTE: If the page is already at VM_PROT_NONE, calling
- * pmap_page_protect will have no effect.
- */
-void
-vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
-{
-   vm_pindex_t idx;
-   vm_page_t p;
-
-   GIANT_REQUIRED;
-
-   if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
-       return;
-   vm_page_lock_queues();
-   for (idx = start; idx < end; idx++) {
-       p = vm_page_lookup(object, idx);
-       if (p == NULL)
-           continue;
-       pmap_page_protect(p, VM_PROT_READ);
-   }
-   vm_page_unlock_queues();
-}
-#endif
-
 /*
  *	vm_object_madvise:
 @@ -1852,94 +1806,6 @@ vm_object_set_writeable_dirty(vm_object_t object)
    }
 }
 
-#ifdef ENABLE_VFS_IOOPT
-/*
- * Experimental support for zero-copy I/O
- *
- * Performs the copy_on_write operations necessary to allow the virtual copies
- * into user space to work. This has to be called for write(2) system calls
- * from other processes, file unlinking, and file size shrinkage.
- */
-void
-vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
-{
-   int rv;
-   vm_object_t robject;
-   vm_pindex_t idx;
-
-   GIANT_REQUIRED;
-   if ((object == NULL) ||
-       ((object->flags & OBJ_OPT) == 0))
-       return;
-
-   if (object->shadow_count > object->ref_count)
-       panic("vm_freeze_copyopts: sc > rc");
-
-   while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
-       vm_pindex_t bo_pindex;
-       vm_page_t m_in, m_out;
-
-       bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
-
-       vm_object_reference(robject);
-
-       vm_object_pip_wait(robject, "objfrz");
-
-       if (robject->ref_count == 1) {
-           vm_object_deallocate(robject);
-           continue;
-       }
-
-       vm_object_pip_add(robject, 1);
-
-       for (idx = 0; idx < robject->size; idx++) {
-
-           m_out = vm_page_grab(robject, idx,
-               VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
-
-           if (m_out->valid == 0) {
-               m_in = vm_page_grab(object, bo_pindex + idx,
-                   VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
-               vm_page_lock_queues();
-               if (m_in->valid == 0) {
-                   vm_page_unlock_queues();
-                   rv = vm_pager_get_pages(object, &m_in, 1, 0);
-                   if (rv != VM_PAGER_OK) {
-                       printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
-                       continue;
-                   }
-                   vm_page_lock_queues();
-                   vm_page_deactivate(m_in);
-               }
-
-               pmap_remove_all(m_in);
-               vm_page_unlock_queues();
-               pmap_copy_page(m_in, m_out);
-               vm_page_lock_queues();
-               m_out->valid = m_in->valid;
-               vm_page_dirty(m_out);
-               vm_page_activate(m_out);
-               vm_page_wakeup(m_in);
-           } else
-               vm_page_lock_queues();
-           vm_page_wakeup(m_out);
-           vm_page_unlock_queues();
-       }
-
-       object->shadow_count--;
-       object->ref_count--;
-       TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
-       robject->backing_object = NULL;
-       robject->backing_object_offset = 0;
-
-       vm_object_pip_wakeup(robject);
-       vm_object_deallocate(robject);
-   }
-
-   vm_object_clear_flag(object, OBJ_OPT);
-}
-#endif
-
 #include "opt_ddb.h"
 #ifdef DDB
 #include <sys/kernel.h>
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 1716e35..467d7f0 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -151,7 +151,6 @@ struct vm_object {
 #define OBJ_WRITEABLE	0x0080		/* object has been made writable */
 #define OBJ_MIGHTBEDIRTY	0x0100		/* object might be dirty */
 #define OBJ_CLEANING	0x0200
-#define OBJ_OPT		0x1000		/* I/O optimization */
 #define OBJ_ONEMAPPING	0x2000		/* One USE (a single, non-forked) mapping flag */
 
 #define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
@@ -176,8 +175,6 @@ extern vm_object_t kmem_object;
 #define vm_object_unlock(object) \
 	mtx_unlock((object) == kmem_object ? &kmem_object->mtx : &Giant)
 
-void vm_freeze_copyopts(vm_object_t, vm_pindex_t, vm_pindex_t);
-
 void vm_object_set_flag(vm_object_t object, u_short bits);
 void vm_object_clear_flag(vm_object_t object, u_short bits);
 void vm_object_pip_add(vm_object_t object, short i);
@@ -199,7 +196,6 @@ void vm_object_set_writeable_dirty (vm_object_t);
 void vm_object_init (void);
 void vm_object_page_clean (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
 void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
-void vm_object_pmap_copy_1 (vm_object_t, vm_pindex_t, vm_pindex_t);
 void vm_object_reference (vm_object_t);
 void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t);
 void vm_object_split(vm_map_entry_t);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index f061ccb..5522bcb 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -307,9 +307,6 @@ vnode_pager_setsize(vp, nsize)
     * File has shrunk.  Toss any cached pages beyond the new EOF.
     */
    if (nsize < object->un_pager.vnp.vnp_size) {
-#ifdef ENABLE_VFS_IOOPT
-       vm_freeze_copyopts(object, OFF_TO_IDX(nsize), object->size);
-#endif
        if (nobjsize < object->size) {
            vm_object_lock(object);
            vm_object_page_remove(object, nobjsize, object->size,
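
The deleted NOTES text and the userspaceco()/uioread() hunks above show the gating rule for the removed fast path: page flipping was attempted only when every coordinate of the transfer was page-aligned. Below is a minimal user-space restatement of that predicate; is_page_flippable() is a hypothetical name, and the 4 KiB PAGE_MASK is an assumption for the sketch rather than the kernel's definition.

#include <stdint.h>
#include <stddef.h>
#include <sys/types.h>          /* off_t */

#define PAGE_MASK   0xfffUL     /* assume 4 KiB pages for this sketch */

/*
 * Hypothetical restatement of the test the removed userspaceco()
 * applied before calling vm_uiomove(): the transfer length, the
 * user buffer address, the file offset, and the kernel buffer
 * address must all be page-aligned, otherwise the page-remapping
 * trick cannot stand in for a plain copyout().
 */
static int
is_page_flippable(size_t cnt, const void *uaddr, off_t offset,
    const void *kaddr)
{
    return ((cnt & PAGE_MASK) == 0 &&
        ((uintptr_t)uaddr & PAGE_MASK) == 0 &&
        ((uint64_t)offset & PAGE_MASK) == 0 &&
        ((uintptr_t)kaddr & PAGE_MASK) == 0);
}

If any of the four alignments failed, the old code fell back to an ordinary copy, which is why the optimization rarely paid off outside special workloads.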
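With the optimization gone, ffs_read() always falls through to the "general form" visible in the third ffs_vnops.c hunk. The following kernel-side sketch shows that surviving path; copy_block_generic() is an illustrative stand-in rather than a function in the tree, and it assumes the uiomove() signature declared in the sys/sys/uio.h hunk above. It is a sketch of the pattern, not buildable outside a kernel source tree.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/uio.h>

/*
 * Simplified stand-in for the copy that remains in ffs_read() once
 * the uiomoveco() branch is deleted: the block is already mapped at
 * bp->b_data, and uiomove() walks the request's iovecs and performs
 * an ordinary copyout() (or copyin() for writes) on the caller's
 * behalf, regardless of alignment.
 */
static int
copy_block_generic(struct buf *bp, long blkoffset, long xfersize,
    struct uio *uio)
{
    return (uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio));
}

The design point of the commit is visible here: an unconditional, alignment-agnostic copy through the buffer cache replaces the fragile page-remapping machinery that the NOTES entry itself warned could cause instability and data loss.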