path: root/sys/vm/vm_page.c
author     kib <kib@FreeBSD.org>  2013-09-16 06:25:54 +0000
committer  kib <kib@FreeBSD.org>  2013-09-16 06:25:54 +0000
commit     6796656333337e5530946dca854ffe0ce55b0cf0 (patch)
tree       4db1f72cdac363c77df44aecb8f3e49bd8fb3732 /sys/vm/vm_page.c
parent     9867f4e99b817ebbf5d787e92b2a8e2fe14808a1 (diff)
Remove zero-copy sockets code. It only worked for anonymous memory,
and the equivalent functionality is now provided by sendfile(2) over a
POSIX shared memory file descriptor.

Remove the cow member of struct vm_page, and rearrange the remaining
members. While there, make hold_count unsigned.

Requested and reviewed by:	alc
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Approved by:	re (delphij)
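The replacement path the log message refers to can be exercised entirely from
userspace. The sketch below is not part of this commit; the helper name
send_shm_data and the already-connected stream socket sock are assumptions for
illustration. It fills an anonymous POSIX shared memory object and hands its
pages to sendfile(2), which on FreeBSD accepts a shared memory object
descriptor as the source fd, making the in-kernel copy-on-write machinery
removed below unnecessary for the anonymous-memory case.

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/*
 * Copy "len" bytes from "buf" into an anonymous POSIX shared memory
 * object and transmit them over the connected stream socket "sock"
 * using sendfile(2).  Returns 0 on success, -1 on error.
 */
static int
send_shm_data(int sock, const void *buf, size_t len)
{
	void *p;
	off_t sbytes;
	int shmfd;

	/* Anonymous shared memory object (FreeBSD-specific SHM_ANON). */
	shmfd = shm_open(SHM_ANON, O_RDWR, 0600);
	if (shmfd == -1)
		return (-1);
	if (ftruncate(shmfd, (off_t)len) == -1)
		goto fail;

	/* Map the object and fill it with the payload. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, shmfd, 0);
	if (p == MAP_FAILED)
		goto fail;
	memcpy(p, buf, len);
	munmap(p, len);

	/* Send the object's pages without copying them through userspace. */
	if (sendfile(shmfd, sock, 0, len, NULL, &sbytes, 0) == -1)
		goto fail;
	close(shmfd);
	return (0);
fail:
	close(shmfd);
	return (-1);
}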
Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r--  sys/vm/vm_page.c  104
1 file changed, 1 insertion(+), 103 deletions(-)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 3a2c7f0..ac28bc4 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -674,8 +674,8 @@ vm_page_unhold(vm_page_t mem)
{
vm_page_lock_assert(mem, MA_OWNED);
+ KASSERT(mem->hold_count >= 1, ("vm_page_unhold: hold count < 0!!!"));
--mem->hold_count;
- KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
vm_page_free_toq(mem);
}
@@ -3108,108 +3108,6 @@ vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
}
#endif
-int so_zerocp_fullpage = 0;
-
-/*
- * Replace the given page with a copy. The copied page assumes
- * the portion of the given page's "wire_count" that is not the
- * responsibility of this copy-on-write mechanism.
- *
- * The object containing the given page must have a non-zero
- * paging-in-progress count and be locked.
- */
-void
-vm_page_cowfault(vm_page_t m)
-{
- vm_page_t mnew;
- vm_object_t object;
- vm_pindex_t pindex;
-
- vm_page_lock_assert(m, MA_OWNED);
- object = m->object;
- VM_OBJECT_ASSERT_WLOCKED(object);
- KASSERT(object->paging_in_progress != 0,
- ("vm_page_cowfault: object %p's paging-in-progress count is zero.",
- object));
- pindex = m->pindex;
-
- retry_alloc:
- mnew = vm_page_alloc(NULL, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
- if (mnew == NULL) {
- vm_page_unlock(m);
- VM_OBJECT_WUNLOCK(object);
- VM_WAIT;
- VM_OBJECT_WLOCK(object);
- if (m == vm_page_lookup(object, pindex)) {
- vm_page_lock(m);
- goto retry_alloc;
- } else {
- /*
- * Page disappeared during the wait.
- */
- return;
- }
- }
-
- if (m->cow == 0) {
- /*
- * check to see if we raced with an xmit complete when
- * waiting to allocate a page. If so, put things back
- * the way they were
- */
- vm_page_unlock(m);
- vm_page_lock(mnew);
- vm_page_free(mnew);
- vm_page_unlock(mnew);
- } else { /* clear COW & copy page */
- pmap_remove_all(m);
- mnew->object = object;
- if (object->memattr != VM_MEMATTR_DEFAULT &&
- (object->flags & OBJ_FICTITIOUS) == 0)
- pmap_page_set_memattr(mnew, object->memattr);
- if (vm_page_replace(mnew, object, pindex) != m)
- panic("vm_page_cowfault: invalid page replacement");
- if (!so_zerocp_fullpage)
- pmap_copy_page(m, mnew);
- mnew->valid = VM_PAGE_BITS_ALL;
- vm_page_dirty(mnew);
- mnew->wire_count = m->wire_count - m->cow;
- m->wire_count = m->cow;
- vm_page_unlock(m);
- }
-}
-
-void
-vm_page_cowclear(vm_page_t m)
-{
-
- vm_page_lock_assert(m, MA_OWNED);
- if (m->cow) {
- m->cow--;
- /*
- * let vm_fault add back write permission lazily
- */
- }
- /*
- * sf_buf_free() will free the page, so we needn't do it here
- */
-}
-
-int
-vm_page_cowsetup(vm_page_t m)
-{
-
- vm_page_lock_assert(m, MA_OWNED);
- if ((m->flags & PG_FICTITIOUS) != 0 ||
- (m->oflags & VPO_UNMANAGED) != 0 ||
- m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYWLOCK(m->object))
- return (EBUSY);
- m->cow++;
- pmap_remove_write(m);
- VM_OBJECT_WUNLOCK(m->object);
- return (0);
-}
-
#ifdef INVARIANTS
void
vm_page_object_lock_assert(vm_page_t m)