summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	alc <alc@FreeBSD.org>	2010-05-04 15:55:41 +0000
committer	alc <alc@FreeBSD.org>	2010-05-04 15:55:41 +0000
commit	c9aaa1e2a21afcea58a3bec5e8cffd6652fbaa25 (patch)
tree	08dad4596fb8ef71c441ab83e48cd31b53d4c26f
parent	fbfe456f9697050cf749467e9b47e3aad49f09bb (diff)
downloadFreeBSD-src-c9aaa1e2a21afcea58a3bec5e8cffd6652fbaa25.zip
FreeBSD-src-c9aaa1e2a21afcea58a3bec5e8cffd6652fbaa25.tar.gz
Add page locking to the vm_page_cow* functions.

Push down the acquisition and release of the page queues lock into
vm_page_wire().

Reviewed by:	kib
-rw-r--r--sys/dev/drm/via_dmablit.c2
-rw-r--r--sys/kern/uipc_cow.c3
-rw-r--r--sys/kern/vfs_bio.c2
-rw-r--r--sys/net/bpf_zerocopy.c2
-rw-r--r--sys/vm/vm_fault.c6
-rw-r--r--sys/vm/vm_page.c20
6 files changed, 13 insertions, 22 deletions
diff --git a/sys/dev/drm/via_dmablit.c b/sys/dev/drm/via_dmablit.c
index 72d914e..81d438a 100644
--- a/sys/dev/drm/via_dmablit.c
+++ b/sys/dev/drm/via_dmablit.c
@@ -251,10 +251,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
if (m == NULL)
break;
vm_page_lock(m);
- vm_page_lock_queues();
vm_page_wire(m);
vm_page_unhold(m);
- vm_page_unlock_queues();
vm_page_unlock(m);
vsg->pages[i] = m;
}
diff --git a/sys/kern/uipc_cow.c b/sys/kern/uipc_cow.c
index 88a5625..bc9a56a 100644
--- a/sys/kern/uipc_cow.c
+++ b/sys/kern/uipc_cow.c
@@ -131,10 +131,8 @@ socow_setup(struct mbuf *m0, struct uio *uio)
* set up COW
*/
vm_page_lock(pp);
- vm_page_lock_queues();
if (vm_page_cowsetup(pp) != 0) {
vm_page_unhold(pp);
- vm_page_unlock_queues();
vm_page_unlock(pp);
return (0);
}
@@ -144,7 +142,6 @@ socow_setup(struct mbuf *m0, struct uio *uio)
*/
vm_page_wire(pp);
vm_page_unhold(pp);
- vm_page_unlock_queues();
vm_page_unlock(pp);
/*
* Allocate an sf buf
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 5db2d9e..589bdbd 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -3043,9 +3043,7 @@ allocbuf(struct buf *bp, int size)
* We have a good page.
*/
vm_page_lock(m);
- vm_page_lock_queues();
vm_page_wire(m);
- vm_page_unlock_queues();
vm_page_unlock(m);
bp->b_pages[bp->b_npages] = m;
++bp->b_npages;
diff --git a/sys/net/bpf_zerocopy.c b/sys/net/bpf_zerocopy.c
index 5ca6978..5629093 100644
--- a/sys/net/bpf_zerocopy.c
+++ b/sys/net/bpf_zerocopy.c
@@ -171,10 +171,8 @@ zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
if (pp == NULL)
return (NULL);
vm_page_lock(pp);
- vm_page_lock_queues();
vm_page_wire(pp);
vm_page_unhold(pp);
- vm_page_unlock_queues();
vm_page_unlock(pp);
sf = sf_buf_alloc(pp, SFB_NOWAIT);
if (sf == NULL) {
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index f925594..142a9ef 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -315,8 +315,6 @@ RetryFault:;
(fault_type & VM_PROT_WRITE) &&
(fs.object == fs.first_object)) {
vm_page_cowfault(fs.m);
- vm_page_unlock_queues();
- vm_page_unlock(fs.m);
unlock_and_deallocate(&fs);
goto RetryFault;
}
@@ -797,9 +795,7 @@ vnode_locked:
if (wired && (fault_flags &
VM_FAULT_CHANGE_WIRING) == 0) {
vm_page_lock(fs.first_m);
- vm_page_lock_queues();
vm_page_wire(fs.first_m);
- vm_page_unlock_queues();
vm_page_unlock(fs.first_m);
vm_page_lock(fs.m);
@@ -1285,9 +1281,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
vm_page_unlock(src_m);
vm_page_lock(dst_m);
- vm_page_lock_queues();
vm_page_wire(dst_m);
- vm_page_unlock_queues();
vm_page_unlock(dst_m);
} else {
vm_page_lock(dst_m);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index d9288d9..957bf82 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1544,13 +1544,15 @@ vm_page_wire(vm_page_t m)
* and only unqueue the page if it is on some queue (if it is unmanaged
* it is already off the queues).
*/
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
vm_page_lock_assert(m, MA_OWNED);
if (m->flags & PG_FICTITIOUS)
return;
if (m->wire_count == 0) {
- if ((m->flags & PG_UNMANAGED) == 0)
+ if ((m->flags & PG_UNMANAGED) == 0) {
+ vm_page_lock_queues();
vm_pageq_remove(m);
+ vm_page_unlock_queues();
+ }
atomic_add_int(&cnt.v_wire_count, 1);
}
m->wire_count++;
@@ -1922,9 +1924,7 @@ retrylookup:
} else {
if ((allocflags & VM_ALLOC_WIRED) != 0) {
vm_page_lock(m);
- vm_page_lock_queues();
vm_page_wire(m);
- vm_page_unlock_queues();
vm_page_unlock(m);
}
if ((allocflags & VM_ALLOC_NOBUSY) == 0)
@@ -2224,6 +2224,7 @@ vm_page_cowfault(vm_page_t m)
vm_object_t object;
vm_pindex_t pindex;
+ vm_page_lock_assert(m, MA_OWNED);
object = m->object;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
KASSERT(object->paging_in_progress != 0,
@@ -2238,17 +2239,18 @@ vm_page_cowfault(vm_page_t m)
if (mnew == NULL) {
vm_page_insert(m, object, pindex);
vm_page_unlock_queues();
+ vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_WAIT;
VM_OBJECT_LOCK(object);
if (m == vm_page_lookup(object, pindex)) {
+ vm_page_lock(m);
vm_page_lock_queues();
goto retry_alloc;
} else {
/*
* Page disappeared during the wait.
*/
- vm_page_lock_queues();
return;
}
}
@@ -2269,13 +2271,15 @@ vm_page_cowfault(vm_page_t m)
mnew->wire_count = m->wire_count - m->cow;
m->wire_count = m->cow;
}
+ vm_page_unlock_queues();
+ vm_page_unlock(m);
}
void
vm_page_cowclear(vm_page_t m)
{
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ vm_page_lock_assert(m, MA_OWNED);
if (m->cow) {
m->cow--;
/*
@@ -2291,11 +2295,13 @@ int
vm_page_cowsetup(vm_page_t m)
{
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ vm_page_lock_assert(m, MA_OWNED);
if (m->cow == USHRT_MAX - 1)
return (EBUSY);
m->cow++;
+ vm_page_lock_queues();
pmap_remove_write(m);
+ vm_page_unlock_queues();
return (0);
}
OpenPOWER on IntegriCloud