summary refs log tree commit diff stats
path: root/sys/kern
diff options
context:
space:
mode:
author:    kmacy <kmacy@FreeBSD.org>  2010-04-30 00:46:43 +0000
committer: kmacy <kmacy@FreeBSD.org>  2010-04-30 00:46:43 +0000
commit:   1dc1263413262d13f45f42d89c554d0ddc167ca2 (patch)
tree:     250705d42bcb9b364f4fd2233c38faaadadf7ae0 /sys/kern
parent:   a43425e8835983e7c93a15a2d6a42fdb292c1676 (diff)
download: FreeBSD-src-1dc1263413262d13f45f42d89c554d0ddc167ca2.zip
download: FreeBSD-src-1dc1263413262d13f45f42d89c554d0ddc167ca2.tar.gz
On Alan's advice, rather than do a wholesale conversion on a single
architecture from page queue lock to a hashed array of page locks (based
on a patch by Jeff Roberson), I've implemented page lock support in the
MI code and have only moved vm_page's hold_count out from under page
queue mutex to page lock. This changes pmap_extract_and_hold on all
pmaps.

Supported by:   Bitgravity Inc.
Discussed with: alc, jeffr, and kib
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/kern_exec.c8
-rw-r--r--sys/kern/subr_witness.c9
-rw-r--r--sys/kern/sys_pipe.c12
-rw-r--r--sys/kern/sys_process.c8
-rw-r--r--sys/kern/uipc_cow.c4
-rw-r--r--sys/kern/vfs_bio.c13
6 files changed, 34 insertions(+), 20 deletions(-)
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index ed22519..bb92972 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -957,9 +957,9 @@ exec_map_first_page(imgp)
return (EIO);
}
}
- vm_page_lock_queues();
+ vm_page_lock(ma[0]);
vm_page_hold(ma[0]);
- vm_page_unlock_queues();
+ vm_page_unlock(ma[0]);
vm_page_wakeup(ma[0]);
VM_OBJECT_UNLOCK(object);
@@ -979,9 +979,9 @@ exec_unmap_first_page(imgp)
m = sf_buf_page(imgp->firstpage);
sf_buf_free(imgp->firstpage);
imgp->firstpage = NULL;
- vm_page_lock_queues();
+ vm_page_lock(m);
vm_page_unhold(m);
- vm_page_unlock_queues();
+ vm_page_unlock(m);
}
}
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 5b7d565..ef1bc39 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -597,6 +597,15 @@ static struct witness_order_list_entry order_lists[] = {
{ "cdev", &lock_class_mtx_sleep },
{ NULL, NULL },
/*
+ * VM
+ *
+ */
+ { "vm object", &lock_class_mtx_sleep },
+ { "page lock", &lock_class_mtx_sleep },
+ { "vm page queue mutex", &lock_class_mtx_sleep },
+ { "pmap", &lock_class_mtx_sleep },
+ { NULL, NULL },
+ /*
* kqueue/VFS interaction
*/
{ "kqueue", &lock_class_mtx_sleep },
diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c
index e098648..7130c26 100644
--- a/sys/kern/sys_pipe.c
+++ b/sys/kern/sys_pipe.c
@@ -773,10 +773,12 @@ pipe_build_write_buffer(wpipe, uio)
*/
race:
if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0) {
- vm_page_lock_queues();
- for (j = 0; j < i; j++)
+
+ for (j = 0; j < i; j++) {
+ vm_page_lock(wpipe->pipe_map.ms[j]);
vm_page_unhold(wpipe->pipe_map.ms[j]);
- vm_page_unlock_queues();
+ vm_page_unlock(wpipe->pipe_map.ms[j]);
+ }
return (EFAULT);
}
wpipe->pipe_map.ms[i] = pmap_extract_and_hold(pmap, addr,
@@ -816,11 +818,11 @@ pipe_destroy_write_buffer(wpipe)
int i;
PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
- vm_page_lock_queues();
for (i = 0; i < wpipe->pipe_map.npages; i++) {
+ vm_page_lock(wpipe->pipe_map.ms[i]);
vm_page_unhold(wpipe->pipe_map.ms[i]);
+ vm_page_unlock(wpipe->pipe_map.ms[i]);
}
- vm_page_unlock_queues();
wpipe->pipe_map.npages = 0;
}
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index f5671d9..d8cc4f0 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -328,9 +328,9 @@ proc_rwmem(struct proc *p, struct uio *uio)
/*
* Hold the page in memory.
*/
- vm_page_lock_queues();
+ vm_page_lock(m);
vm_page_hold(m);
- vm_page_unlock_queues();
+ vm_page_unlock(m);
/*
* We're done with tmap now.
@@ -349,9 +349,9 @@ proc_rwmem(struct proc *p, struct uio *uio)
/*
* Release the page.
*/
- vm_page_lock_queues();
+ vm_page_lock(m);
vm_page_unhold(m);
- vm_page_unlock_queues();
+ vm_page_unlock(m);
} while (error == 0 && uio->uio_resid > 0);
diff --git a/sys/kern/uipc_cow.c b/sys/kern/uipc_cow.c
index 52988dd..5c0dcd2 100644
--- a/sys/kern/uipc_cow.c
+++ b/sys/kern/uipc_cow.c
@@ -128,10 +128,12 @@ socow_setup(struct mbuf *m0, struct uio *uio)
/*
* set up COW
*/
+ vm_page_lock(pp);
vm_page_lock_queues();
if (vm_page_cowsetup(pp) != 0) {
vm_page_unhold(pp);
vm_page_unlock_queues();
+ vm_page_unlock(pp);
return (0);
}
@@ -141,7 +143,7 @@ socow_setup(struct mbuf *m0, struct uio *uio)
vm_page_wire(pp);
vm_page_unhold(pp);
vm_page_unlock_queues();
-
+ vm_page_unlock(pp);
/*
* Allocate an sf buf
*/
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 156b676..ea846a5 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -3860,12 +3860,12 @@ vmapbuf(struct buf *bp)
retry:
if (vm_fault_quick(addr >= bp->b_data ? addr : bp->b_data,
prot) < 0) {
- vm_page_lock_queues();
for (i = 0; i < pidx; ++i) {
+ vm_page_lock(bp->b_pages[i]);
vm_page_unhold(bp->b_pages[i]);
+ vm_page_unlock(bp->b_pages[i]);
bp->b_pages[i] = NULL;
}
- vm_page_unlock_queues();
return(-1);
}
m = pmap_extract_and_hold(pmap, (vm_offset_t)addr, prot);
@@ -3896,11 +3896,12 @@ vunmapbuf(struct buf *bp)
npages = bp->b_npages;
pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
- vm_page_lock_queues();
- for (pidx = 0; pidx < npages; pidx++)
+ for (pidx = 0; pidx < npages; pidx++) {
+ vm_page_lock(bp->b_pages[pidx]);
vm_page_unhold(bp->b_pages[pidx]);
- vm_page_unlock_queues();
-
+ vm_page_unlock(bp->b_pages[pidx]);
+ }
+
bp->b_data = bp->b_saveaddr;
}
OpenPOWER on IntegriCloud