summaryrefslogtreecommitdiffstats
path: root/sys/vm/vm_page.c
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2004-07-29 18:56:31 +0000
committeralc <alc@FreeBSD.org>2004-07-29 18:56:31 +0000
commitff80a7c7f2ea4c99244975d743264c31598730eb (patch)
tree0c874b2def953e485c7cd9018e8b2b5e78498462 /sys/vm/vm_page.c
parentf34d6d13842d3ae7ca2d607a2a5020c512670dca (diff)
downloadFreeBSD-src-ff80a7c7f2ea4c99244975d743264c31598730eb.zip
FreeBSD-src-ff80a7c7f2ea4c99244975d743264c31598730eb.tar.gz
Advance the state of pmap locking on alpha, amd64, and i386.
- Enable recursion on the page queues lock. This allows calls to vm_page_alloc(VM_ALLOC_NORMAL) and UMA's obj_alloc() with the page queues lock held. Such calls are made to allocate page table pages and pv entries.
- The previous change enables a partial reversion of vm/vm_page.c revision 1.216, i.e., the call to vm_page_alloc() by vm_page_cowfault() now specifies VM_ALLOC_NORMAL rather than VM_ALLOC_INTERRUPT.
- Add partial locking to pmap_copy(). (As a side-effect, pmap_copy() should now be faster on i386 SMP because it no longer generates IPIs for TLB shootdown on the other processors.)
- Complete the locking of pmap_enter() and pmap_enter_quick(). (As of now, all changes to a user-level pmap on alpha, amd64, and i386 are performed with appropriate locking.)
Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r--sys/vm/vm_page.c9
1 files changed, 3 insertions, 6 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 5ecc221..e9adf50 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -204,7 +204,8 @@ vm_page_startup(vm_offset_t vaddr)
/*
* Initialize the locks.
*/
- mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF);
+ mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
+ MTX_RECURSE);
mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
MTX_SPIN);
@@ -1657,11 +1658,7 @@ vm_page_cowfault(vm_page_t m)
retry_alloc:
vm_page_remove(m);
- /*
- * An interrupt allocation is requested because the page
- * queues lock is held.
- */
- mnew = vm_page_alloc(object, pindex, VM_ALLOC_INTERRUPT);
+ mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
if (mnew == NULL) {
vm_page_insert(m, object, pindex);
vm_page_unlock_queues();
OpenPOWER on IntegriCloud