author     kmacy <kmacy@FreeBSD.org>    2010-04-30 00:46:43 +0000
committer  kmacy <kmacy@FreeBSD.org>    2010-04-30 00:46:43 +0000
commit     1dc1263413262d13f45f42d89c554d0ddc167ca2 (patch)
tree       250705d42bcb9b364f4fd2233c38faaadadf7ae0 /sys/vm/vm_glue.c
parent     a43425e8835983e7c93a15a2d6a42fdb292c1676 (diff)
download   FreeBSD-src-1dc1263413262d13f45f42d89c554d0ddc167ca2.zip
           FreeBSD-src-1dc1263413262d13f45f42d89c554d0ddc167ca2.tar.gz
On Alan's advice, rather than do a wholesale conversion on a single
architecture from page queue lock to a hashed array of page locks
(based on a patch by Jeff Roberson), I've implemented page lock
support in the MI code and have only moved vm_page's hold_count out
from under the page queue mutex to the page lock.  This changes
pmap_extract_and_hold on all pmaps.

Supported by:   Bitgravity Inc.

Discussed with: alc, jeffr, and kib
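
The pattern the diff below applies is the same in every caller: take
the new per-page lock around the hold_count update instead of the
global page queue mutex.  A minimal sketch of the idiom follows; the
hold_page_example() wrapper is hypothetical, written only to contrast
the two lockings, while vm_page_lock()/vm_page_unlock() and
vm_page_hold() are the real KPIs this commit touches:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    #include <vm/vm.h>
    #include <vm/vm_page.h>

    /*
     * Hypothetical wrapper contrasting old and new idioms.
     *
     * Old (pre-commit): hold_count was protected by the single,
     * global page queue mutex, so every hold contended on one lock:
     *
     *     vm_page_lock_queues();
     *     vm_page_hold(m);
     *     vm_page_unlock_queues();
     *
     * New: only the page's own lock (one of a hashed array of
     * mutexes in the MI code) is taken, so holds of unrelated
     * pages no longer contend with each other.
     */
    static void
    hold_page_example(vm_page_t m)
    {

        vm_page_lock(m);    /* per-page lock from the hashed array */
        vm_page_hold(m);    /* bump hold_count; page can't be freed */
        vm_page_unlock(m);
    }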
Diffstat (limited to 'sys/vm/vm_glue.c')
-rw-r--r--  sys/vm/vm_glue.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 4eeaa4d..288c5d7 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -257,16 +257,18 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
         if (m == NULL)
             goto out;
         if (rv != VM_PAGER_OK) {
+            vm_page_lock(m);
             vm_page_lock_queues();
             vm_page_free(m);
             vm_page_unlock_queues();
+            vm_page_unlock(m);
             m = NULL;
             goto out;
         }
     }
-    vm_page_lock_queues();
+    vm_page_lock(m);
     vm_page_hold(m);
-    vm_page_unlock_queues();
+    vm_page_unlock(m);
     vm_page_wakeup(m);
 out:
     VM_OBJECT_UNLOCK(object);
@@ -300,9 +302,9 @@ vm_imgact_unmap_page(struct sf_buf *sf)
     m = sf_buf_page(sf);
     sf_buf_free(sf);
     sched_unpin();
-    vm_page_lock_queues();
+    vm_page_lock(m);
     vm_page_unhold(m);
-    vm_page_unlock_queues();
+    vm_page_unlock(m);
 }
 
 void
@@ -434,10 +436,12 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
         m = vm_page_lookup(ksobj, i);
         if (m == NULL)
             panic("vm_thread_dispose: kstack already missing?");
+        vm_page_lock(m);
         vm_page_lock_queues();
         vm_page_unwire(m, 0);
         vm_page_free(m);
         vm_page_unlock_queues();
+        vm_page_unlock(m);
     }
     VM_OBJECT_UNLOCK(ksobj);
     vm_object_deallocate(ksobj);
@@ -524,9 +528,11 @@ vm_thread_swapout(struct thread *td)
         if (m == NULL)
             panic("vm_thread_swapout: kstack already missing?");
         vm_page_dirty(m);
+        vm_page_lock(m);
         vm_page_lock_queues();
         vm_page_unwire(m, 0);
         vm_page_unlock_queues();
+        vm_page_unlock(m);
     }
     VM_OBJECT_UNLOCK(ksobj);
 }
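
The hunks above cover only the MI callers in vm_glue.c; as the commit
message notes, pmap_extract_and_hold() changes on every pmap as well.
A sketch of the shape the MD routine takes after this change,
simplified for illustration: pmap_walk_for_pa() stands in for the
per-architecture page table walk and is hypothetical, while
PMAP_LOCK()/PMAP_UNLOCK(), PHYS_TO_VM_PAGE(), and the vm_page_lock()
calls are real KPIs:

    vm_page_t
    pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
    {
        vm_paddr_t pa;
        vm_page_t m;

        m = NULL;
        PMAP_LOCK(pmap);
        /* MD detail: walk the page tables for va, checking prot. */
        pa = pmap_walk_for_pa(pmap, va, prot);  /* hypothetical helper */
        if (pa != 0) {
            m = PHYS_TO_VM_PAGE(pa);
            vm_page_lock(m);    /* was: vm_page_lock_queues() */
            vm_page_hold(m);
            vm_page_unlock(m);  /* was: vm_page_unlock_queues() */
        }
        PMAP_UNLOCK(pmap);
        return (m);
    }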