summary | refs | log | tree | commit | diff | stats
path: root/sys/vm/sg_pager.c
diff options
context:
space:
mode:
author: kmacy <kmacy@FreeBSD.org> 2010-04-30 00:46:43 +0000
committer: kmacy <kmacy@FreeBSD.org> 2010-04-30 00:46:43 +0000
commit: 1dc1263413262d13f45f42d89c554d0ddc167ca2 (patch)
tree: 250705d42bcb9b364f4fd2233c38faaadadf7ae0 /sys/vm/sg_pager.c
parent: a43425e8835983e7c93a15a2d6a42fdb292c1676 (diff)
download: FreeBSD-src-1dc1263413262d13f45f42d89c554d0ddc167ca2.zip
download: FreeBSD-src-1dc1263413262d13f45f42d89c554d0ddc167ca2.tar.gz
On Alan's advice, rather than do a wholesale conversion on a single
architecture from page queue lock to a hashed array of page locks (based on
a patch by Jeff Roberson), I've implemented page lock support in the MI code
and have only moved vm_page's hold_count out from under page queue mutex to
page lock. This changes pmap_extract_and_hold on all pmaps.

Supported by: Bitgravity Inc.
Discussed with: alc, jeffr, and kib
Diffstat (limited to 'sys/vm/sg_pager.c')
-rw-r--r--  sys/vm/sg_pager.c | 9 +++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/sys/vm/sg_pager.c b/sys/vm/sg_pager.c
index a17fe82..845401a 100644
--- a/sys/vm/sg_pager.c
+++ b/sys/vm/sg_pager.c
@@ -198,10 +198,13 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, pageq);
/* Free the original pages and insert this fake page into the object. */
- vm_page_lock_queues();
- for (i = 0; i < count; i++)
+ for (i = 0; i < count; i++) {
+ vm_page_lock(m[i]);
+ vm_page_lock_queues();
vm_page_free(m[i]);
- vm_page_unlock_queues();
+ vm_page_unlock_queues();
+ vm_page_unlock(m[i]);
+ }
vm_page_insert(page, object, offset);
m[reqpage] = page;
page->valid = VM_PAGE_BITS_ALL;
OpenPOWER on IntegriCloud