author	alc <alc@FreeBSD.org>	2004-01-08 20:48:26 +0000
committer	alc <alc@FreeBSD.org>	2004-01-08 20:48:26 +0000
commit	9f7878e05ab8473614dbd02b348aced4f69c8f39 (patch)
tree	93969e6896f2afbaefcb25fb23dc6cfbc7fa03cb /sys/vm
parent	c87939ff3b0d7321c03632991a1435fd009a8763 (diff)
- Enable recursive acquisition of the mutex synchronizing access to the
  free pages queue.  This is presently needed by contigmalloc1().
- Move a sanity check against attempted double allocation of two pages
  to the same vm object offset from vm_page_alloc() to vm_page_insert().
  This provides better protection because double allocation could occur
  through a direct call to vm_page_insert(), such as that by
  vm_page_rename().
- Modify contigmalloc1() to hold the mutex synchronizing access to the
  free pages queue while it scans vm_page_array in search of free pages.
- Correct a potential leak of pages by contigmalloc1() that I introduced
  in revision 1.20: We must convert all cache queue pages to free pages
  before we begin removing free pages from the free queue.  Otherwise,
  if we have to restart the scan because we are unable to acquire the
  vm object lock that is necessary to convert a cache queue page to a
  free page, we leak those free pages already removed from the free
  queue.
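The first and third items describe a single pattern: contigmalloc1() now holds the free queue mutex across its whole scan, and routines it calls while converting cache pages (such as vm_page_free()) may take the same mutex again, which MTX_RECURSE makes legal. Below is a minimal standalone sketch of that pattern, assuming a FreeBSD 5.x-era kernel; the example_* names are hypothetical stand-ins, not code from this commit:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_free_mtx;

static void
example_init(void)
{
	/* MTX_RECURSE lets the owning thread re-acquire this spin mutex. */
	mtx_init(&example_free_mtx, "example free mutex", NULL,
	    MTX_RECURSE | MTX_SPIN);
}

static void
example_scan(void)
{
	mtx_lock_spin(&example_free_mtx);	/* outer hold, as in the scan */
	/*
	 * A routine called during the scan may itself acquire the same
	 * mutex; with MTX_RECURSE this nests instead of panicking.
	 */
	mtx_lock_spin(&example_free_mtx);	/* recursive acquisition */
	mtx_unlock_spin(&example_free_mtx);
	mtx_unlock_spin(&example_free_mtx);	/* released at the outer unlock */
}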
Diffstat (limited to 'sys/vm')
-rw-r--r--	sys/vm/vm_contig.c	8
-rw-r--r--	sys/vm/vm_page.c	13
2 files changed, 13 insertions, 8 deletions
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index 3628b37..68adc46 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -168,6 +168,7 @@ contigmalloc1(
 	for (pass = 0; pass <= 1; pass++) {
 		s = splvm();
 		vm_page_lock_queues();
+		mtx_lock_spin(&vm_page_queue_free_mtx);
 again:
 		/*
 		 * Find first page in array that is free, within range,
@@ -188,6 +189,7 @@ again:
 		 */
 		if ((i == cnt.v_page_count) ||
 		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
+			mtx_unlock_spin(&vm_page_queue_free_mtx);
 again1:
 			if (vm_contig_launder(PQ_INACTIVE))
 				goto again1;
@@ -224,7 +226,9 @@ again1:
 				vm_page_free(m);
 				VM_OBJECT_UNLOCK(object);
 			}
-			mtx_lock_spin(&vm_page_queue_free_mtx);
+		}
+		for (i = start; i < (start + size / PAGE_SIZE); i++) {
+			vm_page_t m = &pga[i];
 			vm_pageq_remove_nowakeup(m);
 			m->valid = VM_PAGE_BITS_ALL;
 			if (m->flags & PG_ZERO)
@@ -236,8 +240,8 @@ again1:
 			m->wire_count = 0;
 			m->busy = 0;
 			m->object = NULL;
-			mtx_unlock_spin(&vm_page_queue_free_mtx);
 		}
+		mtx_unlock_spin(&vm_page_queue_free_mtx);
 		vm_page_unlock_queues();
 		/*
 		 * We've found a contiguous chunk that meets our requirements.
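Taken together, the last two hunks split what had been a single loop into two passes, which is what fixes the leak described in the commit message: no page leaves the free queue until every cache page in the run has been converted. A condensed sketch of the resulting shape (heavily simplified; the object-trylock and restart logic of the real function is only hinted at in comments):

	/*
	 * Pass 1: convert cache queue pages in the candidate run to free
	 * pages.  This pass can still fail and restart the whole scan, so
	 * no page may have been dequeued from the free queue yet.
	 */
	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		vm_page_t m = &pga[i];

		if ((m->queue - m->pc) == PQ_CACHE) {
			/* if VM_OBJECT_TRYLOCK(m->object) fails: restart scan */
			vm_page_busy(m);
			vm_page_free(m);
			VM_OBJECT_UNLOCK(m->object);
		}
	}
	/*
	 * Pass 2: every page in the run is now free; dequeue them.  No
	 * restart can occur past this point, so nothing leaks.
	 */
	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		vm_page_t m = &pga[i];

		vm_pageq_remove_nowakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		/* ... remaining per-page initialization as in the diff ... */
	}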
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 310b45b..21103b3 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -206,11 +206,12 @@ vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
 	end = phys_avail[biggestone+1];
 
 	/*
-	 * Initialize the locks.
+	 * Initialize the locks.  Recursive acquisition of the vm page
+	 * queue free mutex begins in contigmalloc1().
 	 */
 	mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF);
 	mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
-	    MTX_SPIN);
+	    MTX_RECURSE | MTX_SPIN);
 
 	/*
 	 * Initialize the queue headers for the free queue, the active queue
@@ -529,7 +530,7 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	if (m->object != NULL)
-		panic("vm_page_insert: already inserted");
+		panic("vm_page_insert: page already inserted");
 
 	/*
 	 * Record the object/offset pair in this page
@@ -552,7 +553,9 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 			m->right = root;
 			root->left = NULL;
 			TAILQ_INSERT_BEFORE(root, m, listq);
-		} else {
+		} else if (pindex == root->pindex)
+			panic("vm_page_insert: offset already allocated");
+		else {
 			m->right = root->right;
 			m->left = root;
 			root->right = NULL;
@@ -754,8 +757,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 		KASSERT(object != NULL,
 		    ("vm_page_alloc: NULL object."));
 		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-		KASSERT(!vm_page_lookup(object, pindex),
-		    ("vm_page_alloc: page already allocated"));
 		color = (pindex + object->pg_color) & PQ_L2_MASK;
 	} else
 		color = pindex & PQ_L2_MASK;
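The deleted KASSERT is not lost: the equivalent check now fires inside vm_page_insert() itself, which also covers callers that never pass through vm_page_alloc(). A hedged sketch of such a direct-insert path, loosely modeled on vm_page_rename() (the body is illustrative, not the actual implementation):

	static void
	example_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
	{
		vm_page_remove(m);	/* detach from the old object */
		/*
		 * vm_page_insert() now panics with "offset already allocated"
		 * if new_pindex is occupied in new_object, so this path is
		 * protected even though the old KASSERT in vm_page_alloc()
		 * never ran for it.
		 */
		vm_page_insert(m, new_object, new_pindex);
	}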