summary refs log tree commit diff stats
path: root/sys/vm
diff options
context:
space:
mode:
author alc <alc@FreeBSD.org> 2007-02-05 06:02:55 +0000
committer alc <alc@FreeBSD.org> 2007-02-05 06:02:55 +0000
commit 4881bd38e2f4061efa354e25c1c66cc0275393a7 (patch)
tree 637e68f96e2bfd32a294cf1c8e9174af72387c26 /sys/vm
parent 7925e63ddf2503578b494beaa7b4e98a843ed6d9 (diff)
download FreeBSD-src-4881bd38e2f4061efa354e25c1c66cc0275393a7.zip
FreeBSD-src-4881bd38e2f4061efa354e25c1c66cc0275393a7.tar.gz
Change the free page queue lock from a spin mutex to a default (blocking)
mutex. With the demise of Alpha support, there is no longer a reason for it to be a spin mutex.
Diffstat (limited to 'sys/vm')
-rw-r--r-- sys/vm/vm_contig.c   14
-rw-r--r-- sys/vm/vm_page.c     18
-rw-r--r-- sys/vm/vm_pageq.c     4
-rw-r--r-- sys/vm/vm_zeroidle.c  8
4 files changed, 22 insertions, 22 deletions
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index 1e86998..066b430 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -198,7 +198,7 @@ contigmalloc1(
for (pass = 2; pass >= 0; pass--) {
vm_page_lock_queues();
again0:
- mtx_lock_spin(&vm_page_queue_free_mtx);
+ mtx_lock(&vm_page_queue_free_mtx);
again:
/*
* Find first page in array that is free, within range,
@@ -219,7 +219,7 @@ again:
*/
if ((i == cnt.v_page_count) ||
((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
- mtx_unlock_spin(&vm_page_queue_free_mtx);
+ mtx_unlock(&vm_page_queue_free_mtx);
/*
* Instead of racing to empty the inactive/active
* queues, give up, even with more left to free,
@@ -260,7 +260,7 @@ again1:
goto again;
}
}
- mtx_unlock_spin(&vm_page_queue_free_mtx);
+ mtx_unlock(&vm_page_queue_free_mtx);
for (i = start; i < (start + size / PAGE_SIZE); i++) {
vm_page_t m = &pga[i];
@@ -283,7 +283,7 @@ again1:
VM_OBJECT_UNLOCK(object);
}
}
- mtx_lock_spin(&vm_page_queue_free_mtx);
+ mtx_lock(&vm_page_queue_free_mtx);
for (i = start; i < (start + size / PAGE_SIZE); i++) {
pqtype = pga[i].queue - pga[i].pc;
if (pqtype != PQ_FREE) {
@@ -304,7 +304,7 @@ again1:
m->wire_count = 0;
m->busy = 0;
}
- mtx_unlock_spin(&vm_page_queue_free_mtx);
+ mtx_unlock(&vm_page_queue_free_mtx);
vm_page_unlock_queues();
/*
* We've found a contiguous chunk that meets are requirements.
@@ -368,12 +368,12 @@ vm_contig_unqueue_free(vm_page_t m)
{
int error = 0;
- mtx_lock_spin(&vm_page_queue_free_mtx);
+ mtx_lock(&vm_page_queue_free_mtx);
if ((m->queue - m->pc) == PQ_FREE)
vm_pageq_remove_nowakeup(m);
else
error = EAGAIN;
- mtx_unlock_spin(&vm_page_queue_free_mtx);
+ mtx_unlock(&vm_page_queue_free_mtx);
if (error)
return (error);
m->valid = VM_PAGE_BITS_ALL;
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index f834c31..4234635 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -254,7 +254,7 @@ vm_page_startup(vm_offset_t vaddr)
mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
MTX_RECURSE);
mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
- MTX_SPIN);
+ MTX_DEF);
/*
* Initialize the queue headers for the free queue, the active queue
@@ -869,7 +869,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
};
loop:
- mtx_lock_spin(&vm_page_queue_free_mtx);
+ mtx_lock(&vm_page_queue_free_mtx);
if (cnt.v_free_count > cnt.v_free_reserved ||
(page_req == VM_ALLOC_SYSTEM &&
cnt.v_cache_count == 0 &&
@@ -881,7 +881,7 @@ loop:
*/
m = vm_pageq_find(PQ_FREE, color, (req & VM_ALLOC_ZERO) != 0);
} else if (page_req != VM_ALLOC_INTERRUPT) {
- mtx_unlock_spin(&vm_page_queue_free_mtx);
+ mtx_unlock(&vm_page_queue_free_mtx);
/*
* Allocatable from cache (non-interrupt only). On success,
* we must free the page and try again, thus ensuring that
@@ -899,9 +899,9 @@ loop:
if (page_req != VM_ALLOC_SYSTEM)
return (NULL);
- mtx_lock_spin(&vm_page_queue_free_mtx);
+ mtx_lock(&vm_page_queue_free_mtx);
if (cnt.v_free_count <= cnt.v_interrupt_free_min) {
- mtx_unlock_spin(&vm_page_queue_free_mtx);
+ mtx_unlock(&vm_page_queue_free_mtx);
return (NULL);
}
m = vm_pageq_find(PQ_FREE, color, (req & VM_ALLOC_ZERO) != 0);
@@ -913,7 +913,7 @@ loop:
/*
* Not allocatable from cache from interrupt, give up.
*/
- mtx_unlock_spin(&vm_page_queue_free_mtx);
+ mtx_unlock(&vm_page_queue_free_mtx);
atomic_add_int(&vm_pageout_deficit, 1);
pagedaemon_wakeup();
return (NULL);
@@ -957,7 +957,7 @@ loop:
m->busy = 0;
m->valid = 0;
KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));
- mtx_unlock_spin(&vm_page_queue_free_mtx);
+ mtx_unlock(&vm_page_queue_free_mtx);
if ((req & VM_ALLOC_NOOBJ) == 0)
vm_page_insert(m, object, pindex);
@@ -1151,7 +1151,7 @@ vm_page_free_toq(vm_page_t m)
} else
VM_PAGE_SETQUEUE1(m, PQ_FREE);
pq = &vm_page_queues[VM_PAGE_GETQUEUE(m)];
- mtx_lock_spin(&vm_page_queue_free_mtx);
+ mtx_lock(&vm_page_queue_free_mtx);
pq->lcnt++;
++(*pq->cnt);
@@ -1165,7 +1165,7 @@ vm_page_free_toq(vm_page_t m)
} else {
TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
}
- mtx_unlock_spin(&vm_page_queue_free_mtx);
+ mtx_unlock(&vm_page_queue_free_mtx);
vm_page_free_wakeup();
}
diff --git a/sys/vm/vm_pageq.c b/sys/vm/vm_pageq.c
index 85dd9e5..1b3e9a4 100644
--- a/sys/vm/vm_pageq.c
+++ b/sys/vm/vm_pageq.c
@@ -198,9 +198,9 @@ vm_pageq_add_new_page(vm_paddr_t pa)
m->flags = 0;
m->pc = (pa >> PAGE_SHIFT) & PQ_COLORMASK;
pmap_page_init(m);
- mtx_lock_spin(&vm_page_queue_free_mtx);
+ mtx_lock(&vm_page_queue_free_mtx);
vm_pageq_enqueue(m->pc + PQ_FREE, m);
- mtx_unlock_spin(&vm_page_queue_free_mtx);
+ mtx_unlock(&vm_page_queue_free_mtx);
}
/*
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 47ec2b4..252f8d5 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -103,14 +103,14 @@ vm_page_zero_idle(void)
static int free_rover;
vm_page_t m;
- mtx_lock_spin(&vm_page_queue_free_mtx);
+ mtx_lock(&vm_page_queue_free_mtx);
zero_state = 0;
m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
if (m != NULL && (m->flags & PG_ZERO) == 0) {
vm_pageq_remove_nowakeup(m);
- mtx_unlock_spin(&vm_page_queue_free_mtx);
+ mtx_unlock(&vm_page_queue_free_mtx);
pmap_zero_page_idle(m);
- mtx_lock_spin(&vm_page_queue_free_mtx);
+ mtx_lock(&vm_page_queue_free_mtx);
m->flags |= PG_ZERO;
vm_pageq_enqueue(PQ_FREE + m->pc, m);
++vm_page_zero_count;
@@ -119,7 +119,7 @@ vm_page_zero_idle(void)
zero_state = 1;
}
free_rover = (free_rover + PQ_PRIME2) & PQ_COLORMASK;
- mtx_unlock_spin(&vm_page_queue_free_mtx);
+ mtx_unlock(&vm_page_queue_free_mtx);
}
/* Called by vm_page_free to hint that a new page is available. */
OpenPOWER on IntegriCloud