summaryrefslogtreecommitdiffstats
path: root/sys/vm
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2009-06-21 20:29:14 +0000
committeralc <alc@FreeBSD.org>2009-06-21 20:29:14 +0000
commitd092e8e13d0d2c4c93ccc14801eb033e16b09bb1 (patch)
tree764610a3fb546165d2afe716b96e65a11df0e7db /sys/vm
parentb5481f96fb5b0f8821f819dbae822c405e250cd2 (diff)
downloadFreeBSD-src-d092e8e13d0d2c4c93ccc14801eb033e16b09bb1.zip
FreeBSD-src-d092e8e13d0d2c4c93ccc14801eb033e16b09bb1.tar.gz
Implement a mechanism within vm_phys_alloc_contig() to defer all necessary
calls to vdrop() until after the free page queues lock is released. This eliminates repeatedly releasing and reacquiring the free page queues lock each time the last cached page is reclaimed from a vnode-backed object.
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_phys.c29
1 file changed, 20 insertions, 9 deletions
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index 0de35cd..f64bc9c 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -594,7 +594,7 @@ vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
struct vm_phys_seg *seg;
vm_object_t m_object;
vm_paddr_t pa, pa_last, size;
- vm_page_t m, m_ret;
+ vm_page_t deferred_vdrop_list, m, m_ret;
int flind, i, oind, order, pind;
size = npages << PAGE_SHIFT;
@@ -604,6 +604,7 @@ vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
("vm_phys_alloc_contig: alignment must be a power of 2"));
KASSERT((boundary & (boundary - 1)) == 0,
("vm_phys_alloc_contig: boundary must be a power of 2"));
+ deferred_vdrop_list = NULL;
/* Compute the queue that is the best fit for npages. */
for (order = 0; (1 << order) < npages; order++);
mtx_lock(&vm_page_queue_free_mtx);
@@ -697,10 +698,23 @@ done:
("vm_phys_alloc_contig: page %p is busy", m));
KASSERT(m->dirty == 0,
("vm_phys_alloc_contig: page %p is dirty", m));
- m_object = m->object;
if ((m->flags & PG_CACHED) != 0) {
m->valid = 0;
+ m_object = m->object;
vm_page_cache_remove(m);
+ if (m_object->type == OBJT_VNODE &&
+ m_object->cache == NULL) {
+ /*
+ * Enqueue the vnode for deferred vdrop().
+ *
+ * Unmanaged pages don't use "pageq", so it
+ * can be safely abused to construct a short-
+ * lived queue of vnodes.
+ */
+ m->pageq.tqe_prev = m_object->handle;
+ m->pageq.tqe_next = deferred_vdrop_list;
+ deferred_vdrop_list = m;
+ }
} else {
KASSERT(VM_PAGE_IS_FREE(m),
("vm_phys_alloc_contig: page %p is not free", m));
@@ -714,13 +728,6 @@ done:
m->flags = PG_UNMANAGED | (m->flags & PG_ZERO);
m->oflags = 0;
/* Unmanaged pages don't use "act_count". */
- if (m_object != NULL &&
- m_object->type == OBJT_VNODE &&
- m_object->cache == NULL) {
- mtx_unlock(&vm_page_queue_free_mtx);
- vdrop(m_object->handle);
- mtx_lock(&vm_page_queue_free_mtx);
- }
}
for (; i < roundup2(npages, 1 << imin(oind, order)); i++) {
m = &m_ret[i];
@@ -730,6 +737,10 @@ done:
vm_phys_free_pages(m, 0);
}
mtx_unlock(&vm_page_queue_free_mtx);
+ while (deferred_vdrop_list != NULL) {
+ vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
+ deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
+ }
return (m_ret);
}
OpenPOWER on IntegriCloud