author		kmacy <kmacy@FreeBSD.org>	2010-04-30 00:46:43 +0000
committer	kmacy <kmacy@FreeBSD.org>	2010-04-30 00:46:43 +0000
commit		1dc1263413262d13f45f42d89c554d0ddc167ca2 (patch)
tree		250705d42bcb9b364f4fd2233c38faaadadf7ae0	/sys/vm/vm_object.c
parent		a43425e8835983e7c93a15a2d6a42fdb292c1676 (diff)
On Alan's advice, rather than do a wholesale conversion on a single
architecture from the page queue lock to a hashed array of page locks
(based on a patch by Jeff Roberson), I've implemented page lock support
in the MI code and have only moved vm_page's hold_count out from under
the page queue mutex to the page lock.  This changes
pmap_extract_and_hold on all pmaps.

Supported by:	Bitgravity Inc.
Discussed with:	alc, jeffr, and kib
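[Editorial note] The hashed array of page locks mentioned above is the key idea: rather than one global mutex serializing access to every page, each page hashes to one of a small, fixed set of mutexes, so unrelated pages rarely contend. Below is a minimal user-space sketch of that scheme in portable C with pthreads. Every name in it (PAGE_LOCK_COUNT, struct page, page_lock(), page_hold()) is hypothetical and chosen for illustration; the actual FreeBSD implementation lives in the kernel and differs in detail.

#include <pthread.h>
#include <stdint.h>

/*
 * Illustrative sketch only: a fixed-size hashed array of page locks.
 * All names here are hypothetical; this is not the FreeBSD kernel API.
 */
#define	PAGE_SHIFT	12	/* assume 4 KB pages */
#define	PAGE_LOCK_COUNT	32	/* power of two keeps the hash cheap */

static pthread_mutex_t page_locks[PAGE_LOCK_COUNT];

struct page {
	uintptr_t phys_addr;	/* stand-in for the page's physical address */
	int	  hold_count;	/* protected by the page lock, not the queue lock */
};

static void
page_locks_init(void)
{
	for (int i = 0; i < PAGE_LOCK_COUNT; i++)
		pthread_mutex_init(&page_locks[i], NULL);
}

/* Map a page to its lock: hash the page frame number into the array. */
static pthread_mutex_t *
page_lockptr(const struct page *p)
{
	return (&page_locks[(p->phys_addr >> PAGE_SHIFT) % PAGE_LOCK_COUNT]);
}

static void
page_lock(struct page *p)
{
	pthread_mutex_lock(page_lockptr(p));
}

static void
page_unlock(struct page *p)
{
	pthread_mutex_unlock(page_lockptr(p));
}

/*
 * The point of the commit: bumping hold_count takes only the page's
 * own lock instead of the global page queue mutex.
 */
static void
page_hold(struct page *p)
{
	page_lock(p);
	p->hold_count++;
	page_unlock(p);
}

Because the page-to-lock mapping depends only on the page's (stable) physical address, every thread agrees on which mutex guards a given page, and contention drops roughly in proportion to the size of the lock array.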
Diffstat (limited to 'sys/vm/vm_object.c')
-rw-r--r--	sys/vm/vm_object.c	89
1 file changed, 77 insertions(+), 12 deletions(-)
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index c25ab77..47ef973 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -717,19 +717,21 @@ vm_object_terminate(vm_object_t object)
* removes them from paging queues. Don't free wired pages, just
* remove them from the object.
*/
- vm_page_lock_queues();
while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0,
("vm_object_terminate: freeing busy page %p "
"p->busy = %d, p->oflags %x\n", p, p->busy, p->oflags));
+ vm_page_lock(p);
+ vm_page_lock_queues();
if (p->wire_count == 0) {
vm_page_free(p);
cnt.v_pfree++;
} else {
vm_page_remove(p);
}
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
}
- vm_page_unlock_queues();
#if VM_NRESERVLEVEL > 0
if (__predict_false(!LIST_EMPTY(&object->rvq)))
@@ -789,7 +791,6 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
tend = end;
}
- vm_page_lock_queues();
/*
* If the caller is smart and only msync()s a range he knows is
* dirty, we may be able to avoid an object scan. This results in
@@ -818,8 +819,12 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
++tscan;
continue;
}
+ vm_page_lock(p);
+ vm_page_lock_queues();
vm_page_test_dirty(p);
if (p->dirty == 0) {
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
if (--scanlimit == 0)
break;
++tscan;
@@ -830,6 +835,8 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
* this is a nosync page, we can't continue.
*/
if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
if (--scanlimit == 0)
break;
++tscan;
@@ -842,6 +849,8 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
* page (i.e. had to sleep).
*/
tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags);
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
}
/*
@@ -871,8 +880,13 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
p->oflags |= VPO_CLEANCHK;
if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC))
clearobjflags = 0;
- else
+ else {
+ vm_page_lock(p);
+ vm_page_lock_queues();
pmap_remove_write(p);
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
+ }
}
if (clearobjflags && (tstart == 0) && (tend == object->size))
@@ -895,8 +909,12 @@ again:
continue;
}
+ vm_page_lock(p);
+ vm_page_lock_queues();
vm_page_test_dirty(p);
if (p->dirty == 0) {
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
p->oflags &= ~VPO_CLEANCHK;
continue;
}
@@ -907,28 +925,37 @@ again:
* not cleared in this case so we do not have to set them.
*/
if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
p->oflags &= ~VPO_CLEANCHK;
continue;
}
n = vm_object_page_collect_flush(object, p,
curgeneration, pagerflags);
- if (n == 0)
+ if (n == 0) {
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
goto rescan;
+ }
- if (object->generation != curgeneration)
+ if (object->generation != curgeneration) {
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
goto rescan;
+ }
/*
* Try to optimize the next page. If we can't we pick up
* our (random) scan where we left off.
*/
if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
if ((p = vm_page_lookup(object, pi + n)) != NULL)
goto again;
}
}
- vm_page_unlock_queues();
#if 0
VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
#endif
@@ -951,8 +978,10 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
vm_page_t ma[vm_pageout_page_count];
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ vm_page_lock_assert(p, MA_OWNED);
pi = p->pindex;
while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
+ vm_page_lock(p);
vm_page_lock_queues();
if (object->generation != curgeneration) {
return(0);
@@ -968,11 +997,16 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
(tp->oflags & VPO_CLEANCHK) == 0) ||
(tp->busy != 0))
break;
+ vm_page_unlock_queues();
+ vm_page_lock(tp);
+ vm_page_lock_queues();
vm_page_test_dirty(tp);
if (tp->dirty == 0) {
+ vm_page_unlock(tp);
tp->oflags &= ~VPO_CLEANCHK;
break;
}
+ vm_page_unlock(tp);
maf[ i - 1 ] = tp;
maxf++;
continue;
@@ -992,11 +1026,16 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
(tp->oflags & VPO_CLEANCHK) == 0) ||
(tp->busy != 0))
break;
+ vm_page_unlock_queues();
+ vm_page_lock(tp);
+ vm_page_lock_queues();
vm_page_test_dirty(tp);
if (tp->dirty == 0) {
+ vm_page_unlock(tp);
tp->oflags &= ~VPO_CLEANCHK;
break;
}
+ vm_page_unlock(tp);
mab[ i - 1 ] = tp;
maxb++;
continue;
@@ -1022,7 +1061,11 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
vm_pageout_flush(ma, runlen, pagerflags);
for (i = 0; i < runlen; i++) {
if (ma[i]->dirty) {
+ vm_page_unlock_queues();
+ vm_page_lock(ma[i]);
+ vm_page_lock_queues();
pmap_remove_write(ma[i]);
+ vm_page_unlock(ma[i]);
ma[i]->oflags |= VPO_CLEANCHK;
/*
@@ -1195,9 +1238,11 @@ shadowlookup:
/*
* If the page is not in a normal state, skip it.
*/
+ vm_page_lock(m);
vm_page_lock_queues();
if (m->hold_count != 0 || m->wire_count != 0) {
vm_page_unlock_queues();
+ vm_page_unlock(m);
goto unlock_tobject;
}
if ((m->oflags & VPO_BUSY) || m->busy) {
@@ -1209,6 +1254,7 @@ shadowlookup:
*/
vm_page_flag_set(m, PG_REFERENCED);
vm_page_unlock_queues();
+ vm_page_unlock(m);
if (object != tobject)
VM_OBJECT_UNLOCK(object);
m->oflags |= VPO_WANTED;
@@ -1243,6 +1289,7 @@ shadowlookup:
vm_page_dontneed(m);
}
vm_page_unlock_queues();
+ vm_page_unlock(m);
if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
@@ -1405,7 +1452,6 @@ retry:
m = TAILQ_NEXT(m, listq);
}
}
- vm_page_lock_queues();
for (; m != NULL && (idx = m->pindex - offidxstart) < size;
m = m_next) {
m_next = TAILQ_NEXT(m, listq);
@@ -1425,11 +1471,14 @@ retry:
VM_OBJECT_LOCK(new_object);
goto retry;
}
+ vm_page_lock(m);
+ vm_page_lock_queues();
vm_page_rename(m, new_object, idx);
+ vm_page_unlock_queues();
+ vm_page_unlock(m);
/* page automatically made dirty by rename and cache handled */
vm_page_busy(m);
}
- vm_page_unlock_queues();
if (orig_object->type == OBJT_SWAP) {
/*
* swap_pager_copy() can sleep, in which case the orig_object's
@@ -1597,6 +1646,7 @@ vm_object_backing_scan(vm_object_t object, int op)
* Page is out of the parent object's range, we
* can simply destroy it.
*/
+ vm_page_lock(p);
vm_page_lock_queues();
KASSERT(!pmap_page_is_mapped(p),
("freeing mapped page %p", p));
@@ -1605,6 +1655,7 @@ vm_object_backing_scan(vm_object_t object, int op)
else
vm_page_remove(p);
vm_page_unlock_queues();
+ vm_page_unlock(p);
p = next;
continue;
}
@@ -1621,6 +1672,7 @@ vm_object_backing_scan(vm_object_t object, int op)
*
* Leave the parent's page alone
*/
+ vm_page_lock(p);
vm_page_lock_queues();
KASSERT(!pmap_page_is_mapped(p),
("freeing mapped page %p", p));
@@ -1629,6 +1681,7 @@ vm_object_backing_scan(vm_object_t object, int op)
else
vm_page_remove(p);
vm_page_unlock_queues();
+ vm_page_unlock(p);
p = next;
continue;
}
@@ -1648,9 +1701,11 @@ vm_object_backing_scan(vm_object_t object, int op)
* If the page was mapped to a process, it can remain
* mapped through the rename.
*/
+ vm_page_lock(p);
vm_page_lock_queues();
vm_page_rename(p, object, new_pindex);
vm_page_unlock_queues();
+ vm_page_unlock(p);
/* page automatically made dirty by rename */
}
p = next;
@@ -1915,7 +1970,7 @@ again:
p = TAILQ_NEXT(p, listq);
}
}
- vm_page_lock_queues();
+
/*
* Assert: the variable p is either (1) the page with the
* least pindex greater than or equal to the parameter pindex
@@ -1934,6 +1989,8 @@ again:
* cannot be freed. They can, however, be invalidated
* if "clean_only" is FALSE.
*/
+ vm_page_lock(p);
+ vm_page_lock_queues();
if ((wirings = p->wire_count) != 0 &&
(wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
/* Fictitious pages do not have managed mappings. */
@@ -1945,6 +2002,8 @@ again:
p->valid = 0;
vm_page_undirty(p);
}
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
continue;
}
if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
@@ -1953,16 +2012,20 @@ again:
("vm_object_page_remove: page %p is fictitious", p));
if (clean_only && p->valid) {
pmap_remove_write(p);
- if (p->dirty)
+ if (p->dirty) {
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
continue;
+ }
}
pmap_remove_all(p);
/* Account for removal of managed, wired mappings. */
if (wirings != 0)
p->wire_count -= wirings;
vm_page_free(p);
+ vm_page_unlock_queues();
+ vm_page_unlock(p);
}
- vm_page_unlock_queues();
vm_object_pip_wakeup(object);
skipmemq:
if (__predict_false(object->cache != NULL))
@@ -1997,9 +2060,11 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
if (m == NULL)
break;
if (rv != VM_PAGER_OK) {
+ vm_page_lock(m);
vm_page_lock_queues();
vm_page_free(m);
vm_page_unlock_queues();
+ vm_page_unlock(m);
break;
}
}
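[Editorial note] One discipline worth noting in the hunks above: wherever both locks are held, vm_page_lock() is acquired before vm_page_lock_queues(), and the pair is released in the reverse order (the vm_object_page_collect_flush() hunks even drop the queue mutex first when they need to pick up a new page lock). A minimal sketch of that ordering, reusing the hypothetical page_lock()/page_unlock() helpers from the earlier sketch plus a hypothetical queue_mtx standing in for vm_page_queue_mtx:

/*
 * Lock-ordering sketch matching the pattern in the diff above: the
 * per-page lock comes first, then the global queue mutex, released
 * in reverse order.  queue_mtx is a hypothetical stand-in for
 * vm_page_queue_mtx; page_lock()/page_unlock() are from the sketch
 * after the commit message.
 */
static pthread_mutex_t queue_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
page_free_locked(struct page *p)
{
	page_lock(p);			/* per-page lock first... */
	pthread_mutex_lock(&queue_mtx);	/* ...then the queue mutex */
	/* ...remove p from its paging queue and free it... */
	pthread_mutex_unlock(&queue_mtx);
	page_unlock(p);			/* release in reverse order */
}

Keeping a single global acquisition order is what keeps the two-lock scheme deadlock-free.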