path: root/sys/vm
author     alc <alc@FreeBSD.org>	2010-05-24 14:26:57 +0000
committer  alc <alc@FreeBSD.org>	2010-05-24 14:26:57 +0000
commit     32b13ee95703577767e8794ce5e896ad8fdbdee7 (patch)
tree       9d773a8e937b9a4faf763308a1843594ec04df60 /sys/vm
parent     95cb40b038035cd0d6e2b2b015f41a7d4dec0fcb (diff)
Roughly half of a typical pmap_mincore() implementation is machine-independent
code.  Move this code into mincore(), and eliminate the page queues lock from
pmap_mincore().

Push down the page queues lock into pmap_clear_modify(),
pmap_clear_reference(), and pmap_is_modified().  Assert that these functions
are never passed an unmanaged page.

Eliminate an inaccurate comment from powerpc/powerpc/mmu_if.m: Contrary to
what the comment says, pmap_mincore() is not simply an optimization.  Without
a complete pmap_mincore() implementation, mincore() cannot return either
MINCORE_MODIFIED or MINCORE_REFERENCED because only the pmap can provide this
information.

Eliminate the page queues lock from vfs_setdirty_locked_object(),
vm_pageout_clean(), vm_object_page_collect_flush(), and
vm_object_page_clean().  Generally speaking, these are all accesses to the
page's dirty field, which are synchronized by the containing vm object's lock.

Reduce the scope of the page queues lock in vm_object_madvise() and
vm_page_dontneed().

Reviewed by:	kib (an earlier version)
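The heart of the mincore() rework is its locking protocol around the new
pmap_mincore() interface: pmap_mincore() may return with the page lock for the
reported physical address held, and mincore() must then take the containing VM
object's lock without violating the object-before-page lock order, which it
does with a trylock and a retry loop (visible in the vm_mmap.c hunk below).
The following stand-alone sketch is user-space C with pthreads, not FreeBSD
kernel code; the struct layout and the lock_page_object() helper are
illustrative assumptions, chosen only to show the same trylock/back-off/retry
pattern.

/*
 * Sketch of the trylock/retry pattern: acquire the object lock for a page
 * while already holding the page lock, without inverting the lock order.
 */
#include <pthread.h>

struct object {
	pthread_mutex_t lock;
};

struct page {
	pthread_mutex_t lock;
	struct object *object;	/* may change while the page is unlocked */
};

/* Returns with both the page lock and its object's lock held. */
static struct object *
lock_page_object(struct page *m)
{
	struct object *obj;

	pthread_mutex_lock(&m->lock);
retry:
	obj = m->object;
	if (pthread_mutex_trylock(&obj->lock) != 0) {
		/* Back off: drop the page lock and block on the object. */
		pthread_mutex_unlock(&m->lock);
		pthread_mutex_lock(&obj->lock);
		pthread_mutex_lock(&m->lock);
		if (m->object != obj) {
			/* The page moved to another object; start over. */
			pthread_mutex_unlock(&obj->lock);
			goto retry;
		}
	}
	return (obj);
}

In the kernel diff the same idea appears with VM_OBJECT_TRYLOCK()/
VM_OBJECT_LOCK() and vm_page_lock()/vm_page_unlock(); there the retry path
re-runs pmap_mincore() as well, because the mapping itself may have changed
while no locks were held.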
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/pmap.h        3
-rw-r--r--  sys/vm/vm_mmap.c    93
-rw-r--r--  sys/vm/vm_object.c  35
-rw-r--r--  sys/vm/vm_page.c     7
-rw-r--r--  sys/vm/vm_page.h     6
-rw-r--r--  sys/vm/vm_pageout.c  6
6 files changed, 83 insertions, 67 deletions
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index e4d8e81..82f88c9 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -122,6 +122,8 @@ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t va);
boolean_t pmap_is_referenced(vm_page_t m);
boolean_t pmap_ts_referenced(vm_page_t m);
vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+int pmap_mincore(pmap_t pmap, vm_offset_t addr,
+ vm_paddr_t *locked_pa);
void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size);
boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
@@ -141,7 +143,6 @@ void pmap_sync_icache(pmap_t, vm_offset_t, vm_size_t);
void pmap_zero_page(vm_page_t);
void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_zero_page_idle(vm_page_t);
-int pmap_mincore(pmap_t pmap, vm_offset_t addr);
void pmap_activate(struct thread *td);
#define pmap_resident_count(pm) ((pm)->pm_stats.resident_count)
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index f9b3db3..99d5059 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -772,8 +772,13 @@ mincore(td, uap)
int vecindex, lastvecindex;
vm_map_entry_t current;
vm_map_entry_t entry;
+ vm_object_t object;
+ vm_paddr_t locked_pa;
+ vm_page_t m;
+ vm_pindex_t pindex;
int mincoreinfo;
unsigned int timestamp;
+ boolean_t locked;
/*
* Make sure that the addresses presented are valid for user
@@ -847,38 +852,74 @@ RestartScan:
* it can provide info as to whether we are the
* one referencing or modifying the page.
*/
- mincoreinfo = pmap_mincore(pmap, addr);
- if (!mincoreinfo) {
- vm_pindex_t pindex;
- vm_ooffset_t offset;
- vm_page_t m;
+ object = NULL;
+ locked_pa = 0;
+ retry:
+ m = NULL;
+ mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
+ if (locked_pa != 0) {
/*
- * calculate the page index into the object
+ * The page is mapped by this process but not
+ * both accessed and modified. It is also
+ * managed. Acquire the object lock so that
+ * other mappings might be examined.
*/
- offset = current->offset + (addr - current->start);
- pindex = OFF_TO_IDX(offset);
- VM_OBJECT_LOCK(current->object.vm_object);
- m = vm_page_lookup(current->object.vm_object,
- pindex);
+ m = PHYS_TO_VM_PAGE(locked_pa);
+ if (m->object != object) {
+ if (object != NULL)
+ VM_OBJECT_UNLOCK(object);
+ object = m->object;
+ locked = VM_OBJECT_TRYLOCK(object);
+ vm_page_unlock(m);
+ if (!locked) {
+ VM_OBJECT_LOCK(object);
+ vm_page_lock(m);
+ goto retry;
+ }
+ } else
+ vm_page_unlock(m);
+ KASSERT(m->valid == VM_PAGE_BITS_ALL,
+ ("mincore: page %p is mapped but invalid",
+ m));
+ } else if (mincoreinfo == 0) {
/*
- * if the page is resident, then gather information about
- * it.
+ * The page is not mapped by this process. If
+ * the object implements managed pages, then
+ * determine if the page is resident so that
+ * the mappings might be examined.
*/
- if (m != NULL && m->valid != 0) {
- mincoreinfo = MINCORE_INCORE;
- vm_page_lock(m);
- vm_page_lock_queues();
- if (m->dirty ||
- pmap_is_modified(m))
- mincoreinfo |= MINCORE_MODIFIED_OTHER;
- if ((m->flags & PG_REFERENCED) ||
- pmap_is_referenced(m))
- mincoreinfo |= MINCORE_REFERENCED_OTHER;
- vm_page_unlock_queues();
- vm_page_unlock(m);
+ if (current->object.vm_object != object) {
+ if (object != NULL)
+ VM_OBJECT_UNLOCK(object);
+ object = current->object.vm_object;
+ VM_OBJECT_LOCK(object);
}
- VM_OBJECT_UNLOCK(current->object.vm_object);
+ if (object->type == OBJT_DEFAULT ||
+ object->type == OBJT_SWAP ||
+ object->type == OBJT_VNODE) {
+ pindex = OFF_TO_IDX(current->offset +
+ (addr - current->start));
+ m = vm_page_lookup(object, pindex);
+ if (m != NULL && m->valid == 0)
+ m = NULL;
+ if (m != NULL)
+ mincoreinfo = MINCORE_INCORE;
+ }
+ }
+ if (m != NULL) {
+ /* Examine other mappings to the page. */
+ if (m->dirty == 0 && pmap_is_modified(m))
+ vm_page_dirty(m);
+ if (m->dirty != 0)
+ mincoreinfo |= MINCORE_MODIFIED_OTHER;
+ vm_page_lock_queues();
+ if ((m->flags & PG_REFERENCED) != 0 ||
+ pmap_is_referenced(m))
+ mincoreinfo |= MINCORE_REFERENCED_OTHER;
+ vm_page_unlock_queues();
}
+ if (object != NULL)
+ VM_OBJECT_UNLOCK(object);
/*
* subyte may page fault. In case it needs to modify
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 96ceb88..ae7af15 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -817,19 +817,13 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
++tscan;
continue;
}
- vm_page_lock(p);
- vm_page_lock_queues();
vm_page_test_dirty(p);
if (p->dirty == 0) {
- vm_page_unlock_queues();
- vm_page_unlock(p);
if (--scanlimit == 0)
break;
++tscan;
continue;
}
- vm_page_unlock_queues();
- vm_page_unlock(p);
/*
* If we have been asked to skip nosync pages and
* this is a nosync page, we can't continue.
@@ -900,17 +894,11 @@ again:
continue;
}
- vm_page_lock(p);
- vm_page_lock_queues();
vm_page_test_dirty(p);
if (p->dirty == 0) {
- vm_page_unlock_queues();
- vm_page_unlock(p);
p->oflags &= ~VPO_CLEANCHK;
continue;
}
- vm_page_unlock_queues();
- vm_page_unlock(p);
/*
* If we have been asked to skip nosync pages and this is a
* nosync page, skip it. Note that the object flags were
@@ -977,17 +965,11 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
(tp->oflags & VPO_CLEANCHK) == 0) ||
(tp->busy != 0))
break;
- vm_page_lock(tp);
- vm_page_lock_queues();
vm_page_test_dirty(tp);
if (tp->dirty == 0) {
- vm_page_unlock(tp);
- vm_page_unlock_queues();
tp->oflags &= ~VPO_CLEANCHK;
break;
}
- vm_page_unlock(tp);
- vm_page_unlock_queues();
maf[ i - 1 ] = tp;
maxf++;
continue;
@@ -1007,17 +989,11 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
(tp->oflags & VPO_CLEANCHK) == 0) ||
(tp->busy != 0))
break;
- vm_page_lock(tp);
- vm_page_lock_queues();
vm_page_test_dirty(tp);
if (tp->dirty == 0) {
- vm_page_unlock_queues();
- vm_page_unlock(tp);
tp->oflags &= ~VPO_CLEANCHK;
break;
}
- vm_page_unlock_queues();
- vm_page_unlock(tp);
mab[ i - 1 ] = tp;
maxb++;
continue;
@@ -1217,21 +1193,23 @@ shadowlookup:
* If the page is not in a normal state, skip it.
*/
vm_page_lock(m);
- vm_page_lock_queues();
if (m->hold_count != 0 || m->wire_count != 0) {
- vm_page_unlock_queues();
vm_page_unlock(m);
goto unlock_tobject;
}
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("vm_object_madvise: page %p is not managed", m));
if ((m->oflags & VPO_BUSY) || m->busy) {
- if (advise == MADV_WILLNEED)
+ if (advise == MADV_WILLNEED) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
+ vm_page_lock_queues();
vm_page_flag_set(m, PG_REFERENCED);
- vm_page_unlock_queues();
+ vm_page_unlock_queues();
+ }
vm_page_unlock(m);
if (object != tobject)
VM_OBJECT_UNLOCK(object);
@@ -1266,7 +1244,6 @@ shadowlookup:
m->act_count = 0;
vm_page_dontneed(m);
}
- vm_page_unlock_queues();
vm_page_unlock(m);
if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
swap_pager_freespace(tobject, tpindex, 1);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 0d5b393..64b7113 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1885,14 +1885,13 @@ vm_page_cache(vm_page_t m)
void
vm_page_dontneed(vm_page_t m)
{
- static int dnweight;
int dnw;
int head;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
vm_page_lock_assert(m, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
- dnw = ++dnweight;
+ dnw = PCPU_GET(dnweight);
+ PCPU_INC(dnweight);
/*
* occassionally leave the page alone
@@ -1908,7 +1907,9 @@ vm_page_dontneed(vm_page_t m)
* Clear any references to the page. Otherwise, the page daemon will
* immediately reactivate the page.
*/
+ vm_page_lock_queues();
vm_page_flag_clear(m, PG_REFERENCED);
+ vm_page_unlock_queues();
pmap_clear_reference(m);
if (m->dirty == 0 && pmap_is_modified(m))
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index aebf79e..fb1cfda 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -194,8 +194,10 @@ extern struct vpglocks pa_lock[];
#define PA_UNLOCK(pa) mtx_unlock(PA_LOCKPTR(pa))
#define PA_UNLOCK_COND(pa) \
do { \
- if (pa) \
- PA_UNLOCK(pa); \
+ if ((pa) != 0) { \
+ PA_UNLOCK((pa)); \
+ (pa) = 0; \
+ } \
} while (0)
#define PA_LOCK_ASSERT(pa, a) mtx_assert(PA_LOCKPTR(pa), (a))
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index fde456f..3158a7d 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -391,17 +391,14 @@ more:
break;
}
vm_page_lock(p);
- vm_page_lock_queues();
vm_page_test_dirty(p);
if (p->dirty == 0 ||
p->queue != PQ_INACTIVE ||
p->hold_count != 0) { /* may be undergoing I/O */
vm_page_unlock(p);
- vm_page_unlock_queues();
ib = 0;
break;
}
- vm_page_unlock_queues();
vm_page_unlock(p);
mc[--page_base] = p;
++pageout_count;
@@ -424,16 +421,13 @@ more:
break;
}
vm_page_lock(p);
- vm_page_lock_queues();
vm_page_test_dirty(p);
if (p->dirty == 0 ||
p->queue != PQ_INACTIVE ||
p->hold_count != 0) { /* may be undergoing I/O */
- vm_page_unlock_queues();
vm_page_unlock(p);
break;
}
- vm_page_unlock_queues();
vm_page_unlock(p);
mc[page_base + pageout_count] = p;
++pageout_count;