author    alc <alc@FreeBSD.org>    2012-06-18 16:21:59 +0000
committer alc <alc@FreeBSD.org>    2012-06-18 16:21:59 +0000
commit    857159a4015cd937d3e24c6185a0e5f72d8ddc44 (patch)
tree      93939282e00a267e4c3ff584eaf79c1202fc6503 /sys/amd64
parent    a0b36d5a0f0c40ddf24df49d87f4c3821a55df16 (diff)
Add PV chunk and list locking to pmap_page_exists_quick(),
pmap_page_is_mapped(), and pmap_remove_pages(). These functions are no longer serialized by the pvh global lock.
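
The change replaces reliance on a single global writer lock with lock striping: a page's physical address is hashed down to one of a fixed pool of pv list rwlocks (the PHYS_TO_PV_LIST_LOCK macro in the diff below), so operations on pages that hash to different stripes can proceed concurrently while the pvh global lock is held only in read mode. What follows is a minimal userspace sketch of that striping scheme, not the kernel code: pthread rwlocks stand in for the kernel's rwlock(9), a stripe count of 64 stands in for MAXCPU, and the amd64 PDRSHIFT value of 21 (2MB superpages) is hard-coded.

    #include <inttypes.h>
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PDRSHIFT        21      /* amd64 log2(NBPDR): 2MB superpages */
    #define NPV_LIST_LOCKS  64      /* stands in for MAXCPU */

    static pthread_rwlock_t pv_list_locks[NPV_LIST_LOCKS];

    /* Hash a physical address to a lock stripe, as the new macros do. */
    static pthread_rwlock_t *
    phys_to_pv_list_lock(uint64_t pa)
    {

            return (&pv_list_locks[(pa >> PDRSHIFT) % NPV_LIST_LOCKS]);
    }

    int
    main(void)
    {
            uint64_t pa;
            int i;

            for (i = 0; i < NPV_LIST_LOCKS; i++)
                    pthread_rwlock_init(&pv_list_locks[i], NULL);

            /* Addresses within the same 2MB superpage share a stripe. */
            for (pa = 0; pa < (4ULL << PDRSHIFT); pa += (1ULL << PDRSHIFT))
                    printf("pa %#" PRIx64 " -> stripe %td\n", pa,
                        phys_to_pv_list_lock(pa) - pv_list_locks);
            return (0);
    }

Because the stripe is derived from the superpage index (the same pa_index() used by pa_to_pvh()), all 4KB pages within one 2MB superpage hash to the same lock, keeping the 4KB and 2MB pv lists for a region under a single stripe.
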
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/pmap.c | 78
1 file changed, 63 insertions(+), 15 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index fbd247b..7d9be83 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -168,6 +168,14 @@ __FBSDID("$FreeBSD$");
#define pa_index(pa) ((pa) >> PDRSHIFT)
#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
+#define NPV_LIST_LOCKS MAXCPU
+
+#define PHYS_TO_PV_LIST_LOCK(pa) \
+ (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
+
+#define VM_PAGE_TO_PV_LIST_LOCK(m) \
+ PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
+
struct pmap kernel_pmap_store;
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
@@ -214,7 +222,8 @@ static struct {
* Data for the pv entry allocation mechanism
*/
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
-static long pv_entry_count;
+static struct mtx pv_chunks_mutex;
+static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
static struct md_page *pv_table;
/*
@@ -763,6 +772,17 @@ pmap_init(void)
}
/*
+ * Initialize the pv chunk list mutex.
+ */
+ mtx_init(&pv_chunks_mutex, "pv chunk list", NULL, MTX_DEF);
+
+ /*
+ * Initialize the pool of pv list locks.
+ */
+ for (i = 0; i < NPV_LIST_LOCKS; i++)
+ rw_init(&pv_list_locks[i], "pv list");
+
+ /*
* Calculate the size of the pv head table for superpages.
*/
for (i = 0; phys_avail[i + 1]; i += 2);
@@ -2023,6 +2043,7 @@ pv_to_chunk(pv_entry_t pv)
static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
+static long pv_entry_count;
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
"Current number of pv entries");
@@ -2215,10 +2236,12 @@ free_pv_chunk(struct pv_chunk *pc)
{
vm_page_t m;
+ mtx_lock(&pv_chunks_mutex);
TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
- PV_STAT(pv_entry_spare -= _NPCPV);
- PV_STAT(pc_chunk_count--);
- PV_STAT(pc_chunk_frees++);
+ mtx_unlock(&pv_chunks_mutex);
+ PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
+ PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
+ PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
/* entire chunk is free, return it */
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
dump_drop_page(m->phys_addr);
@@ -4000,6 +4023,7 @@ boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
struct md_page *pvh;
+ struct rwlock *lock;
pv_entry_t pv;
int loops = 0;
boolean_t rv;
@@ -4007,7 +4031,9 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_page_exists_quick: page %p is not managed", m));
rv = FALSE;
- rw_wlock(&pvh_global_lock);
+ rw_rlock(&pvh_global_lock);
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
@@ -4029,7 +4055,8 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
break;
}
}
- rw_wunlock(&pvh_global_lock);
+ rw_runlock(lock);
+ rw_runlock(&pvh_global_lock);
return (rv);
}
@@ -4088,15 +4115,19 @@ pmap_pvh_wired_mappings(struct md_page *pvh, int count)
boolean_t
pmap_page_is_mapped(vm_page_t m)
{
+ struct rwlock *lock;
boolean_t rv;
if ((m->oflags & VPO_UNMANAGED) != 0)
return (FALSE);
- rw_wlock(&pvh_global_lock);
+ rw_rlock(&pvh_global_lock);
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
rv = !TAILQ_EMPTY(&m->md.pv_list) ||
((m->flags & PG_FICTITIOUS) == 0 &&
!TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
- rw_wunlock(&pvh_global_lock);
+ rw_runlock(lock);
+ rw_runlock(&pvh_global_lock);
return (rv);
}
@@ -4118,19 +4149,21 @@ pmap_remove_pages(pmap_t pmap)
pv_entry_t pv;
struct md_page *pvh;
struct pv_chunk *pc, *npc;
- int field, idx;
+ struct rwlock *lock, *new_lock;
int64_t bit;
uint64_t inuse, bitmask;
- int allfree;
+ int allfree, field, freed, idx;
if (pmap != PCPU_GET(curpmap)) {
printf("warning: pmap_remove_pages called with non-current pmap\n");
return;
}
- rw_wlock(&pvh_global_lock);
+ rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
+ lock = NULL;
TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
allfree = 1;
+ freed = 0;
for (field = 0; field < _NPCM; field++) {
inuse = ~pc->pc_map[field] & pc_freemask[field];
while (inuse != 0) {
@@ -4186,10 +4219,15 @@ pmap_remove_pages(pmap_t pmap)
vm_page_dirty(m);
}
+ new_lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ if (new_lock != lock) {
+ if (lock != NULL)
+ rw_wunlock(lock);
+ lock = new_lock;
+ rw_wlock(lock);
+ }
+
/* Mark free */
- PV_STAT(pv_entry_frees++);
- PV_STAT(pv_entry_spare++);
- pv_entry_count--;
pc->pc_map[field] |= bitmask;
if ((tpte & PG_PS) != 0) {
pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
@@ -4223,15 +4261,25 @@ pmap_remove_pages(pmap_t pmap)
}
}
pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
+ freed++;
}
}
+ PV_STAT(atomic_add_long(&pv_entry_frees, freed));
+ PV_STAT(atomic_add_int(&pv_entry_spare, freed));
+ atomic_subtract_long(&pv_entry_count, freed);
if (allfree) {
+ if (lock != NULL) {
+ rw_wunlock(lock);
+ lock = NULL;
+ }
TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
free_pv_chunk(pc);
}
}
+ if (lock != NULL)
+ rw_wunlock(lock);
pmap_invalidate_all(pmap);
- rw_wunlock(&pvh_global_lock);
+ rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(free);
}
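
A detail worth noting in the pmap_remove_pages() hunk above is how the pv list lock is taken: the loop caches the current lock in "lock" and only drops and reacquires it when the next page hashes to a different stripe, avoiding a lock round trip per mapping. Below is a sketch of that pattern under the same userspace assumptions as the earlier example (pthread rwlocks, 64 stripes); the function names here are hypothetical.

    #include <pthread.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PDRSHIFT        21
    #define NPV_LIST_LOCKS  64

    static pthread_rwlock_t pv_list_locks[NPV_LIST_LOCKS];

    static pthread_rwlock_t *
    phys_to_pv_list_lock(uint64_t pa)
    {

            return (&pv_list_locks[(pa >> PDRSHIFT) % NPV_LIST_LOCKS]);
    }

    /*
     * Visit a batch of pages, holding the current stripe's lock across
     * consecutive pages that hash to the same stripe and switching locks
     * only when the stripe changes -- the new_lock != lock dance in
     * pmap_remove_pages() above.
     */
    static void
    visit_pages(const uint64_t *pa, size_t n)
    {
            pthread_rwlock_t *lock, *new_lock;
            size_t i;

            lock = NULL;
            for (i = 0; i < n; i++) {
                    new_lock = phys_to_pv_list_lock(pa[i]);
                    if (new_lock != lock) {
                            if (lock != NULL)
                                    pthread_rwlock_unlock(lock);
                            lock = new_lock;
                            pthread_rwlock_wrlock(lock);
                    }
                    /* ... mutate the page's pv list under "lock" here ... */
            }
            if (lock != NULL)
                    pthread_rwlock_unlock(lock);
    }

    int
    main(void)
    {
            uint64_t pages[] = { 0x200000, 0x200000 + 4096, 0x400000 };
            int i;

            for (i = 0; i < NPV_LIST_LOCKS; i++)
                    pthread_rwlock_init(&pv_list_locks[i], NULL);
            visit_pages(pages, sizeof(pages) / sizeof(pages[0]));
            return (0);
    }

Note also that the diff drops the cached lock before calling free_pv_chunk(), which internally acquires the new pv_chunks_mutex, so a pv list lock is never held across the chunk mutex.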