author    kib <kib@FreeBSD.org>	2013-08-10 17:36:42 +0000
committer kib <kib@FreeBSD.org>	2013-08-10 17:36:42 +0000
commit    4675fcfce0ca7178b334ea67fe2d3ab745477a95 (patch)
tree      2ae4177fdf8f77bdbf75571d149fb9e54db96bda /sys/amd64
parent    29e6d17ad1d88a5156b4c44ff927a4ae7be2c279 (diff)
Different consumers of struct vm_page abuse the pageq member to keep
additional information when the page is guaranteed to not belong to a
paging queue.  Usually, this results in a lot of type casts, which make
reasoning about the correctness of the code harder.  Sometimes m->object
is used instead of pageq, which could cause real and confusing bugs if a
non-NULL m->object is leaked.  See r141955 and r253140 for examples.

Change the pageq member into a union containing explicitly-typed members.
Use them instead of type-punning or abusing m->object in the x86 pmaps,
uma and vm_page_alloc_contig().

Requested and reviewed by:	alc
Sponsored by:	The FreeBSD Foundation
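
The new linkage looks roughly like the sketch below.  This is reconstructed
from the plinks.s.ss accesses and the struct spglist parameters in the diff
that follows, not copied verbatim from sys/vm/vm_page.h; the real union may
carry additional members.

	/*
	 * Approximate shape of the change to struct vm_page.  The old
	 * declaration was a bare queue entry,
	 *
	 *	TAILQ_ENTRY(vm_page) pageq;
	 *
	 * which consumers type-punned whenever the page was known to be
	 * off the paging queues.
	 */
	union {
		TAILQ_ENTRY(vm_page) q;	/* page queue or free list */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private, short-lived lists */
			void *pv;
		} s;
	} plinks;

	/* Typed head for the lists that pmap threads through plinks.s.ss. */
	SLIST_HEAD(spglist, vm_page);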
Diffstat (limited to 'sys/amd64')
-rw-r--r--	sys/amd64/amd64/pmap.c	112
1 file changed, 58 insertions, 54 deletions
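
The change is largely mechanical: a hand-rolled singly-linked list threaded
through m->object becomes a type-safe <sys/queue.h> SLIST.  The following
stand-alone userspace sketch contrasts the two patterns; struct page and the
list names here are invented for illustration and are not kernel code.

	#include <sys/queue.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Toy stand-in for struct vm_page; "link" plays the role of plinks.s.ss. */
	struct page {
		int id;
		SLIST_ENTRY(page) link;
	};
	SLIST_HEAD(pagelist, page);	/* cf. SLIST_HEAD(spglist, vm_page) */

	int
	main(void)
	{
		struct pagelist freeq;	/* the pmap code names this "free" */
		struct page *m;
		int i;

		SLIST_INIT(&freeq);	/* old code: free = NULL */
		for (i = 0; i < 3; i++) {
			if ((m = malloc(sizeof(*m))) == NULL)
				abort();
			m->id = i;
			/* old code: m->object = (void *)free; free = m; */
			SLIST_INSERT_HEAD(&freeq, m, link);
		}
		/* old code: m = free; free = (void *)m->object; m->object = NULL; */
		while ((m = SLIST_FIRST(&freeq)) != NULL) {
			SLIST_REMOVE_HEAD(&freeq, link);
			printf("freeing page %d\n", m->id);
			free(m);
		}
		return (0);
	}

Besides eliminating the casts, the typed list keeps m->object NULL while the
page sits on a private free list, which removes the class of leak described
in the commit message.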
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index ef267f5..5f66384 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -295,13 +295,12 @@ static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
vm_prot_t prot);
static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
- vm_page_t *free, struct rwlock **lockp);
-static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
- vm_offset_t sva, pd_entry_t ptepde, vm_page_t *free,
- struct rwlock **lockp);
+ struct spglist *free, struct rwlock **lockp);
+static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
+ pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
- vm_page_t *free);
+ struct spglist *free);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m, struct rwlock **lockp);
static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
@@ -316,8 +315,8 @@ static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
struct rwlock **lockp);
static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
- vm_page_t *free);
-static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, vm_page_t *);
+ struct spglist *free);
+static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
/*
@@ -1490,14 +1489,12 @@ pmap_qremove(vm_offset_t sva, int count)
* Page table page management routines.....
***************************************************/
static __inline void
-pmap_free_zero_pages(vm_page_t free)
+pmap_free_zero_pages(struct spglist *free)
{
vm_page_t m;
- while (free != NULL) {
- m = free;
- free = (void *)m->object;
- m->object = NULL;
+ while ((m = SLIST_FIRST(free)) != NULL) {
+ SLIST_REMOVE_HEAD(free, plinks.s.ss);
/* Preserve the page's PG_ZERO setting. */
vm_page_free_toq(m);
}
@@ -1509,15 +1506,15 @@ pmap_free_zero_pages(vm_page_t free)
* physical memory manager after the TLB has been updated.
*/
static __inline void
-pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
+pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
+ boolean_t set_PG_ZERO)
{
if (set_PG_ZERO)
m->flags |= PG_ZERO;
else
m->flags &= ~PG_ZERO;
- m->object = (void *)*free;
- *free = m;
+ SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}
/*
@@ -1567,7 +1564,7 @@ pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
* page table page was unmapped and FALSE otherwise.
*/
static inline boolean_t
-pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
+pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
--m->wire_count;
@@ -1579,7 +1576,7 @@ pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
}
static void
-_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
+_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1637,7 +1634,8 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
* conditionally free the page, and manage the hold/wire counts.
*/
static int
-pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, vm_page_t *free)
+pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
+ struct spglist *free)
{
vm_page_t mpte;
@@ -2123,7 +2121,8 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
pt_entry_t *pte, tpte;
pv_entry_t pv;
vm_offset_t va;
- vm_page_t free, m, m_pc;
+ vm_page_t m, m_pc;
+ struct spglist free;
uint64_t inuse;
int bit, field, freed;
@@ -2131,10 +2130,11 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
pmap = NULL;
- free = m_pc = NULL;
+ m_pc = NULL;
+ SLIST_INIT(&free);
TAILQ_INIT(&new_tail);
mtx_lock(&pv_chunks_mutex);
- while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && free == NULL) {
+ while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && SLIST_EMPTY(&free)) {
TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
mtx_unlock(&pv_chunks_mutex);
if (pmap != pc->pc_pmap) {
@@ -2193,7 +2193,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
}
}
pc->pc_map[field] |= 1UL << bit;
- pmap_unuse_pt(pmap, va, *pde, &free);
+ pmap_unuse_pt(pmap, va, *pde, &free);
freed++;
}
}
@@ -2233,15 +2233,14 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
if (pmap != locked_pmap)
PMAP_UNLOCK(pmap);
}
- if (m_pc == NULL && free != NULL) {
- m_pc = free;
- free = (void *)m_pc->object;
- m_pc->object = NULL;
+ if (m_pc == NULL && !SLIST_EMPTY(&free)) {
+ m_pc = SLIST_FIRST(&free);
+ SLIST_REMOVE_HEAD(&free, plinks.s.ss);
/* Recycle a freed page table page. */
m_pc->wire_count = 1;
atomic_add_int(&cnt.v_wire_count, 1);
}
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
return (m_pc);
}
@@ -2690,7 +2689,8 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
pd_entry_t newpde, oldpde;
pt_entry_t *firstpte, newpte;
vm_paddr_t mptepa;
- vm_page_t free, mpte;
+ vm_page_t mpte;
+ struct spglist free;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
oldpde = *pde;
@@ -2720,11 +2720,11 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
- free = NULL;
+ SLIST_INIT(&free);
pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free,
lockp);
pmap_invalidate_page(pmap, trunc_2mpage(va));
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
" in pmap %p", va, pmap);
return (FALSE);
@@ -2845,7 +2845,7 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
*/
static int
pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
- vm_page_t *free, struct rwlock **lockp)
+ struct spglist *free, struct rwlock **lockp)
{
struct md_page *pvh;
pd_entry_t oldpde;
@@ -2904,7 +2904,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
*/
static int
pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
- pd_entry_t ptepde, vm_page_t *free, struct rwlock **lockp)
+ pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
{
struct md_page *pvh;
pt_entry_t oldpte;
@@ -2937,7 +2937,8 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
* Remove a single page from a process address space
*/
static void
-pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, vm_page_t *free)
+pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
+ struct spglist *free)
{
struct rwlock *lock;
pt_entry_t *pte;
@@ -2970,7 +2971,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
pdp_entry_t *pdpe;
pd_entry_t ptpaddr, *pde;
pt_entry_t *pte;
- vm_page_t free = NULL;
+ struct spglist free;
int anyvalid;
/*
@@ -2980,6 +2981,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
return;
anyvalid = 0;
+ SLIST_INIT(&free);
rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
@@ -3098,7 +3100,7 @@ out:
pmap_invalidate_all(pmap);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
/*
@@ -3123,11 +3125,11 @@ pmap_remove_all(vm_page_t m)
pt_entry_t *pte, tpte;
pd_entry_t *pde;
vm_offset_t va;
- vm_page_t free;
+ struct spglist free;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_remove_all: page %p is not managed", m));
- free = NULL;
+ SLIST_INIT(&free);
rw_wlock(&pvh_global_lock);
if ((m->flags & PG_FICTITIOUS) != 0)
goto small_mappings;
@@ -3169,7 +3171,7 @@ small_mappings:
}
vm_page_aflag_clear(m, PGA_WRITEABLE);
rw_wunlock(&pvh_global_lock);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
/*
@@ -3692,7 +3694,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
struct rwlock **lockp)
{
pd_entry_t *pde, newpde;
- vm_page_t free, mpde;
+ vm_page_t mpde;
+ struct spglist free;
rw_assert(&pvh_global_lock, RA_LOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -3721,10 +3724,10 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m),
lockp)) {
- free = NULL;
+ SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, va, mpde, &free)) {
pmap_invalidate_page(pmap, va);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
" in pmap %p", va, pmap);
@@ -3827,7 +3830,7 @@ static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
{
- vm_page_t free;
+ struct spglist free;
pt_entry_t *pte;
vm_paddr_t pa;
@@ -3898,10 +3901,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if ((m->oflags & VPO_UNMANAGED) == 0 &&
!pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
if (mpte != NULL) {
- free = NULL;
+ SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
pmap_invalidate_page(pmap, va);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
mpte = NULL;
}
@@ -4096,7 +4099,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
vm_offset_t src_addr)
{
struct rwlock *lock;
- vm_page_t free;
+ struct spglist free;
vm_offset_t addr;
vm_offset_t end_addr = src_addr + len;
vm_offset_t va_next;
@@ -4204,13 +4207,13 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
*dst_pte = ptetemp & ~(PG_W | PG_M |
PG_A);
pmap_resident_count_inc(dst_pmap, 1);
- } else {
- free = NULL;
+ } else {
+ SLIST_INIT(&free);
if (pmap_unwire_ptp(dst_pmap, addr,
dstmpte, &free)) {
- pmap_invalidate_page(dst_pmap,
- addr);
- pmap_free_zero_pages(free);
+ pmap_invalidate_page(dst_pmap,
+ addr);
+ pmap_free_zero_pages(&free);
}
goto out;
}
@@ -4227,10 +4230,10 @@ out:
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(src_pmap);
PMAP_UNLOCK(dst_pmap);
-}
+}
/*
- * pmap_zero_page zeros the specified hardware page by mapping
+ * pmap_zero_page zeros the specified hardware page by mapping
* the page into KVM and using bzero to clear its contents.
*/
void
@@ -4445,7 +4448,7 @@ pmap_remove_pages(pmap_t pmap)
{
pd_entry_t ptepde;
pt_entry_t *pte, tpte;
- vm_page_t free = NULL;
+ struct spglist free;
vm_page_t m, mpte, mt;
pv_entry_t pv;
struct md_page *pvh;
@@ -4462,6 +4465,7 @@ pmap_remove_pages(pmap_t pmap)
return;
}
lock = NULL;
+ SLIST_INIT(&free);
rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
@@ -4597,7 +4601,7 @@ pmap_remove_pages(pmap_t pmap)
pmap_invalidate_all(pmap);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
/*