summaryrefslogtreecommitdiffstats
path: root/sys/vm/vm_pageout.c
diff options
context:
space:
mode:
authorkib <kib@FreeBSD.org>2013-08-10 17:36:42 +0000
committerkib <kib@FreeBSD.org>2013-08-10 17:36:42 +0000
commit4675fcfce0ca7178b334ea67fe2d3ab745477a95 (patch)
tree2ae4177fdf8f77bdbf75571d149fb9e54db96bda /sys/vm/vm_pageout.c
parent29e6d17ad1d88a5156b4c44ff927a4ae7be2c279 (diff)
downloadFreeBSD-src-4675fcfce0ca7178b334ea67fe2d3ab745477a95.zip
FreeBSD-src-4675fcfce0ca7178b334ea67fe2d3ab745477a95.tar.gz
Different consumers of the struct vm_page abuse pageq member to keep
additional information, when the page is guaranteed to not belong to a
paging queue.  Usually, this results in a lot of type casts which make
reasoning about the code correctness harder.  Sometimes m->object is used
instead of pageq, which could cause real and confusing bugs if non-NULL
m->object is leaked.  See r141955 and r253140 for examples.

Change the pageq member into a union containing explicitly-typed members.
Use them instead of type-punning or abusing m->object in x86 pmaps, uma
and vm_page_alloc_contig().

Requested and reviewed by:	alc
Sponsored by:	The FreeBSD Foundation
Diffstat (limited to 'sys/vm/vm_pageout.c')
-rw-r--r--sys/vm/vm_pageout.c32
1 file changed, 16 insertions, 16 deletions
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index f801603..3a6399a 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -274,7 +274,7 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
pq = vm_page_pagequeue(m);
object = m->object;
- TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
+ TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
vm_pagequeue_unlock(pq);
vm_page_unlock(m);
VM_OBJECT_WLOCK(object);
@@ -282,11 +282,11 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
vm_pagequeue_lock(pq);
/* Page queue might have changed. */
- *next = TAILQ_NEXT(&marker, pageq);
+ *next = TAILQ_NEXT(&marker, plinks.q);
unchanged = (m->queue == queue &&
m->object == object &&
- &marker == TAILQ_NEXT(m, pageq));
- TAILQ_REMOVE(&pq->pq_pl, &marker, pageq);
+ &marker == TAILQ_NEXT(m, plinks.q));
+ TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
return (unchanged);
}
@@ -315,15 +315,15 @@ vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
vm_pageout_init_marker(&marker, queue);
pq = vm_page_pagequeue(m);
- TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
+ TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
vm_pagequeue_unlock(pq);
vm_page_lock(m);
vm_pagequeue_lock(pq);
/* Page queue might have changed. */
- *next = TAILQ_NEXT(&marker, pageq);
- unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
- TAILQ_REMOVE(&pq->pq_pl, &marker, pageq);
+ *next = TAILQ_NEXT(&marker, plinks.q);
+ unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, plinks.q));
+ TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
return (unchanged);
}
@@ -578,7 +578,7 @@ vm_pageout_launder(struct vm_pagequeue *pq, int tries, vm_paddr_t low,
vm_page_t m, m_tmp, next;
vm_pagequeue_lock(pq);
- TAILQ_FOREACH_SAFE(m, &pq->pq_pl, pageq, next) {
+ TAILQ_FOREACH_SAFE(m, &pq->pq_pl, plinks.q, next) {
if ((m->flags & PG_MARKER) != 0)
continue;
pa = VM_PAGE_TO_PHYS(m);
@@ -963,7 +963,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m));
PCPU_INC(cnt.v_pdpages);
- next = TAILQ_NEXT(m, pageq);
+ next = TAILQ_NEXT(m, plinks.q);
/*
* skip marker pages
@@ -1013,7 +1013,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
* 'next' pointer. Use our marker to remember our
* place.
*/
- TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, pageq);
+ TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
vm_pagequeue_unlock(pq);
queues_locked = FALSE;
@@ -1207,7 +1207,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
*/
if (m->queue != PQ_INACTIVE ||
m->object != object ||
- TAILQ_NEXT(m, pageq) != &vmd->vmd_marker) {
+ TAILQ_NEXT(m, plinks.q) != &vmd->vmd_marker) {
vm_page_unlock(m);
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
@@ -1277,8 +1277,8 @@ relock_queues:
vm_pagequeue_lock(pq);
queues_locked = TRUE;
}
- next = TAILQ_NEXT(&vmd->vmd_marker, pageq);
- TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, pageq);
+ next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
+ TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
}
vm_pagequeue_unlock(pq);
@@ -1304,7 +1304,7 @@ relock_queues:
KASSERT(m->queue == PQ_ACTIVE,
("vm_pageout_scan: page %p isn't active", m));
- next = TAILQ_NEXT(m, pageq);
+ next = TAILQ_NEXT(m, plinks.q);
if ((m->flags & PG_MARKER) != 0) {
m = next;
continue;
@@ -1612,7 +1612,7 @@ vm_pageout_page_stats(struct vm_domain *vmd)
KASSERT(m->queue == PQ_ACTIVE,
("vm_pageout_page_stats: page %p isn't active", m));
- next = TAILQ_NEXT(m, pageq);
+ next = TAILQ_NEXT(m, plinks.q);
if ((m->flags & PG_MARKER) != 0) {
m = next;
continue;
OpenPOWER on IntegriCloud