author		kib <kib@FreeBSD.org>	2013-08-10 17:36:42 +0000
committer	kib <kib@FreeBSD.org>	2013-08-10 17:36:42 +0000
commit		4675fcfce0ca7178b334ea67fe2d3ab745477a95 (patch)
tree		2ae4177fdf8f77bdbf75571d149fb9e54db96bda /sys/dev/drm2/ttm
parent		29e6d17ad1d88a5156b4c44ff927a4ae7be2c279 (diff)
Different consumers of the struct vm_page abuse the pageq member to keep
additional information when the page is guaranteed not to belong to a paging
queue. Usually this results in a lot of type casts, which make reasoning about
the code's correctness harder. Sometimes m->object is used instead of pageq,
which could cause real and confusing bugs if a non-NULL m->object is leaked.
See r141955 and r253140 for examples.

Change the pageq member into a union containing explicitly-typed members. Use
them instead of type-punning or abusing m->object in the x86 pmaps, uma, and
vm_page_alloc_contig().

Requested and reviewed by:	alc
Sponsored by:	The FreeBSD Foundation
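To make the diff below easier to follow, here is a minimal, self-contained C
sketch of the layout change the message describes. Only the plinks.q link name
is taken from the hunks; the trimmed struct shape and the other union members
are illustrative assumptions, not the exact FreeBSD declaration.

#include <sys/types.h>
#include <sys/queue.h>

/*
 * Illustrative sketch only: a trimmed stand-in for struct vm_page.
 * The plinks.q link name matches the hunks below; the s sub-struct
 * and its members are assumptions made for illustration.
 */
struct vm_page {
	/* ... unrelated members elided ... */
	union {
		TAILQ_ENTRY(vm_page) q;		/* paging-queue / pool-list links */
		struct {
			SLIST_ENTRY(vm_page) ss;	/* private singly-linked lists */
			void	*pv;			/* private per-consumer pointer */
		} s;
	} plinks;
	/* ... unrelated members elided ... */
};

Because the queue macros take the link member's name as an argument, each
TAILQ_* call site in the patch only swaps pageq for plinks.q, e.g.
TAILQ_REMOVE(&pool->list, p, plinks.q); no other logic changes.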
Diffstat (limited to 'sys/dev/drm2/ttm')
-rw-r--r--	sys/dev/drm2/ttm/ttm_page_alloc.c	30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/sys/dev/drm2/ttm/ttm_page_alloc.c b/sys/dev/drm2/ttm/ttm_page_alloc.c
index 9a30a46..71a1efa 100644
--- a/sys/dev/drm2/ttm/ttm_page_alloc.c
+++ b/sys/dev/drm2/ttm/ttm_page_alloc.c
@@ -330,7 +330,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
restart:
mtx_lock(&pool->lock);
- TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, pageq, p1) {
+ TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) {
if (freed_pages >= npages_to_free)
break;
@@ -338,7 +338,7 @@ restart:
/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
if (freed_pages >= NUM_PAGES_TO_ALLOC) {
/* remove range of pages from the pool */
- TAILQ_REMOVE(&pool->list, p, pageq);
+ TAILQ_REMOVE(&pool->list, p, plinks.q);
ttm_pool_update_free_locked(pool, freed_pages);
/**
@@ -373,7 +373,7 @@ restart:
/* remove range of pages from the pool */
if (freed_pages) {
- TAILQ_REMOVE(&pool->list, p, pageq);
+ TAILQ_REMOVE(&pool->list, p, plinks.q);
ttm_pool_update_free_locked(pool, freed_pages);
nr_free -= freed_pages;
@@ -470,7 +470,7 @@ static void ttm_handle_caching_state_failure(struct pglist *pages,
unsigned i;
/* Failed pages have to be freed */
for (i = 0; i < cpages; ++i) {
- TAILQ_REMOVE(pages, failed_pages[i], pageq);
+ TAILQ_REMOVE(pages, failed_pages[i], plinks.q);
ttm_vm_page_free(failed_pages[i]);
}
}
@@ -545,7 +545,7 @@ static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
}
}
- TAILQ_INSERT_HEAD(pages, p, pageq);
+ TAILQ_INSERT_HEAD(pages, p, plinks.q);
}
if (cpages) {
@@ -600,16 +600,16 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
mtx_lock(&pool->lock);
if (!r) {
- TAILQ_CONCAT(&pool->list, &new_pages, pageq);
+ TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
++pool->nrefills;
pool->npages += alloc_size;
} else {
printf("[TTM] Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
- TAILQ_FOREACH(p, &pool->list, pageq) {
+ TAILQ_FOREACH(p, &pool->list, plinks.q) {
++cpages;
}
- TAILQ_CONCAT(&pool->list, &new_pages, pageq);
+ TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
pool->npages += cpages;
}
@@ -636,15 +636,15 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
if (count >= pool->npages) {
/* take all pages from the pool */
- TAILQ_CONCAT(pages, &pool->list, pageq);
+ TAILQ_CONCAT(pages, &pool->list, plinks.q);
count -= pool->npages;
pool->npages = 0;
goto out;
}
for (i = 0; i < count; i++) {
p = TAILQ_FIRST(&pool->list);
- TAILQ_REMOVE(&pool->list, p, pageq);
- TAILQ_INSERT_TAIL(pages, p, pageq);
+ TAILQ_REMOVE(&pool->list, p, plinks.q);
+ TAILQ_INSERT_TAIL(pages, p, plinks.q);
}
pool->npages -= count;
count = 0;
@@ -674,7 +674,7 @@ static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
mtx_lock(&pool->lock);
for (i = 0; i < npages; i++) {
if (pages[i]) {
- TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
+ TAILQ_INSERT_TAIL(&pool->list, pages[i], plinks.q);
pages[i] = NULL;
pool->npages++;
}
@@ -735,13 +735,13 @@ static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
TAILQ_INIT(&plist);
npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
count = 0;
- TAILQ_FOREACH(p, &plist, pageq) {
+ TAILQ_FOREACH(p, &plist, plinks.q) {
pages[count++] = p;
}
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
- TAILQ_FOREACH(p, &plist, pageq) {
+ TAILQ_FOREACH(p, &plist, plinks.q) {
pmap_zero_page(p);
}
}
@@ -754,7 +754,7 @@ static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
TAILQ_INIT(&plist);
r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
npages);
- TAILQ_FOREACH(p, &plist, pageq) {
+ TAILQ_FOREACH(p, &plist, plinks.q) {
pages[count++] = p;
}
if (r) {