summaryrefslogtreecommitdiffstats
path: root/sys/vm
diff options
context:
space:
mode:
author	alc <alc@FreeBSD.org>	2010-07-02 15:02:51 +0000
committer	alc <alc@FreeBSD.org>	2010-07-02 15:02:51 +0000
commit	50ab2ca4b1b445046b0539ebcdf8d35a3c36a005 (patch)
tree	fb5f25775173492a3b45324086efae1cc889215e /sys/vm
parent	6e640459754dcfd9bff47693ad9d420dba7b9c32 (diff)
download	FreeBSD-src-50ab2ca4b1b445046b0539ebcdf8d35a3c36a005.zip
	FreeBSD-src-50ab2ca4b1b445046b0539ebcdf8d35a3c36a005.tar.gz
With the demise of page coloring, the page queue macros no longer serve any
useful purpose. Eliminate them.

Reviewed by:	kib
Diffstat (limited to 'sys/vm')
-rw-r--r--	sys/vm/vm_contig.c	2
-rw-r--r--	sys/vm/vm_page.c	22
-rw-r--r--	sys/vm/vm_page.h	12
-rw-r--r--	sys/vm/vm_pageout.c	9
4 files changed, 16 insertions, 29 deletions
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index b9e0579..c0a9fcd 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -175,7 +175,7 @@ vm_contig_launder(int queue, vm_paddr_t low, vm_paddr_t high)
vm_page_unlock(m);
continue;
}
- KASSERT(VM_PAGE_INQUEUE2(m, queue),
+ KASSERT(m->queue == queue,
("vm_contig_launder: page %p's queue is not %d", m, queue));
error = vm_contig_launder_page(m, &next);
vm_page_lock_assert(m, MA_NOTOWNED);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 56effed..b1354e9 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -592,7 +592,7 @@ vm_page_unhold(vm_page_t mem)
vm_page_lock_assert(mem, MA_OWNED);
--mem->hold_count;
KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
- if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD))
+ if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
vm_page_free_toq(mem);
}
@@ -1381,10 +1381,11 @@ vm_waitpfault(void)
void
vm_page_requeue(vm_page_t m)
{
- int queue = VM_PAGE_GETQUEUE(m);
struct vpgqueues *vpq;
+ int queue;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ queue = m->queue;
KASSERT(queue != PQ_NONE,
("vm_page_requeue: page %p is not queued", m));
vpq = &vm_page_queues[queue];
@@ -1422,12 +1423,12 @@ vm_page_queue_remove(int queue, vm_page_t m)
void
vm_pageq_remove(vm_page_t m)
{
- int queue = VM_PAGE_GETQUEUE(m);
+ int queue;
vm_page_lock_assert(m, MA_OWNED);
- if (queue != PQ_NONE) {
+ if ((queue = m->queue) != PQ_NONE) {
vm_page_lock_queues();
- VM_PAGE_SETQUEUE2(m, PQ_NONE);
+ m->queue = PQ_NONE;
vm_page_queue_remove(queue, m);
vm_page_unlock_queues();
}
@@ -1446,7 +1447,7 @@ vm_page_enqueue(int queue, vm_page_t m)
struct vpgqueues *vpq;
vpq = &vm_page_queues[queue];
- VM_PAGE_SETQUEUE2(m, queue);
+ m->queue = queue;
TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
++*vpq->cnt;
}
@@ -1467,7 +1468,7 @@ vm_page_activate(vm_page_t m)
int queue;
vm_page_lock_assert(m, MA_OWNED);
- if ((queue = VM_PAGE_GETKNOWNQUEUE2(m)) != PQ_ACTIVE) {
+ if ((queue = m->queue) != PQ_ACTIVE) {
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if (m->act_count < ACT_INIT)
m->act_count = ACT_INIT;
@@ -1728,7 +1729,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
/*
* Ignore if already inactive.
*/
- if ((queue = VM_PAGE_GETKNOWNQUEUE2(m)) == PQ_INACTIVE)
+ if ((queue = m->queue) == PQ_INACTIVE)
return;
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
vm_page_lock_queues();
@@ -1741,7 +1742,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
else
TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m,
pageq);
- VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
+ m->queue = PQ_INACTIVE;
cnt.v_inactive_count++;
vm_page_unlock_queues();
}
@@ -1954,8 +1955,7 @@ vm_page_dontneed(vm_page_t m)
/*
* Occasionally leave the page alone.
*/
- if ((dnw & 0x01F0) == 0 ||
- VM_PAGE_INQUEUE2(m, PQ_INACTIVE)) {
+ if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) {
if (m->act_count >= ACT_INIT)
--m->act_count;
return;
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index ee2f8d8..54b9edd 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -153,18 +153,6 @@ struct vm_page {
#define PQ_HOLD 3
#define PQ_COUNT 4
-/* Returns the real queue a page is on. */
-#define VM_PAGE_GETQUEUE(m) ((m)->queue)
-
-/* Returns the well known queue a page is on. */
-#define VM_PAGE_GETKNOWNQUEUE2(m) VM_PAGE_GETQUEUE(m)
-
-/* Returns true if the page is in the named well known queue. */
-#define VM_PAGE_INQUEUE2(m, q) (VM_PAGE_GETKNOWNQUEUE2(m) == (q))
-
-/* Sets the queue a page is on. */
-#define VM_PAGE_SETQUEUE2(m, q) (VM_PAGE_GETQUEUE(m) = (q))
-
struct vpgqueues {
struct pglist pl;
int *cnt;
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 8a31573..d664938 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -773,9 +773,8 @@ rescan0:
cnt.v_pdpages++;
- if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
+ if (m->queue != PQ_INACTIVE)
goto rescan0;
- }
next = TAILQ_NEXT(m, pageq);
@@ -1025,7 +1024,7 @@ rescan0:
* above. The page might have been freed and
* reused for another vnode.
*/
- if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE ||
+ if (m->queue != PQ_INACTIVE ||
m->object != object ||
TAILQ_NEXT(m, pageq) != &marker) {
vm_page_unlock(m);
@@ -1115,7 +1114,7 @@ unlock_and_continue:
while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
- KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
+ KASSERT(m->queue == PQ_ACTIVE,
("vm_pageout_scan: page %p isn't active", m));
next = TAILQ_NEXT(m, pageq);
@@ -1379,7 +1378,7 @@ vm_pageout_page_stats()
while ((m != NULL) && (pcount-- > 0)) {
int actcount;
- KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
+ KASSERT(m->queue == PQ_ACTIVE,
("vm_pageout_page_stats: page %p isn't active", m));
next = TAILQ_NEXT(m, pageq);
OpenPOWER on IntegriCloud