author     dillon <dillon@FreeBSD.org>  1999-02-08 00:37:36 +0000
committer  dillon <dillon@FreeBSD.org>  1999-02-08 00:37:36 +0000
commit     b7a0b99c31ad20c9d83cd00f353e622a25f80182 (patch)
tree       287ab6f615e494be7a3958316918da47ac087e12 /sys/vm
parent     3e732af5eab8cbd6d41b59b62aa54411e81cd91c (diff)
Rip out PQ_ZERO queue.  PQ_ZERO functionality is now combined with
PQ_FREE.  There is little operational difference other than the kernel
being a few kilobytes smaller and the code being more readable.

* vm_page_select_free() has been *greatly* simplified.
* The PQ_ZERO page queue and supporting structures have been removed.
* vm_page_zero_idle() has been revamped (see below).

PG_ZERO setting and clearing has been migrated from vm_page_alloc() to
vm_page_free[_zero]() and will eventually be guaranteed to remain
tracked throughout a page's life (if it isn't already).

When a page is freed, PG_ZERO pages are appended to the appropriate
tailq in the PQ_FREE queue, while non-PG_ZERO pages are prepended.

When locating a new free page, PG_ZERO selection operates from within
vm_page_list_find() (taking the page from the end of the queue instead
of the beginning) and only occurs in the nominal critical-path case.
If the nominal case misses, both normal and zero-page allocation
devolve into the same _vm_page_list_find() selection code without any
zero-page-specific optimizations.

Additionally, vm_page_zero_idle() has been revamped.  Hysteresis has
been added and zero-page tracking adjusted to conform with the other
changes.  Currently the hysteresis is set at 1/3 (lo) and 1/2 (hi) of
the number of free pages.  We may wish to increase both parameters as
time permits.  The hysteresis is designed to avoid pointless zeroing in
borderline allocation/free situations.
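
The policy described above is compact enough to sketch in isolation.
The following userland C program is a minimal illustration, not the
kernel code: struct page, page_free(), page_alloc() and
want_more_zeroing() are names invented here, and the 1/3 and 1/2
watermarks simply restate the commit's initial hysteresis guesses.  It
mimics the patch's use of the <sys/queue.h> TAILQ macros: freed PG_ZERO
pages go to the tail of the free list, other pages to the head, and a
zero-preferring allocation scans from the tail the way
vm_page_list_find() now does.

/*
 * Minimal userland sketch (not the kernel code) of the single
 * free-queue policy this commit introduces.
 */
#include <stdio.h>
#include <sys/queue.h>

#define PG_ZERO 0x01

struct page {
	int flags;
	TAILQ_ENTRY(page) pageq;
};

TAILQ_HEAD(pglist, page);

static struct pglist freeq = TAILQ_HEAD_INITIALIZER(freeq);
static int zero_count;		/* pages on the queue with PG_ZERO set */
static int free_count;		/* total pages on the queue */

static void
page_free(struct page *m)
{
	if (m->flags & PG_ZERO) {
		/* Zeroed pages accumulate at the tail... */
		TAILQ_INSERT_TAIL(&freeq, m, pageq);
		zero_count++;
	} else {
		/* ...non-zeroed pages at the head. */
		TAILQ_INSERT_HEAD(&freeq, m, pageq);
	}
	free_count++;
}

static struct page *
page_alloc(int prefer_zero)
{
	/*
	 * Mirrors vm_page_list_find(): a zero-preferring allocation
	 * looks at the tail, a normal one at the head.  Either way
	 * the nominal case is a single dequeue.
	 */
	struct page *m = prefer_zero ?
	    TAILQ_LAST(&freeq, pglist) : TAILQ_FIRST(&freeq);

	if (m != NULL) {
		TAILQ_REMOVE(&freeq, m, pageq);
		free_count--;
		if (m->flags & PG_ZERO)
			zero_count--;
	}
	return (m);
}

/*
 * Hysteresis as described in the commit message: idle zeroing starts
 * when fewer than 1/3 of the free pages are pre-zeroed and stops once
 * 1/2 are.  The fractions are the commit's initial guesses.
 */
static int
want_more_zeroing(int currently_zeroing)
{
	if (currently_zeroing)
		return (zero_count < free_count / 2);	/* hi watermark */
	return (zero_count < free_count / 3);		/* lo watermark */
}

int
main(void)
{
	struct page pg[4] = {
		{ .flags = 0 }, { .flags = PG_ZERO },
		{ .flags = 0 }, { .flags = PG_ZERO },
	};

	for (int i = 0; i < 4; i++)
		page_free(&pg[i]);

	struct page *z = page_alloc(1);		/* wants a zeroed page */
	printf("prefer_zero got PG_ZERO=%d, zeroing wanted=%d\n",
	    (z->flags & PG_ZERO) != 0, want_more_zeroing(0));
	return (0);
}

Keeping zeroed and non-zeroed pages on the same tailq is what lets the
nominal allocation path stay a single dequeue regardless of preference,
which is exactly why the separate PQ_ZERO queue became unnecessary.
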
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_page.c125
-rw-r--r--sys/vm/vm_page.h42
-rw-r--r--sys/vm/vm_pageout.c4
3 files changed, 64 insertions, 107 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index b27d9b2..b9a44ae 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.123 1999/01/28 00:57:57 dillon Exp $
+ * $Id: vm_page.c,v 1.124 1999/02/07 20:45:15 dillon Exp $
*/
/*
@@ -87,8 +87,6 @@
#include <vm/vm_extern.h>
static void vm_page_queue_init __P((void));
-static vm_page_t _vm_page_select_free __P((vm_object_t object,
- vm_pindex_t pindex, int prefqueue));
static vm_page_t vm_page_select_cache __P((vm_object_t, vm_pindex_t));
/*
@@ -102,7 +100,6 @@ static int vm_page_hash_mask; /* Mask for hash function */
static volatile int vm_page_bucket_generation;
struct pglist vm_page_queue_free[PQ_L2_SIZE] = {{0}};
-struct pglist vm_page_queue_zero[PQ_L2_SIZE] = {{0}};
struct pglist vm_page_queue_active = {0};
struct pglist vm_page_queue_inactive = {0};
struct pglist vm_page_queue_cache[PQ_L2_SIZE] = {{0}};
@@ -122,10 +119,6 @@ vm_page_queue_init(void) {
vm_page_queues[PQ_FREE+i].pl = &vm_page_queue_free[i];
vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
}
- for(i=0;i<PQ_L2_SIZE;i++) {
- vm_page_queues[PQ_ZERO+i].pl = &vm_page_queue_zero[i];
- vm_page_queues[PQ_ZERO+i].cnt = &cnt.v_free_count;
- }
vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
@@ -726,7 +719,8 @@ vm_page_select_cache(object, pindex)
while (TRUE) {
m = vm_page_list_find(
PQ_CACHE,
- (pindex + object->pg_color) & PQ_L2_MASK
+ (pindex + object->pg_color) & PQ_L2_MASK,
+ FALSE
);
if (m && ((m->flags & PG_BUSY) || m->busy ||
m->hold_count || m->wire_count)) {
@@ -749,65 +743,18 @@ vm_page_select_cache(object, pindex)
*/
static __inline vm_page_t
-vm_page_select_free(vm_object_t object, vm_pindex_t pindex, int prefqueue)
-{
- vm_page_t m;
- int otherq = (prefqueue == PQ_ZERO) ? PQ_FREE : PQ_ZERO;
-
-#if PQ_L2_SIZE > 1
- int i = (pindex + object->pg_color) & PQ_L2_MASK;
-
- if ((m = TAILQ_FIRST(vm_page_queues[prefqueue+i].pl)) == NULL &&
- (m = TAILQ_FIRST(vm_page_queues[otherq+i].pl)) == NULL
- ) {
- m = _vm_page_select_free(object, pindex, prefqueue);
- }
-#else
- if ((m = TAILQ_FIRST(vm_page_queues[prefqueue].pl)) == NULL)
- m = TAILQ_FIRST(vm_page_queues[otherq].pl);
-#endif
- return(m);
-}
-
-#if PQ_L2_SIZE > 1
-
-static vm_page_t
-_vm_page_select_free(object, pindex, prefqueue)
- vm_object_t object;
- vm_pindex_t pindex;
- int prefqueue;
+vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
- int i;
- int index;
- vm_page_t m = NULL;
- struct vpgqueues *pq;
- struct vpgqueues *po;
-
- if (prefqueue == PQ_ZERO) {
- pq = &vm_page_queues[PQ_ZERO];
- po = &vm_page_queues[PQ_FREE];
- } else {
- pq = &vm_page_queues[PQ_FREE];
- po = &vm_page_queues[PQ_ZERO];
- }
-
- index = pindex + object->pg_color;
+ vm_page_t m;
- for(i = PQ_L2_SIZE / 2; i > 0; --i) {
- if ((m = TAILQ_FIRST(pq[(index+i) & PQ_L2_MASK].pl)) != NULL)
- break;
- if ((m = TAILQ_FIRST(po[(index+i) & PQ_L2_MASK].pl)) != NULL)
- break;
- if ((m = TAILQ_FIRST(pq[(index-i) & PQ_L2_MASK].pl)) != NULL)
- break;
- if ((m = TAILQ_FIRST(po[(index-i) & PQ_L2_MASK].pl)) != NULL)
- break;
- }
+ m = vm_page_list_find(
+ PQ_FREE,
+ (pindex + object->pg_color) & PQ_L2_MASK,
+ prefer_zero
+ );
return(m);
}
-#endif
-
/*
* vm_page_alloc:
*
@@ -859,7 +806,7 @@ loop:
case VM_ALLOC_NORMAL:
if (cnt.v_free_count >= cnt.v_free_reserved) {
- m = vm_page_select_free(object, pindex, PQ_FREE);
+ m = vm_page_select_free(object, pindex, FALSE);
KASSERT(m != NULL, ("vm_page_alloc(NORMAL): missing page on free queue\n"));
} else {
m = vm_page_select_cache(object, pindex);
@@ -878,7 +825,7 @@ loop:
case VM_ALLOC_ZERO:
if (cnt.v_free_count >= cnt.v_free_reserved) {
- m = vm_page_select_free(object, pindex, PQ_ZERO);
+ m = vm_page_select_free(object, pindex, TRUE);
KASSERT(m != NULL, ("vm_page_alloc(ZERO): missing page on free queue\n"));
} else {
m = vm_page_select_cache(object, pindex);
@@ -899,7 +846,7 @@ loop:
if ((cnt.v_free_count >= cnt.v_free_reserved) ||
((cnt.v_cache_count == 0) &&
(cnt.v_free_count >= cnt.v_interrupt_free_min))) {
- m = vm_page_select_free(object, pindex, PQ_FREE);
+ m = vm_page_select_free(object, pindex, FALSE);
KASSERT(m != NULL, ("vm_page_alloc(SYSTEM): missing page on free queue\n"));
} else {
m = vm_page_select_cache(object, pindex);
@@ -918,7 +865,7 @@ loop:
case VM_ALLOC_INTERRUPT:
if (cnt.v_free_count > 0) {
- m = vm_page_select_free(object, pindex, PQ_FREE);
+ m = vm_page_select_free(object, pindex, FALSE);
KASSERT(m != NULL, ("vm_page_alloc(INTERRUPT): missing page on free queue\n"));
} else {
splx(s);
@@ -963,7 +910,7 @@ loop:
(*pq->lcnt)--;
oldobject = NULL;
- if (qtype == PQ_ZERO) {
+ if (m->flags & PG_ZERO) {
vm_page_zero_count--;
m->flags = PG_ZERO | PG_BUSY;
} else {
@@ -1182,7 +1129,7 @@ vm_page_free_wakeup()
*/
void
-vm_page_free_toq(vm_page_t m, int queue)
+vm_page_free_toq(vm_page_t m)
{
int s;
struct vpgqueues *pq;
@@ -1265,27 +1212,29 @@ vm_page_free_toq(vm_page_t m, int queue)
pmap_page_is_free(m);
#endif
- m->queue = queue + m->pc;
+ m->queue = PQ_FREE + m->pc;
pq = &vm_page_queues[m->queue];
++(*pq->lcnt);
++(*pq->cnt);
- if (queue == PQ_ZERO) {
- TAILQ_INSERT_HEAD(pq->pl, m, pageq);
+ /*
+ * Put zero'd pages on the end ( where we look for zero'd pages
+ * first ) and non-zerod pages at the head.
+ */
+
+ if (m->flags & PG_ZERO) {
+ TAILQ_INSERT_TAIL(pq->pl, m, pageq);
++vm_page_zero_count;
- } else {
+ } else if (curproc == pageproc) {
/*
- * If the pageout process is grabbing the page, it is likely
- * that the page is NOT in the cache. It is more likely that
- * the page will be partially in the cache if it is being
- * explicitly freed.
+ * If the pageout daemon is freeing pages, the pages are
+ * likely to NOT be in the L1 or L2 caches due to their age.
+ * For now we do not try to do anything special with this
+ * info.
*/
-
- if (curproc == pageproc) {
- TAILQ_INSERT_TAIL(pq->pl, m, pageq);
- } else {
- TAILQ_INSERT_HEAD(pq->pl, m, pageq);
- }
+ TAILQ_INSERT_HEAD(pq->pl, m, pageq);
+ } else {
+ TAILQ_INSERT_HEAD(pq->pl, m, pageq);
}
vm_page_free_wakeup();
@@ -1640,7 +1589,7 @@ again:
int pqtype;
phys = VM_PAGE_TO_PHYS(&pga[i]);
pqtype = pga[i].queue - pga[i].pc;
- if (((pqtype == PQ_ZERO) || (pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
+ if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
(phys >= low) && (phys < high) &&
((phys & (alignment - 1)) == 0) &&
(((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
@@ -1724,7 +1673,7 @@ again1:
pqtype = pga[i].queue - pga[i].pc;
if ((VM_PAGE_TO_PHYS(&pga[i]) !=
(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
- ((pqtype != PQ_ZERO) && (pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
+ ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
start++;
goto again;
}
@@ -1843,12 +1792,6 @@ DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
}
db_printf("\n");
- db_printf("PQ_ZERO:");
- for(i=0;i<PQ_L2_SIZE;i++) {
- db_printf(" %d", *vm_page_queues[PQ_ZERO + i].lcnt);
- }
- db_printf("\n");
-
db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
*vm_page_queues[PQ_ACTIVE].lcnt,
*vm_page_queues[PQ_INACTIVE].lcnt);
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 7fdf1cb..2bd786c 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_page.h,v 1.53 1999/01/24 05:57:50 dillon Exp $
+ * $Id: vm_page.h,v 1.54 1999/02/07 20:45:15 dillon Exp $
*/
/*
@@ -139,7 +139,7 @@ struct vm_page {
/*
* Page coloring parameters
*/
-/* Each of PQ_FREE, PQ_ZERO and PQ_CACHE have PQ_HASH_SIZE entries */
+/* Each of PQ_FREE, and PQ_CACHE have PQ_HASH_SIZE entries */
/* Define one of the following */
#if defined(PQ_HUGECACHE)
@@ -186,11 +186,11 @@ struct vm_page {
#define PQ_NONE 0
#define PQ_FREE 1
-#define PQ_ZERO (1 + PQ_L2_SIZE)
-#define PQ_INACTIVE (1 + 2*PQ_L2_SIZE)
-#define PQ_ACTIVE (2 + 2*PQ_L2_SIZE)
-#define PQ_CACHE (3 + 2*PQ_L2_SIZE)
-#define PQ_COUNT (3 + 3*PQ_L2_SIZE)
+/* #define PQ_ZERO (1 + PQ_L2_SIZE) */
+#define PQ_INACTIVE (1 + 1*PQ_L2_SIZE)
+#define PQ_ACTIVE (2 + 1*PQ_L2_SIZE)
+#define PQ_CACHE (3 + 1*PQ_L2_SIZE)
+#define PQ_COUNT (3 + 2*PQ_L2_SIZE)
extern struct vpgqueues {
struct pglist *pl;
@@ -253,7 +253,6 @@ extern struct vpgqueues {
*/
extern struct pglist vm_page_queue_free[PQ_L2_SIZE];/* memory free queue */
-extern struct pglist vm_page_queue_zero[PQ_L2_SIZE];/* zeroed memory free queue */
extern struct pglist vm_page_queue_active; /* active memory queue */
extern struct pglist vm_page_queue_inactive; /* inactive memory queue */
extern struct pglist vm_page_queue_cache[PQ_L2_SIZE];/* cache memory queue */
@@ -406,7 +405,7 @@ int vm_page_queue_index __P((vm_offset_t, int));
int vm_page_sleep(vm_page_t m, char *msg, char *busy);
int vm_page_asleep(vm_page_t m, char *msg, char *busy);
#endif
-void vm_page_free_toq(vm_page_t m, int queue);
+void vm_page_free_toq(vm_page_t m);
/*
* Keep page from being freed by the page daemon
@@ -483,12 +482,18 @@ vm_page_copy(src_m, dest_m)
* vm_page_free:
*
* Free a page
+ *
+ * The clearing of PG_ZERO is a temporary safety until the code can be
+ * reviewed to determine that PG_ZERO is being properly cleared on
+ * write faults or maps. PG_ZERO was previously cleared in
+ * vm_page_alloc().
*/
static __inline void
vm_page_free(m)
vm_page_t m;
{
- vm_page_free_toq(m, PQ_FREE);
+ vm_page_flag_clear(m, PG_ZERO);
+ vm_page_free_toq(m);
}
/*
@@ -500,7 +505,8 @@ static __inline void
vm_page_free_zero(m)
vm_page_t m;
{
- vm_page_free_toq(m, PQ_ZERO);
+ vm_page_flag_set(m, PG_ZERO);
+ vm_page_free_toq(m);
}
/*
@@ -552,16 +558,24 @@ vm_page_dirty(vm_page_t m)
}
static __inline vm_page_t
-vm_page_list_find(int basequeue, int index)
+vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
vm_page_t m;
#if PQ_L2_SIZE > 1
- m = TAILQ_FIRST(vm_page_queues[basequeue+index].pl);
+ if (prefer_zero) {
+ m = TAILQ_LAST(vm_page_queues[basequeue+index].pl, pglist);
+ } else {
+ m = TAILQ_FIRST(vm_page_queues[basequeue+index].pl);
+ }
if (m == NULL)
m = _vm_page_list_find(basequeue, index);
#else
- m = TAILQ_FIRST(vm_page_queues[basequeue].pl);
+ if (prefer_zero) {
+ m = TAILQ_LAST(vm_page_queues[basequeue].pl, pglist);
+ } else {
+ m = TAILQ_FIRST(vm_page_queues[basequeue].pl);
+ }
#endif
return(m);
}
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 9ab6c52..ecb331e 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.134 1999/01/24 06:04:52 dillon Exp $
+ * $Id: vm_pageout.c,v 1.135 1999/02/07 21:48:23 dillon Exp $
*/
/*
@@ -1079,7 +1079,7 @@ rescan0:
while (cnt.v_free_count < cnt.v_free_reserved) {
static int cache_rover = 0;
- m = vm_page_list_find(PQ_CACHE, cache_rover);
+ m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
if (!m)
break;
if ((m->flags & PG_BUSY) || m->busy || m->hold_count || m->wire_count) {