author    | peter <peter@FreeBSD.org> | 2000-05-21 12:50:18 +0000
committer | peter <peter@FreeBSD.org> | 2000-05-21 12:50:18 +0000
commit    | ee5cd6988fd6860707babbcfbe036049d375cafd (patch)
tree      | 2df035f35d0064994e9cd134cd613f8e79cdb23f /sys/vm/vm_page.c
parent    | 65f441c07383ce9e6f10124810cff27e1cb0f737 (diff)
download  | FreeBSD-src-ee5cd6988fd6860707babbcfbe036049d375cafd.zip, FreeBSD-src-ee5cd6988fd6860707babbcfbe036049d375cafd.tar.gz
Implement an optimization of the VM<->pmap API. Pass vm_page_t's directly
to various pmap_*() functions instead of looking up the physical address
and passing that. In many cases, the first thing the pmap code was doing
was going to a lot of trouble to get back the original vm_page_t, or
its shadow pv_table entry.
Inspired by: John Dyson's 1998 patches.
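
As an illustration only, here is a minimal sketch of how a VM-layer caller
changes under the new interface. The caller name example_clean_page() is made
up; the pmap_clear_modify() change itself is taken from the diff below.

/* Hypothetical caller, for illustration only. */
void
example_clean_page(vm_page_t m)
{
	/*
	 * Old interface: pass the physical address, which the pmap layer
	 * then went to some trouble to translate back into a vm_page_t
	 * (or its pv_table entry):
	 *
	 *	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	 */

	/* New interface: hand the vm_page_t straight through. */
	pmap_clear_modify(m);
}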
Also:
Eliminate pv_table as a separate thing and build it into a machine-
dependent part of vm_page_t.  This eliminates having a separate set of
structures that shadow each other in a 1:1 fashion, which we often went to
a lot of trouble to translate from one to the other. (see above)
This happens to save 4 bytes of physical memory for each page in the
system. (8 bytes on the Alpha).
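
A rough sketch of the data-structure change follows; the struct and field
names here are approximations for illustration, not necessarily the exact
definitions in the machine-dependent headers. Rather than a standalone
pv_table[] that shadows vm_page_array 1:1, each vm_page_t carries a small
machine-dependent member holding its pv_entry list:

/* Approximate layout; names are illustrative. */
struct md_page {				/* machine-dependent per-page data */
	int			pv_list_count;
	TAILQ_HEAD(, pv_entry)	pv_list;	/* mappings of this page */
};

struct vm_page {
	/* ... machine-independent fields (excerpt, not the full struct) ... */
	struct md_page	md;			/* replaces the old pv_table[] slot */
};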
Eliminate the use of the phys_avail[] array to determine if a page is
managed (i.e., it has pv_entries, etc.).  Store this information in a flag.
Things like device_pager set it because they create vm_page_t's on the
fly that do not have pv_entries. This makes it easier to "unmanage" a
page of physical memory (this will be taken advantage of in subsequent
commits).
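
A sketch of the difference in the "is this page managed?" test. Both helper
names below are made up for illustration, and the exact flag the pmap code
consults may differ; PG_FICTITIOUS is shown only as an example of such a flag.

/* Old style: scan phys_avail[] to see if the address is managed RAM. */
static int
page_is_managed_old(vm_offset_t pa)
{
	int i;

	for (i = 0; phys_avail[i + 1]; i += 2)
		if (pa >= phys_avail[i] && pa < phys_avail[i + 1])
			return (1);
	return (0);
}

/* New style: the information lives in a flag on the vm_page_t itself. */
static int
page_is_managed_new(vm_page_t m)
{
	return ((m->flags & PG_FICTITIOUS) == 0);	/* flag name illustrative */
}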
Add a function to add a new page to the freelist. This could be used
for reclaiming the previously wasted pages left over from preloaded
loader(8) files.
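
As a hypothetical example of that reclamation (the helper and its arguments
are illustrative and not part of this commit), a preload range could be walked
page by page and each page handed to vm_add_new_page():

/* Illustrative only: return a range of preloaded pages to the freelist. */
static void
reclaim_preload_pages(vm_offset_t start, vm_offset_t end)
{
	vm_offset_t pa;
	int s;

	s = splhigh();		/* vm_add_new_page() must be called at splhigh() */
	for (pa = trunc_page(start); pa < end; pa += PAGE_SIZE)
		vm_add_new_page(pa);
	splx(s);
}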
Reviewed by: dillon
Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r-- | sys/vm/vm_page.c | 42
1 file changed, 28 insertions, 14 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 1f1a066..6b2b320 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -119,7 +119,7 @@ vm_page_queue_init(void) {
 }
 
 vm_page_t vm_page_array = 0;
-static int vm_page_array_size = 0;
+int vm_page_array_size = 0;
 long first_page = 0;
 int vm_page_zero_count = 0;
 
@@ -143,6 +143,30 @@ vm_set_page_size()
 }
 
 /*
+ *	vm_add_new_page:
+ *
+ *	Add a new page to the freelist for use by the system.
+ *	Must be called at splhigh().
+ */
+vm_page_t
+vm_add_new_page(pa)
+	vm_offset_t pa;
+{
+	vm_page_t m;
+
+	++cnt.v_page_count;
+	++cnt.v_free_count;
+	m = PHYS_TO_VM_PAGE(pa);
+	m->phys_addr = pa;
+	m->flags = 0;
+	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
+	m->queue = m->pc + PQ_FREE;
+	TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq);
+	vm_page_queues[m->queue].lcnt++;
+	return (m);
+}
+
+/*
  *	vm_page_startup:
  *
  *	Initializes the resident memory module.
@@ -159,7 +183,6 @@ vm_page_startup(starta, enda, vaddr)
 	register vm_offset_t vaddr;
 {
 	register vm_offset_t mapped;
-	register vm_page_t m;
 	register struct vm_page **bucket;
 	vm_size_t npages, page_range;
 	register vm_offset_t new_start;
@@ -296,15 +319,7 @@ vm_page_startup(starta, enda, vaddr)
 		else
 			pa = phys_avail[i];
 		while (pa < phys_avail[i + 1] && npages-- > 0) {
-			++cnt.v_page_count;
-			++cnt.v_free_count;
-			m = PHYS_TO_VM_PAGE(pa);
-			m->phys_addr = pa;
-			m->flags = 0;
-			m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
-			m->queue = m->pc + PQ_FREE;
-			TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq);
-			vm_page_queues[m->queue].lcnt++;
+			vm_add_new_page(pa);
 			pa += PAGE_SIZE;
 		}
 	}
@@ -1518,7 +1533,7 @@ vm_page_set_validclean(m, base, size)
 	m->valid |= pagebits;
 	m->dirty &= ~pagebits;
 	if (base == 0 && size == PAGE_SIZE) {
-		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+		pmap_clear_modify(m);
 		vm_page_flag_clear(m, PG_NOSYNC);
 	}
 }
@@ -1649,8 +1664,7 @@ void
 vm_page_test_dirty(m)
 	vm_page_t m;
 {
-	if ((m->dirty != VM_PAGE_BITS_ALL) &&
-	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
+	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
 		vm_page_dirty(m);
 	}
 }