diff options
author | peter <peter@FreeBSD.org> | 2000-05-21 12:50:18 +0000 |
---|---|---|
committer | peter <peter@FreeBSD.org> | 2000-05-21 12:50:18 +0000 |
commit | ee5cd6988fd6860707babbcfbe036049d375cafd (patch) | |
tree | 2df035f35d0064994e9cd134cd613f8e79cdb23f /sys/vm/pmap.h | |
parent | 65f441c07383ce9e6f10124810cff27e1cb0f737 (diff) | |
download | FreeBSD-src-ee5cd6988fd6860707babbcfbe036049d375cafd.zip FreeBSD-src-ee5cd6988fd6860707babbcfbe036049d375cafd.tar.gz |
Implement an optimization of the VM<->pmap API. Pass vm_page_t's directly
to various pmap_*() functions instead of looking up the physical address
and passing that. In many cases, the first thing the pmap code was doing
was going to a lot of trouble to get back the original vm_page_t, or
its shadow pv_table entry.
Inspired by: John Dyson's 1998 patches.
Also:
Eliminate pv_table as a separate thing and build it into a machine
dependent part of vm_page_t. This eliminates having a separate set of
structures that shadow each other in a 1:1 fashion that we often went to
a lot of trouble to translate from one to the other. (see above)
This happens to save 4 bytes of physical memory for each page in the
system. (8 bytes on the Alpha).
Eliminate the use of the phys_avail[] array to determine if a page is
managed (i.e., it has pv_entries, etc.). Store this information in a flag.
Things like device_pager set it because they create vm_page_t's on the
fly that do not have pv_entries. This makes it easier to "unmanage" a
page of physical memory (this will be taken advantage of in subsequent
commits).
Add a function to add a new page to the freelist. This could be used
for reclaiming the previously wasted pages left over from preloaded
loader(8) files.
Reviewed by: dillon
Diffstat (limited to 'sys/vm/pmap.h')
-rw-r--r-- | sys/vm/pmap.h | 20 |
1 files changed, 10 insertions, 10 deletions
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h index 7a1bd1b..3087236 100644 --- a/sys/vm/pmap.h +++ b/sys/vm/pmap.h @@ -94,28 +94,28 @@ struct proc; void pmap_page_is_free __P((vm_page_t m)); #endif void pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t)); -void pmap_clear_modify __P((vm_offset_t pa)); -void pmap_clear_reference __P((vm_offset_t pa)); +void pmap_clear_modify __P((vm_page_t m)); +void pmap_clear_reference __P((vm_page_t m)); void pmap_collect __P((void)); void pmap_copy __P((pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t)); void pmap_copy_page __P((vm_offset_t, vm_offset_t)); void pmap_destroy __P((pmap_t)); -void pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, +void pmap_enter __P((pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t)); -vm_offset_t pmap_extract __P((pmap_t, vm_offset_t)); +vm_offset_t pmap_extract __P((pmap_t pmap, vm_offset_t va)); void pmap_growkernel __P((vm_offset_t)); void pmap_init __P((vm_offset_t, vm_offset_t)); -boolean_t pmap_is_modified __P((vm_offset_t pa)); -boolean_t pmap_ts_referenced __P((vm_offset_t pa)); -void pmap_kenter __P((vm_offset_t, vm_offset_t)); +boolean_t pmap_is_modified __P((vm_page_t m)); +boolean_t pmap_ts_referenced __P((vm_page_t m)); +void pmap_kenter __P((vm_offset_t va, vm_offset_t pa)); void pmap_kremove __P((vm_offset_t)); vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int)); void pmap_object_init_pt __P((pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_offset_t size, int pagelimit)); -boolean_t pmap_page_exists __P((pmap_t, vm_offset_t)); -void pmap_page_protect __P((vm_offset_t, vm_prot_t)); +boolean_t pmap_page_exists __P((pmap_t pmap, vm_page_t m)); +void pmap_page_protect __P((vm_page_t m, vm_prot_t prot)); void pmap_pageable __P((pmap_t, vm_offset_t, vm_offset_t, boolean_t)); vm_offset_t pmap_phys_address __P((int)); @@ -140,7 +140,7 @@ void pmap_swapout_proc __P((struct proc *p)); void pmap_swapin_proc __P((struct proc 
*p)); void pmap_activate __P((struct proc *p)); vm_offset_t pmap_addr_hint __P((vm_object_t obj, vm_offset_t addr, vm_size_t size)); -void pmap_init2 __P((void)); +void pmap_init2 __P((void)); #endif /* _KERNEL */ |