diff options
author | peter <peter@FreeBSD.org> | 2000-05-21 12:50:18 +0000 |
---|---|---|
committer | peter <peter@FreeBSD.org> | 2000-05-21 12:50:18 +0000 |
commit | ee5cd6988fd6860707babbcfbe036049d375cafd (patch) | |
tree | 2df035f35d0064994e9cd134cd613f8e79cdb23f /sys/vm | |
parent | 65f441c07383ce9e6f10124810cff27e1cb0f737 (diff) | |
download | FreeBSD-src-ee5cd6988fd6860707babbcfbe036049d375cafd.zip FreeBSD-src-ee5cd6988fd6860707babbcfbe036049d375cafd.tar.gz |
Implement an optimization of the VM<->pmap API. Pass vm_page_t's directly
to various pmap_*() functions instead of looking up the physical address
and passing that. In many cases, the first thing the pmap code was doing
was going to a lot of trouble to get back the original vm_page_t, or
its shadow pv_table entry.
Inspired by: John Dyson's 1998 patches.
Also:
Eliminate pv_table as a separate thing and build it into a machine
dependent part of vm_page_t. This eliminates having a separate set of
structures that shadow each other in a 1:1 fashion that we often went to
a lot of trouble to translate from one to the other. (see above)
This happens to save 4 bytes of physical memory for each page in the
system. (8 bytes on the Alpha).
Eliminate the use of the phys_avail[] array to determine if a page is
managed (ie: it has pv_entries etc). Store this information in a flag.
Things like device_pager set it because they create vm_page_t's on the
fly that do not have pv_entries. This makes it easier to "unmanage" a
page of physical memory (this will be taken advantage of in subsequent
commits).
Add a function to add a new page to the freelist. This could be used
for reclaiming the previously wasted pages left over from preloaded
loader(8) files.
Reviewed by: dillon
Diffstat (limited to 'sys/vm')
-rw-r--r-- | sys/vm/pmap.h | 20 | ||||
-rw-r--r-- | sys/vm/swap_pager.c | 4 | ||||
-rw-r--r-- | sys/vm/vm_fault.c | 5 | ||||
-rw-r--r-- | sys/vm/vm_kern.c | 3 | ||||
-rw-r--r-- | sys/vm/vm_mmap.c | 4 | ||||
-rw-r--r-- | sys/vm/vm_object.c | 2 | ||||
-rw-r--r-- | sys/vm/vm_page.c | 42 | ||||
-rw-r--r-- | sys/vm/vm_page.h | 7 | ||||
-rw-r--r-- | sys/vm/vm_pageout.c | 16 | ||||
-rw-r--r-- | sys/vm/vnode_pager.c | 6 |
10 files changed, 62 insertions, 47 deletions
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h index 7a1bd1b..3087236 100644 --- a/sys/vm/pmap.h +++ b/sys/vm/pmap.h @@ -94,28 +94,28 @@ struct proc; void pmap_page_is_free __P((vm_page_t m)); #endif void pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t)); -void pmap_clear_modify __P((vm_offset_t pa)); -void pmap_clear_reference __P((vm_offset_t pa)); +void pmap_clear_modify __P((vm_page_t m)); +void pmap_clear_reference __P((vm_page_t m)); void pmap_collect __P((void)); void pmap_copy __P((pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t)); void pmap_copy_page __P((vm_offset_t, vm_offset_t)); void pmap_destroy __P((pmap_t)); -void pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, +void pmap_enter __P((pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t)); -vm_offset_t pmap_extract __P((pmap_t, vm_offset_t)); +vm_offset_t pmap_extract __P((pmap_t pmap, vm_offset_t va)); void pmap_growkernel __P((vm_offset_t)); void pmap_init __P((vm_offset_t, vm_offset_t)); -boolean_t pmap_is_modified __P((vm_offset_t pa)); -boolean_t pmap_ts_referenced __P((vm_offset_t pa)); -void pmap_kenter __P((vm_offset_t, vm_offset_t)); +boolean_t pmap_is_modified __P((vm_page_t m)); +boolean_t pmap_ts_referenced __P((vm_page_t m)); +void pmap_kenter __P((vm_offset_t va, vm_offset_t pa)); void pmap_kremove __P((vm_offset_t)); vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int)); void pmap_object_init_pt __P((pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_offset_t size, int pagelimit)); -boolean_t pmap_page_exists __P((pmap_t, vm_offset_t)); -void pmap_page_protect __P((vm_offset_t, vm_prot_t)); +boolean_t pmap_page_exists __P((pmap_t pmap, vm_page_t m)); +void pmap_page_protect __P((vm_page_t m, vm_prot_t prot)); void pmap_pageable __P((pmap_t, vm_offset_t, vm_offset_t, boolean_t)); vm_offset_t pmap_phys_address __P((int)); @@ -140,7 +140,7 @@ void pmap_swapout_proc __P((struct proc *p)); void pmap_swapin_proc __P((struct proc 
*p)); void pmap_activate __P((struct proc *p)); vm_offset_t pmap_addr_hint __P((vm_object_t obj, vm_offset_t addr, vm_size_t size)); -void pmap_init2 __P((void)); +void pmap_init2 __P((void)); #endif /* _KERNEL */ diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c index 4e8174b..31b632a 100644 --- a/sys/vm/swap_pager.c +++ b/sys/vm/swap_pager.c @@ -1592,7 +1592,7 @@ swp_pager_async_iodone(bp) * valid bits here, it is up to the caller. */ - pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + pmap_clear_modify(m); m->valid = VM_PAGE_BITS_ALL; vm_page_undirty(m); vm_page_flag_clear(m, PG_ZERO); @@ -1618,7 +1618,7 @@ swp_pager_async_iodone(bp) * busy count and possibly wakes waiter's up ). */ vm_page_protect(m, VM_PROT_READ); - pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + pmap_clear_modify(m); vm_page_undirty(m); vm_page_io_finish(m); } diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c index 698de1a..14133fa 100644 --- a/sys/vm/vm_fault.c +++ b/sys/vm/vm_fault.c @@ -826,7 +826,7 @@ readrest: printf("Warning: page %p partially invalid on fault\n", fs.m); } - pmap_enter(fs.map->pmap, vaddr, VM_PAGE_TO_PHYS(fs.m), prot, wired); + pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired); if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) { pmap_prefault(fs.map->pmap, vaddr, fs.entry); @@ -1075,8 +1075,7 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry) */ vm_page_flag_clear(dst_m, PG_ZERO); - pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m), - prot, FALSE); + pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE); vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED); /* diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c index 9b8584c..ee9e7e4 100644 --- a/sys/vm/vm_kern.c +++ b/sys/vm/vm_kern.c @@ -399,8 +399,7 @@ retry: /* * Because this is kernel_pmap, this call will not block. 
*/ - pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m), - VM_PROT_ALL, 1); + pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1); vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED); } vm_map_unlock(map); diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c index d617d0c..d5cc69d 100644 --- a/sys/vm/vm_mmap.c +++ b/sys/vm/vm_mmap.c @@ -809,10 +809,10 @@ RestartScan: if (m) { mincoreinfo = MINCORE_INCORE; if (m->dirty || - pmap_is_modified(VM_PAGE_TO_PHYS(m))) + pmap_is_modified(m)) mincoreinfo |= MINCORE_MODIFIED_OTHER; if ((m->flags & PG_REFERENCED) || - pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) { + pmap_ts_referenced(m)) { vm_page_flag_set(m, PG_REFERENCED); mincoreinfo |= MINCORE_REFERENCED_OTHER; } diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index c77e6d8..1b33f78 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -867,7 +867,7 @@ shadowlookup: * can without actually taking the step of unmapping * it. */ - pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + pmap_clear_modify(m); m->dirty = 0; m->act_count = 0; vm_page_dontneed(m); diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index 1f1a066..6b2b320 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -119,7 +119,7 @@ vm_page_queue_init(void) { } vm_page_t vm_page_array = 0; -static int vm_page_array_size = 0; +int vm_page_array_size = 0; long first_page = 0; int vm_page_zero_count = 0; @@ -143,6 +143,30 @@ vm_set_page_size() } /* + * vm_add_new_page: + * + * Add a new page to the freelist for use by the system. + * Must be called at splhigh(). + */ +vm_page_t +vm_add_new_page(pa) + vm_offset_t pa; +{ + vm_page_t m; + + ++cnt.v_page_count; + ++cnt.v_free_count; + m = PHYS_TO_VM_PAGE(pa); + m->phys_addr = pa; + m->flags = 0; + m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK; + m->queue = m->pc + PQ_FREE; + TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq); + vm_page_queues[m->queue].lcnt++; + return (m); +} + +/* * vm_page_startup: * * Initializes the resident memory module. 
@@ -159,7 +183,6 @@ vm_page_startup(starta, enda, vaddr) register vm_offset_t vaddr; { register vm_offset_t mapped; - register vm_page_t m; register struct vm_page **bucket; vm_size_t npages, page_range; register vm_offset_t new_start; @@ -296,15 +319,7 @@ vm_page_startup(starta, enda, vaddr) else pa = phys_avail[i]; while (pa < phys_avail[i + 1] && npages-- > 0) { - ++cnt.v_page_count; - ++cnt.v_free_count; - m = PHYS_TO_VM_PAGE(pa); - m->phys_addr = pa; - m->flags = 0; - m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK; - m->queue = m->pc + PQ_FREE; - TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq); - vm_page_queues[m->queue].lcnt++; + vm_add_new_page(pa); pa += PAGE_SIZE; } } @@ -1518,7 +1533,7 @@ vm_page_set_validclean(m, base, size) m->valid |= pagebits; m->dirty &= ~pagebits; if (base == 0 && size == PAGE_SIZE) { - pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + pmap_clear_modify(m); vm_page_flag_clear(m, PG_NOSYNC); } } @@ -1649,8 +1664,7 @@ void vm_page_test_dirty(m) vm_page_t m; { - if ((m->dirty != VM_PAGE_BITS_ALL) && - pmap_is_modified(VM_PAGE_TO_PHYS(m))) { + if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) { vm_page_dirty(m); } } diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h index 4d9f0c1..e61be7f 100644 --- a/sys/vm/vm_page.h +++ b/sys/vm/vm_page.h @@ -117,6 +117,7 @@ struct vm_page { vm_object_t object; /* which object am I in (O,P)*/ vm_pindex_t pindex; /* offset into object (O,P) */ vm_offset_t phys_addr; /* physical address of page */ + struct md_page md; /* machine dependant stuff */ u_short queue; /* page queue index */ u_short flags, /* see below */ pc; /* page color */ @@ -278,6 +279,7 @@ extern struct vpgqueues vm_page_queues[PQ_COUNT]; extern int vm_page_zero_count; extern vm_page_t vm_page_array; /* First resident page in table */ +extern int vm_page_array_size; /* number of vm_page_t's */ extern long first_page; /* first physical page number */ #define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr) @@ -396,6 +398,7 @@ vm_page_t 
vm_page_lookup __P((vm_object_t, vm_pindex_t)); void vm_page_remove __P((vm_page_t)); void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t)); vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t)); +vm_page_t vm_add_new_page __P((vm_offset_t pa)); void vm_page_unwire __P((vm_page_t, int)); void vm_page_wire __P((vm_page_t)); void vm_page_unqueue __P((vm_page_t)); @@ -448,11 +451,11 @@ vm_page_protect(vm_page_t mem, int prot) { if (prot == VM_PROT_NONE) { if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) { - pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_NONE); + pmap_page_protect(mem, VM_PROT_NONE); vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED); } } else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) { - pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_READ); + pmap_page_protect(mem, VM_PROT_READ); vm_page_flag_clear(mem, PG_WRITEABLE); } } diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c index 592f6cd..d1fe8e1 100644 --- a/sys/vm/vm_pageout.c +++ b/sys/vm/vm_pageout.c @@ -395,7 +395,7 @@ vm_pageout_flush(mc, count, flags) * essentially lose the changes by pretending it * worked. */ - pmap_clear_modify(VM_PAGE_TO_PHYS(mt)); + pmap_clear_modify(mt); vm_page_undirty(mt); break; case VM_PAGER_ERROR: @@ -475,12 +475,12 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only) p->hold_count != 0 || p->busy != 0 || (p->flags & PG_BUSY) || - !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) { + !pmap_page_exists(vm_map_pmap(map), p)) { p = next; continue; } - actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p)); + actcount = pmap_ts_referenced(p); if (actcount) { vm_page_flag_set(p, PG_REFERENCED); } else if (p->flags & PG_REFERENCED) { @@ -709,7 +709,7 @@ rescan0: */ if (m->object->ref_count == 0) { vm_page_flag_clear(m, PG_REFERENCED); - pmap_clear_reference(VM_PAGE_TO_PHYS(m)); + pmap_clear_reference(m); /* * Otherwise, if the page has been referenced while in the @@ -721,7 +721,7 @@ rescan0: * references. 
*/ } else if (((m->flags & PG_REFERENCED) == 0) && - (actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) { + (actcount = pmap_ts_referenced(m))) { vm_page_activate(m); m->act_count += (actcount + ACT_ADVANCE); continue; @@ -735,7 +735,7 @@ rescan0: */ if ((m->flags & PG_REFERENCED) != 0) { vm_page_flag_clear(m, PG_REFERENCED); - actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)); + actcount = pmap_ts_referenced(m); vm_page_activate(m); m->act_count += (actcount + ACT_ADVANCE + 1); continue; @@ -987,7 +987,7 @@ rescan0: if (m->flags & PG_REFERENCED) { actcount += 1; } - actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m)); + actcount += pmap_ts_referenced(m); if (actcount) { m->act_count += ACT_ADVANCE + actcount; if (m->act_count > ACT_MAX) @@ -1199,7 +1199,7 @@ vm_pageout_page_stats() actcount += 1; } - actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m)); + actcount += pmap_ts_referenced(m); if (actcount) { m->act_count += ACT_ADVANCE + actcount; if (m->act_count > ACT_MAX) diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c index 904ffd2..2633426 100644 --- a/sys/vm/vnode_pager.c +++ b/sys/vm/vnode_pager.c @@ -452,7 +452,7 @@ vnode_pager_input_smlfs(object, m) } } vm_pager_unmap_page(kva); - pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + pmap_clear_modify(m); vm_page_flag_clear(m, PG_ZERO); if (error) { return VM_PAGER_ERROR; @@ -515,7 +515,7 @@ vnode_pager_input_old(object, m) } vm_pager_unmap_page(kva); } - pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + pmap_clear_modify(m); vm_page_undirty(m); vm_page_flag_clear(m, PG_ZERO); if (!error) @@ -782,7 +782,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage) */ mt->valid = VM_PAGE_BITS_ALL; vm_page_undirty(mt); /* should be an assert? XXX */ - pmap_clear_modify(VM_PAGE_TO_PHYS(mt)); + pmap_clear_modify(mt); } else { /* * Read did not fill up entire page. Since this |