summary refs log tree commit diff stats
path: root/sys/vm
diff options
context:
space:
mode:
authorpeter <peter@FreeBSD.org>2002-04-15 16:00:03 +0000
committerpeter <peter@FreeBSD.org>2002-04-15 16:00:03 +0000
commit3d8c7d4cabad90e8d24802b5e0864d76dc5a05d9 (patch)
tree07394998150236b94ddf401e0d3efec082c9ab55 /sys/vm
parent929a8fb33da9bf37ac6d590e39a2e60f00c6c4dd (diff)
downloadFreeBSD-src-3d8c7d4cabad90e8d24802b5e0864d76dc5a05d9.zip
FreeBSD-src-3d8c7d4cabad90e8d24802b5e0864d76dc5a05d9.tar.gz
Pass vm_page_t instead of physical addresses to pmap_zero_page[_area]()
and pmap_copy_page(). This gets rid of a couple more physical addresses in upper layers, with the eventual aim of supporting PAE and dealing with the physical addressing mostly within pmap. (We will need either 64 bit physical addresses or page indexes, possibly both depending on the circumstances. Leaving this to pmap itself gives more flexibility.) Reviewed by: jake Tested on: i386, ia64 and (I believe) sparc64. (my alpha was hosed)
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/pmap.h6
-rw-r--r--sys/vm/vm_map.c2
-rw-r--r--sys/vm/vm_page.c32
-rw-r--r--sys/vm/vm_zeroidle.c2
4 files changed, 14 insertions, 28 deletions
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 4edb3da..8db3b03 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -97,7 +97,7 @@ void pmap_clear_modify(vm_page_t m);
void pmap_clear_reference(vm_page_t m);
void pmap_collect(void);
void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
-void pmap_copy_page(vm_offset_t, vm_offset_t);
+void pmap_copy_page(vm_page_t, vm_page_t);
void pmap_destroy(pmap_t);
void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
boolean_t);
@@ -126,8 +126,8 @@ void pmap_reference(pmap_t);
void pmap_release(pmap_t);
void pmap_remove(pmap_t, vm_offset_t, vm_offset_t);
void pmap_remove_pages(pmap_t, vm_offset_t, vm_offset_t);
-void pmap_zero_page(vm_offset_t);
-void pmap_zero_page_area(vm_offset_t, int off, int size);
+void pmap_zero_page(vm_page_t);
+void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
int pmap_mincore(pmap_t pmap, vm_offset_t addr);
void pmap_new_proc(struct proc *p);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 0fc76a3..12948b1 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -3221,7 +3221,7 @@ vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
}
vm_page_protect(m_in, VM_PROT_NONE);
- pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
+ pmap_copy_page(m_in, m_out);
m_out->valid = m_in->valid;
vm_page_dirty(m_out);
vm_page_activate(m_out);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 967ba69..38c505f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -455,7 +455,7 @@ vm_page_protect(vm_page_t mem, int prot)
boolean_t
vm_page_zero_fill(vm_page_t m)
{
- pmap_zero_page(VM_PAGE_TO_PHYS(m));
+ pmap_zero_page(m);
return (TRUE);
}
@@ -467,7 +467,7 @@ vm_page_zero_fill(vm_page_t m)
void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
- pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
+ pmap_copy_page(src_m, dest_m);
dest_m->valid = VM_PAGE_BITS_ALL;
}
@@ -1582,14 +1582,8 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
* first block.
*/
if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
- (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
- ) {
- pmap_zero_page_area(
- VM_PAGE_TO_PHYS(m),
- frag,
- base - frag
- );
- }
+ (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
+ pmap_zero_page_area(m, frag, base - frag);
/*
* If the ending offset is not DEV_BSIZE aligned and the
@@ -1598,14 +1592,9 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
*/
endoff = base + size;
if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
- (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
- ) {
- pmap_zero_page_area(
- VM_PAGE_TO_PHYS(m),
- endoff,
- DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
- );
- }
+ (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
+ pmap_zero_page_area(m, endoff,
+ DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
/*
* Set valid, clear dirty bits. If validating the entire
@@ -1702,11 +1691,8 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
(m->valid & (1 << i))
) {
if (i > b) {
- pmap_zero_page_area(
- VM_PAGE_TO_PHYS(m),
- b << DEV_BSHIFT,
- (i - b) << DEV_BSHIFT
- );
+ pmap_zero_page_area(m,
+ b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
}
b = i + 1;
}
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 92e5f27..99ace6e 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -82,7 +82,7 @@ vm_page_zero_idle(void)
TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
m->queue = PQ_NONE;
/* maybe drop out of Giant here */
- pmap_zero_page(VM_PAGE_TO_PHYS(m));
+ pmap_zero_page(m);
/* and return here */
vm_page_flag_set(m, PG_ZERO);
m->queue = PQ_FREE + m->pc;
OpenPOWER on IntegriCloud