diff options
author | peter <peter@FreeBSD.org> | 2002-04-15 16:00:03 +0000 |
---|---|---|
committer | peter <peter@FreeBSD.org> | 2002-04-15 16:00:03 +0000 |
commit | 3d8c7d4cabad90e8d24802b5e0864d76dc5a05d9 (patch) | |
tree | 07394998150236b94ddf401e0d3efec082c9ab55 /sys | |
parent | 929a8fb33da9bf37ac6d590e39a2e60f00c6c4dd (diff) | |
download | FreeBSD-src-3d8c7d4cabad90e8d24802b5e0864d76dc5a05d9.zip FreeBSD-src-3d8c7d4cabad90e8d24802b5e0864d76dc5a05d9.tar.gz |
Pass vm_page_t instead of physical addresses to pmap_zero_page[_area]()
and pmap_copy_page(). This gets rid of a couple more physical addresses
in upper layers, with the eventual aim of supporting PAE and dealing with
the physical addressing mostly within pmap. (We will need either 64 bit
physical addresses or page indexes, possibly both depending on the
circumstances. Leaving this to pmap itself gives more flexibility.)
Reviewed by: jake
Tested on: i386, ia64 and (I believe) sparc64. (my alpha was hosed)
Diffstat (limited to 'sys')
-rw-r--r-- | sys/alpha/alpha/pmap.c | 20 | ||||
-rw-r--r-- | sys/amd64/amd64/pmap.c | 20 | ||||
-rw-r--r-- | sys/i386/i386/pmap.c | 20 | ||||
-rw-r--r-- | sys/ia64/ia64/pmap.c | 14 | ||||
-rw-r--r-- | sys/powerpc/aim/mmu_oea.c | 7 | ||||
-rw-r--r-- | sys/powerpc/powerpc/mmu_oea.c | 7 | ||||
-rw-r--r-- | sys/powerpc/powerpc/pmap.c | 7 | ||||
-rw-r--r-- | sys/sparc64/sparc64/pmap.c | 12 | ||||
-rw-r--r-- | sys/vm/pmap.h | 6 | ||||
-rw-r--r-- | sys/vm/vm_map.c | 2 | ||||
-rw-r--r-- | sys/vm/vm_page.c | 32 | ||||
-rw-r--r-- | sys/vm/vm_zeroidle.c | 2 |
12 files changed, 73 insertions, 76 deletions
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c index ab4b253..c11c250 100644 --- a/sys/alpha/alpha/pmap.c +++ b/sys/alpha/alpha/pmap.c @@ -1731,9 +1731,9 @@ pmap_growkernel(vm_offset_t addr) nklev2++; vm_page_wire(nkpg); - pa = VM_PAGE_TO_PHYS(nkpg); - pmap_zero_page(pa); + pmap_zero_page(nkpg); + pa = VM_PAGE_TO_PHYS(nkpg); newlev1 = pmap_phys_to_pte(pa) | PG_V | PG_ASM | PG_KRE | PG_KWE; @@ -1765,8 +1765,8 @@ pmap_growkernel(vm_offset_t addr) nklev3++; vm_page_wire(nkpg); + pmap_zero_page(nkpg); pa = VM_PAGE_TO_PHYS(nkpg); - pmap_zero_page(pa); newlev2 = pmap_phys_to_pte(pa) | PG_V | PG_ASM | PG_KRE | PG_KWE; *pte = newlev2; @@ -2709,9 +2709,9 @@ pmap_kernel() */ void -pmap_zero_page(vm_offset_t pa) +pmap_zero_page(vm_page_t m) { - vm_offset_t va = ALPHA_PHYS_TO_K0SEG(pa); + vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m)); bzero((caddr_t) va, PAGE_SIZE); } @@ -2725,9 +2725,9 @@ pmap_zero_page(vm_offset_t pa) */ void -pmap_zero_page_area(vm_offset_t pa, int off, int size) +pmap_zero_page_area(vm_page_t m, int off, int size) { - vm_offset_t va = ALPHA_PHYS_TO_K0SEG(pa); + vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m)); bzero((char *)(caddr_t)va + off, size); } @@ -2738,10 +2738,10 @@ pmap_zero_page_area(vm_offset_t pa, int off, int size) * time. 
*/ void -pmap_copy_page(vm_offset_t src, vm_offset_t dst) +pmap_copy_page(vm_page_t src, vm_page_t dst) { - src = ALPHA_PHYS_TO_K0SEG(src); - dst = ALPHA_PHYS_TO_K0SEG(dst); + src = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(src)); + dst = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(dst)); bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE); } diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c index 0b81cad..7292906 100644 --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -1464,7 +1464,7 @@ _pmap_allocpte(pmap, ptepindex) pteva = VM_MAXUSER_ADDRESS + i386_ptob(ptepindex); bzero((caddr_t) pteva, PAGE_SIZE); } else { - pmap_zero_page(ptepa); + pmap_zero_page(m); } } @@ -1629,8 +1629,8 @@ pmap_growkernel(vm_offset_t addr) nkpt++; vm_page_wire(nkpg); + pmap_zero_page(nkpg); ptppaddr = VM_PAGE_TO_PHYS(nkpg); - pmap_zero_page(ptppaddr); newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); pdir_pde(PTD, kernel_vm_end) = newpdir; @@ -2861,13 +2861,14 @@ pmap_kernel() * the page into KVM and using bzero to clear its contents. */ void -pmap_zero_page(vm_offset_t phys) +pmap_zero_page(vm_page_t m) { + vm_offset_t phys = VM_PAGE_TO_PHYS(m); if (*CMAP2) panic("pmap_zero_page: CMAP2 busy"); - *CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M; + *CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M; invltlb_1pg((vm_offset_t)CADDR2); #if defined(I686_CPU) @@ -2886,13 +2887,14 @@ pmap_zero_page(vm_offset_t phys) * off and size may not cover an area beyond a single hardware page. */ void -pmap_zero_page_area(vm_offset_t phys, int off, int size) +pmap_zero_page_area(vm_page_t m, int off, int size) { + vm_offset_t phys = VM_PAGE_TO_PHYS(m); if (*CMAP2) panic("pmap_zero_page: CMAP2 busy"); - *CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M; + *CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M; invltlb_1pg((vm_offset_t)CADDR2); #if defined(I686_CPU) @@ -2911,7 +2913,7 @@ pmap_zero_page_area(vm_offset_t phys, int off, int size) * time. 
*/ void -pmap_copy_page(vm_offset_t src, vm_offset_t dst) +pmap_copy_page(vm_page_t src, vm_page_t dst) { if (*CMAP1) @@ -2919,8 +2921,8 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst) if (*CMAP2) panic("pmap_copy_page: CMAP2 busy"); - *CMAP1 = PG_V | (src & PG_FRAME) | PG_A; - *CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M; + *CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A; + *CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M; #ifdef I386_CPU invltlb(); #else diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c index 0b81cad..7292906 100644 --- a/sys/i386/i386/pmap.c +++ b/sys/i386/i386/pmap.c @@ -1464,7 +1464,7 @@ _pmap_allocpte(pmap, ptepindex) pteva = VM_MAXUSER_ADDRESS + i386_ptob(ptepindex); bzero((caddr_t) pteva, PAGE_SIZE); } else { - pmap_zero_page(ptepa); + pmap_zero_page(m); } } @@ -1629,8 +1629,8 @@ pmap_growkernel(vm_offset_t addr) nkpt++; vm_page_wire(nkpg); + pmap_zero_page(nkpg); ptppaddr = VM_PAGE_TO_PHYS(nkpg); - pmap_zero_page(ptppaddr); newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); pdir_pde(PTD, kernel_vm_end) = newpdir; @@ -2861,13 +2861,14 @@ pmap_kernel() * the page into KVM and using bzero to clear its contents. */ void -pmap_zero_page(vm_offset_t phys) +pmap_zero_page(vm_page_t m) { + vm_offset_t phys = VM_PAGE_TO_PHYS(m); if (*CMAP2) panic("pmap_zero_page: CMAP2 busy"); - *CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M; + *CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M; invltlb_1pg((vm_offset_t)CADDR2); #if defined(I686_CPU) @@ -2886,13 +2887,14 @@ pmap_zero_page(vm_offset_t phys) * off and size may not cover an area beyond a single hardware page. 
*/ void -pmap_zero_page_area(vm_offset_t phys, int off, int size) +pmap_zero_page_area(vm_page_t m, int off, int size) { + vm_offset_t phys = VM_PAGE_TO_PHYS(m); if (*CMAP2) panic("pmap_zero_page: CMAP2 busy"); - *CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M; + *CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M; invltlb_1pg((vm_offset_t)CADDR2); #if defined(I686_CPU) @@ -2911,7 +2913,7 @@ pmap_zero_page_area(vm_offset_t phys, int off, int size) * time. */ void -pmap_copy_page(vm_offset_t src, vm_offset_t dst) +pmap_copy_page(vm_page_t src, vm_page_t dst) { if (*CMAP1) @@ -2919,8 +2921,8 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst) if (*CMAP2) panic("pmap_copy_page: CMAP2 busy"); - *CMAP1 = PG_V | (src & PG_FRAME) | PG_A; - *CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M; + *CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A; + *CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M; #ifdef I386_CPU invltlb(); #else diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c index 33226ef..c6dfc3a 100644 --- a/sys/ia64/ia64/pmap.c +++ b/sys/ia64/ia64/pmap.c @@ -2113,9 +2113,9 @@ pmap_kernel() */ void -pmap_zero_page(vm_offset_t pa) +pmap_zero_page(vm_page_t m) { - vm_offset_t va = IA64_PHYS_TO_RR7(pa); + vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m)); bzero((caddr_t) va, PAGE_SIZE); } @@ -2129,9 +2129,9 @@ pmap_zero_page(vm_offset_t pa) */ void -pmap_zero_page_area(vm_offset_t pa, int off, int size) +pmap_zero_page_area(vm_page_t m, int off, int size) { - vm_offset_t va = IA64_PHYS_TO_RR7(pa); + vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m)); bzero((char *)(caddr_t)va + off, size); } @@ -2142,10 +2142,10 @@ pmap_zero_page_area(vm_offset_t pa, int off, int size) * time. 
*/ void -pmap_copy_page(vm_offset_t src, vm_offset_t dst) +pmap_copy_page(vm_page_t src, vm_page_t dst) { - src = IA64_PHYS_TO_RR7(src); - dst = IA64_PHYS_TO_RR7(dst); + src = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(src)); + dst = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(dst)); bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE); } diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c index 8a0096c..f5acbe5 100644 --- a/sys/powerpc/aim/mmu_oea.c +++ b/sys/powerpc/aim/mmu_oea.c @@ -817,7 +817,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, } void -pmap_copy_page(vm_offset_t src, vm_offset_t dst) +pmap_copy_page(vm_page_t src, vm_page_t dst) { TODO; } @@ -826,8 +826,9 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst) * Zero a page of physical memory by temporarily mapping it into the tlb. */ void -pmap_zero_page(vm_offset_t pa) +pmap_zero_page(vm_page_t m) { + vm_offset_t pa = VM_PAGE_TO_PHYS(m); caddr_t va; int i; @@ -854,7 +855,7 @@ pmap_zero_page(vm_offset_t pa) } void -pmap_zero_page_area(vm_offset_t pa, int off, int size) +pmap_zero_page_area(vm_page_t m, int off, int size) { TODO; } diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c index 8a0096c..f5acbe5 100644 --- a/sys/powerpc/powerpc/mmu_oea.c +++ b/sys/powerpc/powerpc/mmu_oea.c @@ -817,7 +817,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, } void -pmap_copy_page(vm_offset_t src, vm_offset_t dst) +pmap_copy_page(vm_page_t src, vm_page_t dst) { TODO; } @@ -826,8 +826,9 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst) * Zero a page of physical memory by temporarily mapping it into the tlb. 
*/ void -pmap_zero_page(vm_offset_t pa) +pmap_zero_page(vm_page_t m) { + vm_offset_t pa = VM_PAGE_TO_PHYS(m); caddr_t va; int i; @@ -854,7 +855,7 @@ pmap_zero_page(vm_offset_t pa) } void -pmap_zero_page_area(vm_offset_t pa, int off, int size) +pmap_zero_page_area(vm_page_t m, int off, int size) { TODO; } diff --git a/sys/powerpc/powerpc/pmap.c b/sys/powerpc/powerpc/pmap.c index 8a0096c..f5acbe5 100644 --- a/sys/powerpc/powerpc/pmap.c +++ b/sys/powerpc/powerpc/pmap.c @@ -817,7 +817,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, } void -pmap_copy_page(vm_offset_t src, vm_offset_t dst) +pmap_copy_page(vm_page_t src, vm_page_t dst) { TODO; } @@ -826,8 +826,9 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst) * Zero a page of physical memory by temporarily mapping it into the tlb. */ void -pmap_zero_page(vm_offset_t pa) +pmap_zero_page(vm_page_t m) { + vm_offset_t pa = VM_PAGE_TO_PHYS(m); caddr_t va; int i; @@ -854,7 +855,7 @@ pmap_zero_page(vm_offset_t pa) } void -pmap_zero_page_area(vm_offset_t pa, int off, int size) +pmap_zero_page_area(vm_page_t m, int off, int size) { TODO; } diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c index 03ac071..37ec3139 100644 --- a/sys/sparc64/sparc64/pmap.c +++ b/sys/sparc64/sparc64/pmap.c @@ -1144,7 +1144,7 @@ pmap_pinit(pmap_t pm) m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_RETRY | VM_ALLOC_ZERO); if ((m->flags & PG_ZERO) == 0) - pmap_zero_page(VM_PAGE_TO_PHYS(m)); + pmap_zero_page(m); m->wire_count++; cnt.v_wire_count++; @@ -1598,8 +1598,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, * Zero a page of physical memory by temporarily mapping it into the tlb. 
*/ void -pmap_zero_page(vm_offset_t pa) +pmap_zero_page(vm_page_t m) { + vm_offset_t pa = VM_PAGE_TO_PHYS(m); CTR1(KTR_PMAP, "pmap_zero_page: pa=%#lx", pa); dcache_inval_phys(pa, pa + PAGE_SIZE); @@ -1607,8 +1608,9 @@ pmap_zero_page(vm_offset_t pa) } void -pmap_zero_page_area(vm_offset_t pa, int off, int size) +pmap_zero_page_area(vm_page_t m, int off, int size) { + vm_offset_t pa = VM_PAGE_TO_PHYS(m); CTR3(KTR_PMAP, "pmap_zero_page_area: pa=%#lx off=%#x size=%#x", pa, off, size); @@ -1621,8 +1623,10 @@ pmap_zero_page_area(vm_offset_t pa, int off, int size) * Copy a page of physical memory by temporarily mapping it into the tlb. */ void -pmap_copy_page(vm_offset_t src, vm_offset_t dst) +pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { + vm_offset_t src = VM_PAGE_TO_PHYS(msrc); + vm_offset_t dst = VM_PAGE_TO_PHYS(mdst); CTR2(KTR_PMAP, "pmap_copy_page: src=%#lx dst=%#lx", src, dst); dcache_inval_phys(dst, dst + PAGE_SIZE); diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h index 4edb3da..8db3b03 100644 --- a/sys/vm/pmap.h +++ b/sys/vm/pmap.h @@ -97,7 +97,7 @@ void pmap_clear_modify(vm_page_t m); void pmap_clear_reference(vm_page_t m); void pmap_collect(void); void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t); -void pmap_copy_page(vm_offset_t, vm_offset_t); +void pmap_copy_page(vm_page_t, vm_page_t); void pmap_destroy(pmap_t); void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); @@ -126,8 +126,8 @@ void pmap_reference(pmap_t); void pmap_release(pmap_t); void pmap_remove(pmap_t, vm_offset_t, vm_offset_t); void pmap_remove_pages(pmap_t, vm_offset_t, vm_offset_t); -void pmap_zero_page(vm_offset_t); -void pmap_zero_page_area(vm_offset_t, int off, int size); +void pmap_zero_page(vm_page_t); +void pmap_zero_page_area(vm_page_t, int off, int size); void pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t); int pmap_mincore(pmap_t pmap, vm_offset_t addr); void pmap_new_proc(struct proc *p); diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index 
0fc76a3..12948b1 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -3221,7 +3221,7 @@ vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa) } vm_page_protect(m_in, VM_PROT_NONE); - pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out)); + pmap_copy_page(m_in, m_out); m_out->valid = m_in->valid; vm_page_dirty(m_out); vm_page_activate(m_out); diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index 967ba69..38c505f 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -455,7 +455,7 @@ vm_page_protect(vm_page_t mem, int prot) boolean_t vm_page_zero_fill(vm_page_t m) { - pmap_zero_page(VM_PAGE_TO_PHYS(m)); + pmap_zero_page(m); return (TRUE); } @@ -467,7 +467,7 @@ vm_page_zero_fill(vm_page_t m) void vm_page_copy(vm_page_t src_m, vm_page_t dest_m) { - pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m)); + pmap_copy_page(src_m, dest_m); dest_m->valid = VM_PAGE_BITS_ALL; } @@ -1582,14 +1582,8 @@ vm_page_set_validclean(vm_page_t m, int base, int size) * first block. */ if ((frag = base & ~(DEV_BSIZE - 1)) != base && - (m->valid & (1 << (base >> DEV_BSHIFT))) == 0 - ) { - pmap_zero_page_area( - VM_PAGE_TO_PHYS(m), - frag, - base - frag - ); - } + (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) + pmap_zero_page_area(m, frag, base - frag); /* * If the ending offset is not DEV_BSIZE aligned and the @@ -1598,14 +1592,9 @@ vm_page_set_validclean(vm_page_t m, int base, int size) */ endoff = base + size; if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff && - (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0 - ) { - pmap_zero_page_area( - VM_PAGE_TO_PHYS(m), - endoff, - DEV_BSIZE - (endoff & (DEV_BSIZE - 1)) - ); - } + (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) + pmap_zero_page_area(m, endoff, + DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); /* * Set valid, clear dirty bits. 
If validating the entire @@ -1702,11 +1691,8 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) (m->valid & (1 << i)) ) { if (i > b) { - pmap_zero_page_area( - VM_PAGE_TO_PHYS(m), - b << DEV_BSHIFT, - (i - b) << DEV_BSHIFT - ); + pmap_zero_page_area(m, + b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); } b = i + 1; } diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c index 92e5f27..99ace6e 100644 --- a/sys/vm/vm_zeroidle.c +++ b/sys/vm/vm_zeroidle.c @@ -82,7 +82,7 @@ vm_page_zero_idle(void) TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq); m->queue = PQ_NONE; /* maybe drop out of Giant here */ - pmap_zero_page(VM_PAGE_TO_PHYS(m)); + pmap_zero_page(m); /* and return here */ vm_page_flag_set(m, PG_ZERO); m->queue = PQ_FREE + m->pc; |