| author | peter <peter@FreeBSD.org> | 2003-12-06 23:18:42 +0000 |
|---|---|---|
| committer | peter <peter@FreeBSD.org> | 2003-12-06 23:18:42 +0000 |
| commit | 09751b87c2960d25112bc75aa65de9b27e88dcbc (patch) | |
| tree | 264ceb34aa2fac8d33d825a983da1a0484ceaab0 /sys/amd64 | |
| parent | e90d8494fb40aebc8bf53f073340f14c144ec0e7 (diff) | |
amd64_protection_init() and the protection_codes[] array were overkill.
Compute the protection bits inline instead.
Approved by: re (scottl)
Diffstat (limited to 'sys/amd64')
| -rw-r--r-- | sys/amd64/amd64/pmap.c | 58 |
|---|---|---|

1 file changed, 8 insertions(+), 50 deletions(-)
```diff
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index f0dc985..5dddd7b 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -40,7 +40,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
+ * from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
  */
 /*-
  * Copyright (c) 2003 Networks Associates Technology, Inc.
@@ -158,13 +158,6 @@ __FBSDID("$FreeBSD$");
 #define PMAP_INLINE
 #endif
 
-/*
- * Given a map and a machine independent protection code,
- * convert to a vax protection code.
- */
-#define pte_prot(m, p)	(protection_codes[p])
-static pt_entry_t protection_codes[8];
-
 struct pmap kernel_pmap_store;
 LIST_HEAD(pmaplist, pmap);
 static struct pmaplist allpmaps;
@@ -219,7 +212,6 @@ static caddr_t crashdumpmap;
 
 static PMAP_INLINE void	free_pv_entry(pv_entry_t pv);
 static pv_entry_t get_pv_entry(void);
-static void	amd64_protection_init(void);
 static void	pmap_clear_ptes(vm_page_t m, int bit) __always_inline;
 
@@ -477,11 +469,6 @@ pmap_bootstrap(firstaddr)
 	load_cr3(KPML4phys);
 
-	/*
-	 * Initialize protection array.
-	 */
-	amd64_protection_init();
-
 	/*
 	 * Initialize the kernel pmap (which is statically allocated).
 	 */
 	kernel_pmap->pm_pml4 = (pdp_entry_t *) (KERNBASE + KPML4phys);
@@ -2095,8 +2082,13 @@ validate:
 	/*
 	 * Now validate mapping with desired protection/wiring.
 	 */
-	newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) | PG_V);
-
+	newpte = (pt_entry_t)(pa | PG_V);
+	if ((prot & VM_PROT_WRITE) != 0)
+		newpte |= PG_RW;
+#ifdef PG_NX
+	if ((prot & VM_PROT_EXECUTE) == 0)
+		newpte |= PG_NX;
+#endif
 	if (wired)
 		newpte |= PG_W;
 	if (va < VM_MAXUSER_ADDRESS)
@@ -2845,40 +2837,6 @@ pmap_clear_reference(vm_page_t m)
  * Miscellaneous support routines follow
  */
 
-static void
-amd64_protection_init()
-{
-	register long *kp, prot;
-
-#if 0
-#define PG_NX (1ul << 63)
-#else
-#define PG_NX 0
-#endif
-
-	kp = protection_codes;
-	for (prot = 0; prot < 8; prot++) {
-		switch (prot) {
-		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
-		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
-			*kp++ = PG_NX;
-			break;
-		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
-		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
-			*kp++ = 0;
-			break;
-		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
-		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
-			*kp++ = PG_RW | PG_NX;
-			break;
-		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
-		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
-			*kp++ = PG_RW;
-			break;
-		}
-	}
-}
-
 /*
  * Map a set of physical memory pages into the kernel virtual
  * address space. Return a pointer to where it is mapped. This
```
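The effect of the change is easiest to see in isolation: the old protection_codes[] table indexed eight precomputed PTE bit patterns by VM_PROT_* value, while the new code in pmap_enter() derives the same bits with two tests. Below is a minimal stand-alone C sketch of that inlined logic. The VM_PROT_* and PG_* constants are redefined locally (with the values used by FreeBSD's headers) so the example compiles on its own, and prot_to_pte() is a hypothetical helper name for illustration, not a function in pmap.c.

```c
#include <stdint.h>
#include <stdio.h>

/* Machine-independent protection bits (values as in FreeBSD's vm/vm.h). */
#define VM_PROT_READ	0x01
#define VM_PROT_WRITE	0x02
#define VM_PROT_EXECUTE	0x04

/* amd64 page-table entry bits (values as in FreeBSD's amd64 pmap.h). */
#define PG_V	0x001ULL	/* valid */
#define PG_RW	0x002ULL	/* read/write */
#define PG_NX	(1ULL << 63)	/* no-execute */

typedef uint64_t pt_entry_t;

/*
 * Hypothetical helper: compute the PTE for physical address 'pa' with
 * the requested protection, mirroring the logic the commit inlines.
 */
static pt_entry_t
prot_to_pte(uint64_t pa, int prot)
{
	pt_entry_t newpte = (pt_entry_t)(pa | PG_V);

	if ((prot & VM_PROT_WRITE) != 0)
		newpte |= PG_RW;	/* writable pages get PG_RW */
	if ((prot & VM_PROT_EXECUTE) == 0)
		newpte |= PG_NX;	/* non-executable pages get PG_NX */
	return (newpte);
}

int
main(void)
{
	/* A read/write, non-executable mapping of the page at 0x200000. */
	pt_entry_t pte = prot_to_pte(0x200000,
	    VM_PROT_READ | VM_PROT_WRITE);

	printf("pte = %#jx\n", (uintmax_t)pte);
	return (0);
}
```

Note that the committed version wraps the PG_NX test in #ifdef PG_NX: as the removed amd64_protection_init() shows (PG_NX was still defined to 0 behind an #if 0), the no-execute bit was not yet enabled at the time, so the kernel only sets it when the macro is defined. The sketch sets it unconditionally for brevity.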