summaryrefslogtreecommitdiffstats
path: root/sys/amd64
diff options
context:
space:
mode:
author    alc <alc@FreeBSD.org>  2008-07-06 22:36:28 +0000
committer alc <alc@FreeBSD.org>  2008-07-06 22:36:28 +0000
commit    2e2599b5d01f9e5f8ca6e1713c7d8ec33d9b2a92 (patch)
tree      7c41eb940dafdf3fa7e3f260bc04970a3d1016f4 /sys/amd64
parent    63f02baf0e5ce3db18056a08a90ca971fb12de6d (diff)
download  FreeBSD-src-2e2599b5d01f9e5f8ca6e1713c7d8ec33d9b2a92.zip
download  FreeBSD-src-2e2599b5d01f9e5f8ca6e1713c7d8ec33d9b2a92.tar.gz
Change create_pagetables() and pmap_init() so that many fewer page table
pages have to be preallocated by create_pagetables().
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/pmap.c     | 18 +-
-rw-r--r--  sys/amd64/include/pmap.h   |  4 +-
2 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index a8c0b3c..642ff59 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -440,16 +440,16 @@ create_pagetables(vm_paddr_t *firstaddr)
/* Read-only from zero to physfree */
/* XXX not fully used, underneath 2M pages */
for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
- ((pt_entry_t *)KPTphys)[(KERNBASE - VM_MIN_KERNEL_ADDRESS) /
- PAGE_SIZE + i] = i << PAGE_SHIFT;
- ((pt_entry_t *)KPTphys)[(KERNBASE - VM_MIN_KERNEL_ADDRESS) /
- PAGE_SIZE + i] |= PG_RW | PG_V | PG_G;
+ ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT;
+ ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V | PG_G;
}
/* Now map the page tables at their location within PTmap */
for (i = 0; i < NKPT; i++) {
- ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
- ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
+ ((pd_entry_t *)KPDphys)[(KERNBASE - VM_MIN_KERNEL_ADDRESS) /
+ NBPDR + i] = KPTphys + (i << PAGE_SHIFT);
+ ((pd_entry_t *)KPDphys)[(KERNBASE - VM_MIN_KERNEL_ADDRESS) /
+ NBPDR + i] |= PG_RW | PG_V;
}
/* Map from zero to end of allocations under 2M pages */
@@ -647,15 +647,17 @@ pmap_init(void)
* Initialize the vm page array entries for the kernel pmap's
* page table pages.
*/
- pd = pmap_pde(kernel_pmap, VM_MIN_KERNEL_ADDRESS);
+ pd = pmap_pde(kernel_pmap, KERNBASE);
for (i = 0; i < NKPT; i++) {
if ((pd[i] & (PG_PS | PG_V)) == (PG_PS | PG_V))
continue;
+ KASSERT((pd[i] & PG_V) != 0,
+ ("pmap_init: page table page is missing"));
mpte = PHYS_TO_VM_PAGE(pd[i] & PG_FRAME);
KASSERT(mpte >= vm_page_array &&
mpte < &vm_page_array[vm_page_array_size],
("pmap_init: page table page is out of range"));
- mpte->pindex = pmap_pde_pindex(VM_MIN_KERNEL_ADDRESS) + i;
+ mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
mpte->phys_addr = pd[i] & PG_FRAME;
}
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 54b4484..049922b 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -111,11 +111,11 @@
/* Initial number of kernel page tables. */
#ifndef NKPT
-#define NKPT 2688
+#define NKPT 32
#endif
#define NKPML4E 1 /* number of kernel PML4 slots */
-#define NKPDPE howmany(NKPT, NPDEPG)/* number of kernel PDP slots */
+#define NKPDPE 6 /* number of kernel PDP slots */
#define NUPML4E (NPML4EPG/2) /* number of userland PML4 pages */
#define NUPDPE (NUPML4E*NPDPEPG)/* number of userland PDP pages */
OpenPOWER on IntegriCloud