Diffstat (limited to 'sys/i386')
-rw-r--r--  sys/i386/i386/pmap.c        242
-rw-r--r--  sys/i386/i386/support.s      12
-rw-r--r--  sys/i386/include/cpufunc.h    4
-rw-r--r--  sys/i386/include/pmap.h      26
4 files changed, 264 insertions(+), 20 deletions(-)
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index eeb66f7..2f40833 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.146 1997/06/22 16:03:29 peter Exp $
+ * $Id: pmap.c,v 1.147 1997/06/25 20:07:50 tegge Exp $
*/
/*
@@ -161,6 +161,7 @@ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static vm_offset_t vm_first_phys;
static int pgeflag; /* PG_G or-in */
+static int pseflag; /* PG_PS or-in */
static int nkpt;
static vm_page_t nkpg;
@@ -220,10 +221,12 @@ static unsigned * pmap_pte_quick __P((pmap_t pmap, vm_offset_t va));
static vm_page_t pmap_page_alloc __P((vm_object_t object, vm_pindex_t pindex));
static vm_page_t pmap_page_lookup __P((vm_object_t object, vm_pindex_t pindex));
static int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
+vm_offset_t pmap_kmem_choose(vm_offset_t addr);
#define PDSTACKMAX 6
static vm_offset_t pdstack[PDSTACKMAX];
static int pdstackptr;
+unsigned pdir4mb;
/*
* Routine: pmap_pte
@@ -244,6 +247,21 @@ pmap_pte(pmap, va)
}
/*
+ * Move the kernel virtual free pointer to the next
+ * 4MB. This is used to help improve performance
+ * by using a large (4MB) page for much of the kernel
+ * (.text, .data, .bss)
+ */
+vm_offset_t
+pmap_kmem_choose(vm_offset_t addr) {
+ vm_offset_t newaddr = addr;
+ if (cpu_feature & CPUID_PSE) {
+ newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
+ }
+ return newaddr;
+}
+
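As a standalone sketch (not part of the commit), the rounding above can be tried in isolation, assuming NBPDR == 4MB (1 << 22) as on i386:

	#include <stdio.h>

	#define NBPDR	(1u << 22)	/* bytes mapped by one i386 PDE: 4MB */

	/* Round an address up to the next 4MB boundary, as pmap_kmem_choose does. */
	static unsigned
	round_up_4mb(unsigned addr)
	{
		return ((addr + (NBPDR - 1)) & ~(NBPDR - 1));
	}

	int
	main(void)
	{
		printf("%#x -> %#x\n", 0x123456u, round_up_4mb(0x123456u));
		return (0);
	}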
+/*
* Bootstrap the system enough to run with virtual memory.
*
* On the i386 this is called after mapping has already been enabled
@@ -273,6 +291,8 @@ pmap_bootstrap(firstaddr, loadaddr)
* in this calculation.
*/
virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
+ virtual_avail = pmap_kmem_choose(virtual_avail);
+
virtual_end = VM_MAX_KERNEL_ADDRESS;
/*
@@ -367,14 +387,86 @@ pmap_bootstrap(firstaddr, loadaddr)
invltlb();
+ pgeflag = 0;
#if !defined(SMP)
if (cpu_feature & CPUID_PGE)
pgeflag = PG_G;
- else
#endif /* !SMP */
- pgeflag = 0;
+
+/*
+ * Initialize the 4MB page size flag
+ */
+ pseflag = 0;
+/*
+ * The 4MB page version of the initial
+ * kernel page mapping.
+ */
+ pdir4mb = 0;
+
+ if (cpu_feature & CPUID_PSE) {
+ unsigned ptditmp;
+ /*
+ * Enable the PSE mode
+ */
+ load_cr4(rcr4() | CR4_PSE);
+
+ /*
+ * Note that we have enabled PSE mode
+ */
+ pseflag = PG_PS;
+ ptditmp = (unsigned) kernel_pmap->pm_pdir[KPTDI];
+ ptditmp &= ~(NBPDR - 1);
+ ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
+ pdir4mb = ptditmp;
+ /*
+ * We can do the mapping here for the single processor
+ * case. We simply ignore the old page table page from
+ * now on.
+ */
+#if !defined(SMP)
+ PTD[KPTDI] = (pd_entry_t) ptditmp;
+ kernel_pmap->pm_pdir[KPTDI] = (pd_entry_t) ptditmp;
+ invltlb();
+#endif
+ }
}
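For reference, a freestanding sketch of the PDE the PSE block above builds (not part of the commit; flag values are the i386 ones: PG_V 0x001, PG_RW 0x002, PG_U 0x004, PG_PS 0x080, PG_G 0x100):

	#define NBPDR	(1u << 22)
	#define PG_V	0x001		/* valid */
	#define PG_RW	0x002		/* writable */
	#define PG_U	0x004		/* user accessible */
	#define PG_PS	0x080		/* 4MB page size */
	#define PG_G	0x100		/* global; requires CR4.PGE */

	/* Build a 4MB page-directory entry: 4MB-aligned physical base plus flags. */
	static unsigned
	make_pde4mb(unsigned pa, unsigned pgeflag)
	{
		pa &= ~(NBPDR - 1);	/* keep only the 4MB-aligned frame */
		return (pa | PG_V | PG_RW | PG_PS | PG_U | pgeflag);
	}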
+#if defined(SMP)
+/*
+ * Set 4mb pdir for mp startup, and global flags
+ */
+void
+pmap_set_opt(unsigned *pdir) {
+ int i;
+
+ if (pseflag && (cpu_feature & CPUID_PSE)) {
+ load_cr4(rcr4() | CR4_PSE);
+ if (pdir4mb) {
+ (unsigned) pdir[KPTDI] = pdir4mb;
+ }
+ }
+
+ if (cpu_feature & CPUID_PGE) {
+ load_cr4(rcr4() | CR4_PGE);
+ for(i = KPTDI; i < KPTDI + nkpt; i++) {
+ if (pdir[i]) {
+ pdir[i] |= PG_G;
+ }
+ }
+ }
+}
+
+/*
+ * Setup the PTD for the boot processor
+ */
+void
+pmap_set_opt_bsp(void) {
+ pmap_set_opt((unsigned *)kernel_pmap->pm_pdir);
+ pmap_set_opt((unsigned *)PTD);
+ invltlb();
+}
+#endif
+
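The cpu_feature tests above come from CPUID leaf 1. As an illustration (a user-space sketch, not kernel code), the same bits can be probed directly; EDX bit 3 is PSE and bit 13 is PGE:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned eax = 1, ebx, ecx, edx;

		__asm__ volatile("cpuid"
		    : "+a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx));
		printf("PSE (4MB pages):    %s\n", (edx & (1u << 3)) ? "yes" : "no");
		printf("PGE (global pages): %s\n", (edx & (1u << 13)) ? "yes" : "no");
		return (0);
	}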
/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
@@ -571,8 +663,15 @@ pmap_extract(pmap, va)
vm_offset_t va;
{
vm_offset_t rtval;
- if (pmap && *pmap_pde(pmap, va)) {
+ vm_offset_t pdirindex;
+ pdirindex = va >> PDRSHIFT;
+ if (pmap) {
unsigned *pte;
+ if (((rtval = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
+ rtval &= ~(NBPDR - 1);
+ rtval |= va & (NBPDR - 1);
+ return rtval;
+ }
pte = get_ptbase(pmap) + i386_btop(va);
rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
return rtval;
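The two translation cases above reduce to simple arithmetic. A sketch with illustrative constants (PDRSHIFT is 22 on i386; values as in pmap.h):

	#define PDRSHIFT	22
	#define NBPDR		(1u << PDRSHIFT)
	#define PG_PS		0x080
	#define PG_FRAME	0xfffff000u
	#define PAGE_MASK	0xfffu

	/* Translate va given its PDE and, for the 4K case, its PTE. */
	static unsigned
	translate(unsigned pde, unsigned pte, unsigned va)
	{
		if (pde & PG_PS)	/* 4MB page: the PDE holds the frame */
			return ((pde & ~(NBPDR - 1)) | (va & (NBPDR - 1)));
		return ((pte & PG_FRAME) | (va & PAGE_MASK));
	}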
@@ -738,8 +837,7 @@ pmap_new_proc(p)
/*
* allocate object for the upages
*/
- upobj = vm_object_allocate( OBJT_DEFAULT,
- UPAGES);
+ upobj = vm_object_allocate( OBJT_DEFAULT, UPAGES);
p->p_upages_obj = upobj;
/* get a kernel virtual address for the UPAGES for this proc */
@@ -1213,6 +1311,16 @@ pmap_allocpte(pmap, va)
ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
/*
+ * This supports switching from a 4MB page to a
+ * normal 4K page.
+ */
+ if (ptepa & PG_PS) {
+ pmap->pm_pdir[ptepindex] = 0;
+ ptepa = 0;
+ invltlb();
+ }
+
+ /*
* If the page table page is mapped, we just increment the
* hold count, and activate it.
*/
@@ -1328,7 +1436,7 @@ pmap_growkernel(vm_offset_t addr)
vm_page_remove(nkpg);
pmap_zero_page(VM_PAGE_TO_PHYS(nkpg));
}
- pdir_pde(PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_RW);
+ pdir_pde(PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_RW | pgeflag);
nkpg = NULL;
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
@@ -1690,7 +1798,8 @@ pmap_remove(pmap, sva, eva)
* common operation and easy to short circuit some
* code.
*/
- if ((sva + PAGE_SIZE) == eva) {
+ if (((sva + PAGE_SIZE) == eva) &&
+ (((unsigned) pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
pmap_remove_page(pmap, sva);
return;
}
@@ -1707,6 +1816,7 @@ pmap_remove(pmap, sva, eva)
eindex = i386_btop(eva);
for (; sindex < eindex; sindex = pdnxt) {
+ unsigned pdirindex;
/*
* Calculate index for next page table.
@@ -1714,7 +1824,14 @@ pmap_remove(pmap, sva, eva)
pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
if (pmap->pm_stats.resident_count == 0)
break;
- ptpaddr = (vm_offset_t) *pmap_pde(pmap, i386_ptob(sindex));
+
+ pdirindex = sindex / NPDEPG;
+ if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
+ pmap->pm_pdir[pdirindex] = 0;
+ pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
+ anyvalid++;
+ continue;
+ }
/*
* Weed out invalid mappings. Note: we assume that the page
@@ -1867,8 +1984,17 @@ pmap_protect(pmap, sva, eva, prot)
for (; sindex < eindex; sindex = pdnxt) {
+ unsigned pdirindex;
+
pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
- ptpaddr = (vm_offset_t) *pmap_pde(pmap, i386_ptob(sindex));
+
+ pdirindex = sindex / NPDEPG;
+ if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
+ (unsigned) pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
+ pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
+ anychanged++;
+ continue;
+ }
/*
* Weed out invalid mappings. Note: we assume that the page
@@ -1968,6 +2094,8 @@ pmap_enter(pmap, va, pa, prot, wired)
origpte = *(vm_offset_t *)pte;
pa &= PG_FRAME;
opa = origpte & PG_FRAME;
+ if (origpte & PG_PS)
+ panic("pmap_enter: attempted pmap_enter on 4MB page");
/*
* Mapping has not changed, must be protection or wiring change.
@@ -2108,6 +2236,8 @@ retry:
* the hold count, and activate it.
*/
if (ptepa) {
+ if (ptepa & PG_PS)
+ panic("pmap_enter_quick: unexpected mapping into 4MB page");
#if defined(PTPHINT)
if (pmap->pm_ptphint &&
(pmap->pm_ptphint->pindex == ptepindex)) {
@@ -2183,9 +2313,77 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
vm_page_t p, mpte;
int objpgs;
+ if (!pmap)
+ return;
+
+ /*
+ * This code maps large physical mmap regions into the
+ * processor address space. Note that some shortcuts
+ * are taken, but the code works.
+ */
+ if (pseflag &&
+ (object->type == OBJT_DEVICE) &&
+ ((size & (NBPDR - 1)) == 0) ) {
+ int i;
+ int s;
+ vm_page_t m[1];
+ unsigned int ptepindex;
+ int npdes;
+ vm_offset_t ptepa;
+
+ if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
+ return;
+
+ s = splhigh();
+retry:
+ p = vm_page_lookup(object, pindex);
+ if (p && (p->flags & PG_BUSY)) {
+ tsleep(p, PVM, "init4p", 0);
+ goto retry;
+ }
+ splx(s);
+
+ if (p == NULL) {
+ p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
+ if (p == NULL)
+ return;
+ m[0] = p;
+
+ if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
+ PAGE_WAKEUP(p);
+ vm_page_free(p);
+ return;
+ }
+
+ p = vm_page_lookup(object, pindex);
+ PAGE_WAKEUP(p);
+ }
+
+ ptepa = (vm_offset_t) VM_PAGE_TO_PHYS(p);
+ if (ptepa & (NBPDR - 1)) {
+ return;
+ }
+
+ p->valid = VM_PAGE_BITS_ALL;
+
+ pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
+ npdes = size >> PDRSHIFT;
+ for(i=0;i<npdes;i++) {
+ pmap->pm_pdir[ptepindex] =
+ (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_PS);
+ ptepa += NBPDR;
+ ptepindex += 1;
+ }
+ p->flags |= PG_MAPPED;
+#if 0
+ invltlb();
+#endif
+ return;
+ }
+
psize = i386_btop(size);
- if (!pmap || (object->type != OBJT_VNODE) ||
+ if ((object->type != OBJT_VNODE) ||
(limit && (psize > MAX_INIT_PT) &&
(object->resident_page_count > MAX_INIT_PT))) {
return;
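The block above declines physical bases that are not 4MB aligned, since a PG_PS directory entry can only map a 4MB-aligned frame. A sketch of that test and of the PDE count it then loops over (names illustrative, constants as before):

	/* A base can back a 4MB mapping only if its low 22 bits are zero. */
	static int
	is_4mb_aligned(unsigned pa)
	{
		return ((pa & (NBPDR - 1)) == 0);
	}

	/* PDEs needed for a size that is a multiple of NBPDR. */
	static int
	npdes_for(unsigned size)
	{
		return (size >> PDRSHIFT);
	}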
@@ -2424,6 +2622,14 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
if (srcptepaddr == 0)
continue;
+
+ if (srcptepaddr & PG_PS) {
+ if (dst_pmap->pm_pdir[ptepindex] == 0) {
+ dst_pmap->pm_pdir[ptepindex] = (pd_entry_t) srcptepaddr;
+ dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
+ }
+ continue;
+ }
srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
if ((srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
@@ -2960,7 +3166,7 @@ pmap_mapdev(pa, size)
pa = pa & PG_FRAME;
for (tmpva = va; size > 0;) {
pte = (unsigned *)vtopte(tmpva);
- *pte = pa | PG_RW | PG_V;
+ *pte = pa | PG_RW | PG_V | pgeflag;
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
pa += PAGE_SIZE;
@@ -3028,6 +3234,18 @@ pmap_activate(struct proc *p)
vtophys(p->p_vmspace->vm_pmap.pm_pdir));
}
+vm_offset_t
+pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) {
+
+ if ((size < NBPDR) || (obj->type != OBJT_DEVICE)) {
+ return addr;
+ }
+
+ addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
+ return addr;
+}
+
+
#if defined(PMAP_DEBUG)
pmap_pid_dump(int pid) {
pmap_t pmap;
diff --git a/sys/i386/i386/support.s b/sys/i386/i386/support.s
index be18df8..3f5c5fb 100644
--- a/sys/i386/i386/support.s
+++ b/sys/i386/i386/support.s
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: support.s,v 1.53 1997/05/29 05:11:10 peter Exp $
+ * $Id: support.s,v 1.54 1997/06/22 16:03:33 peter Exp $
*/
#include "npx.h"
@@ -1522,6 +1522,16 @@ ENTRY(load_cr3)
movl %eax,%cr3
ret
+/* rcr4() */
+ENTRY(rcr4)
+ movl %cr4,%eax
+ ret
+
+/* void load_cr4(caddr_t cr4) */
+ENTRY(load_cr4)
+ movl 4(%esp),%eax
+ movl %eax,%cr4
+ ret
/*****************************************************************************/
/* setjump, longjump */
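Together with the prototypes added to cpufunc.h below, the new entries are used as in pmap_bootstrap above. An illustrative sketch (CR4 bit values per the i386 manuals: PSE is bit 4, PGE is bit 7):

	extern unsigned long rcr4(void);
	extern void load_cr4(unsigned long cr4);

	#define CR4_PSE	0x00000010	/* page size extensions (4MB pages) */
	#define CR4_PGE	0x00000080	/* global page enable */

	/* Enable 4MB and global pages while preserving the other CR4 bits. */
	static void
	enable_big_pages(void)
	{
		load_cr4(rcr4() | CR4_PSE | CR4_PGE);
	}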
diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h
index 693b568..41e1bd7 100644
--- a/sys/i386/include/cpufunc.h
+++ b/sys/i386/include/cpufunc.h
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: cpufunc.h,v 1.67 1997/05/21 22:56:04 jdp Exp $
+ * $Id: cpufunc.h,v 1.68 1997/05/31 09:13:03 peter Exp $
*/
/*
@@ -416,8 +416,10 @@ void wrmsr __P((u_int msr, quad_t newval));
void load_cr0 __P((u_long cr0));
void load_cr3 __P((u_long cr3));
+void load_cr4 __P((u_long cr4));
void ltr __P((u_short sel));
u_int rcr0 __P((void));
u_long rcr3 __P((void));
+u_long rcr4 __P((void));
#endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index f00d4f2..3265129 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -42,7 +42,7 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
- * $Id: pmap.h,v 1.50 1997/04/26 11:45:41 peter Exp $
+ * $Id: pmap.h,v 1.51 1997/06/22 16:03:54 peter Exp $
*/
#ifndef _MACHINE_PMAP_H_
@@ -144,6 +144,7 @@ extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde, Upde;
extern pd_entry_t IdlePTD; /* physical address of "Idle" state directory */
#endif
+#ifdef KERNEL
/*
* virtual address to page table entry and
* to physical address. Likewise for alternate address space.
@@ -151,12 +152,9 @@ extern pd_entry_t IdlePTD; /* physical address of "Idle" state directory */
* the corresponding pde that in turn maps it.
*/
#define vtopte(va) (PTmap + i386_btop(va))
-#define vtophys(va) (((int) (*vtopte(va))&PG_FRAME) | ((int)(va) & PAGE_MASK))
#define avtopte(va) (APTmap + i386_btop(va))
-#define avtophys(va) (((int) (*avtopte(va))&PG_FRAME) | ((int)(va) & PAGE_MASK))
-#ifdef KERNEL
/*
* Routine: pmap_kextract
* Function:
@@ -166,10 +164,24 @@ extern pd_entry_t IdlePTD; /* physical address of "Idle" state directory */
static __inline vm_offset_t
pmap_kextract(vm_offset_t va)
{
- vm_offset_t pa = *(int *)vtopte(va);
- pa = (pa & PG_FRAME) | (va & PAGE_MASK);
+ vm_offset_t pa;
+ if ((pa = (vm_offset_t) PTD[va >> PDRSHIFT]) & PG_PS) {
+ pa = (pa & ~(NBPDR - 1)) | (va & (NBPDR - 1));
+ } else {
+ pa = *(vm_offset_t *)vtopte(va);
+ pa = (pa & PG_FRAME) | (va & PAGE_MASK);
+ }
return pa;
}
+
+#if 0
+#define vtophys(va) (((int) (*vtopte(va))&PG_FRAME) | ((int)(va) & PAGE_MASK))
+#else
+#define vtophys(va) pmap_kextract(((vm_offset_t) (va)))
+#endif
+
+#define avtophys(va) (((int) (*avtopte(va))&PG_FRAME) | ((int)(va) & PAGE_MASK))
+
#endif
/*
@@ -237,6 +249,8 @@ pmap_t pmap_kernel __P((void));
void *pmap_mapdev __P((vm_offset_t, vm_size_t));
unsigned *pmap_pte __P((pmap_t, vm_offset_t)) __pure2;
vm_page_t pmap_use_pt __P((pmap_t, vm_offset_t));
+void pmap_set_opt __P((unsigned *));
+void pmap_set_opt_bsp __P((void));
#endif /* KERNEL */