-rw-r--r--  sys/amd64/amd64/locore.S          52
-rw-r--r--  sys/amd64/amd64/locore.s          52
-rw-r--r--  sys/amd64/amd64/pmap.c            59
-rw-r--r--  sys/amd64/include/specialreg.h    35
-rw-r--r--  sys/i386/i386/locore.s            52
-rw-r--r--  sys/i386/i386/pmap.c              59
-rw-r--r--  sys/i386/include/specialreg.h     35
7 files changed, 255 insertions(+), 89 deletions(-)
diff --git a/sys/amd64/amd64/locore.S b/sys/amd64/amd64/locore.S
index 07cad14..13c042e 100644
--- a/sys/amd64/amd64/locore.S
+++ b/sys/amd64/amd64/locore.S
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
- * $Id: locore.s,v 1.74 1996/10/05 10:43:58 jkh Exp $
+ * $Id: locore.s,v 1.75 1996/10/09 19:47:16 bde Exp $
*
* originally from: locore.s, by William F. Jolitz
*
@@ -164,9 +164,10 @@ _bdb_exists: .long 0
* prot = protection bits
*/
#define fillkpt(base, prot) \
- shll $2, %ebx ; \
- addl base, %ebx ; \
- orl $PG_V+prot, %eax ; \
+ shll $2,%ebx ; \
+ addl base,%ebx ; \
+ orl $PG_V,%eax ; \
+ orl prot,%eax ; \
1: movl %eax,(%ebx) ; \
addl $PAGE_SIZE,%eax ; /* increment physical address */ \
addl $4,%ebx ; /* next pte */ \
@@ -699,6 +700,13 @@ identify_cpu:
create_pagetables:
+ testl $CPUID_PGE, R(_cpu_feature)
+ jz 1f
+ movl %cr4, %eax
+ orl $CR4_PGE, %eax
+ movl %eax, %cr4
+1:
+
/* Find end of kernel image (rounded up to a page boundary). */
movl $R(_end),%esi
@@ -744,70 +752,80 @@ over_symalloc:
cmpl $0,R(_bdb_exists)
jne map_read_write
#endif
- movl $R(_etext),%ecx
+ xorl %edx,%edx
+ testl $CPUID_PGE, R(_cpu_feature)
+ jz 2f
+ orl $PG_G,%edx
+
+2: movl $R(_etext),%ecx
addl $PAGE_MASK,%ecx
shrl $PAGE_SHIFT,%ecx
- fillkptphys(0)
+ fillkptphys(%edx)
/* Map read-write, data, bss and symbols */
movl $R(_etext),%eax
addl $PAGE_MASK, %eax
andl $~PAGE_MASK, %eax
map_read_write:
- movl R(_KERNend),%ecx
+ movl $PG_RW,%edx
+ testl $CPUID_PGE, R(_cpu_feature)
+ jz 1f
+ orl $PG_G,%edx
+
+1: movl R(_KERNend),%ecx
subl %eax,%ecx
shrl $PAGE_SHIFT,%ecx
- fillkptphys(PG_RW)
+ fillkptphys(%edx)
/* Map page directory. */
movl R(_IdlePTD), %eax
movl $1, %ecx
- fillkptphys(PG_RW)
+ fillkptphys($PG_RW)
/* Map proc0's page table for the UPAGES. */
movl R(p0upt), %eax
movl $1, %ecx
- fillkptphys(PG_RW)
+ fillkptphys($PG_RW)
/* Map proc0's UPAGES in the physical way ... */
movl R(p0upa), %eax
movl $UPAGES, %ecx
- fillkptphys(PG_RW)
+ fillkptphys($PG_RW)
/* Map ISA hole */
movl $ISA_HOLE_START, %eax
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
- fillkptphys(PG_RW|PG_N)
+ fillkptphys($PG_RW|PG_N)
/* Map proc0s UPAGES in the special page table for this purpose ... */
movl R(p0upa), %eax
movl $KSTKPTEOFF, %ebx
movl $UPAGES, %ecx
- fillkpt(R(p0upt), PG_RW)
+ fillkpt(R(p0upt), $PG_RW)
/* ... and put the page table in the pde. */
movl R(p0upt), %eax
movl $KSTKPTDI, %ebx
movl $1, %ecx
- fillkpt(R(_IdlePTD), PG_RW)
+ fillkpt(R(_IdlePTD), $PG_RW)
/* install a pde for temporary double map of bottom of VA */
movl R(_KPTphys), %eax
xorl %ebx, %ebx
movl $1, %ecx
- fillkpt(R(_IdlePTD), PG_RW)
+ fillkpt(R(_IdlePTD), $PG_RW)
/* install pde's for pt's */
movl R(_KPTphys), %eax
movl $KPTDI, %ebx
movl $NKPT, %ecx
- fillkpt(R(_IdlePTD), PG_RW)
+ fillkpt(R(_IdlePTD), $PG_RW)
/* install a pde recursively mapping page directory as a page table */
movl R(_IdlePTD), %eax
movl $PTDPTDI, %ebx
movl $1,%ecx
- fillkpt(R(_IdlePTD), PG_RW)
+ fillkpt(R(_IdlePTD), $PG_RW)
ret
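
A minimal C rendering of what the fillkptphys/%edx dance above computes: the
protection word for a kernel PTE, with PG_G ORed in only when CPUID advertises
PGE. The PG_V/PG_RW/PG_G values below match the i386 pmap.h bits, but take
them (and the helper name) as illustrative assumptions, not the committed code.

	#include <stdio.h>

	#define CPUID_PGE 0x2000	/* from the specialreg.h hunk below */
	#define PG_V      0x001		/* assumed i386 PTE bits */
	#define PG_RW     0x002
	#define PG_G      0x100

	/* Hypothetical helper: the prot value locore builds in %edx before
	 * each fillkptphys() call -- kernel text stays read-only, data is
	 * read-write, and both become global when the CPU supports PGE. */
	static unsigned kernel_pte_prot(unsigned cpu_feature, int writable)
	{
		unsigned prot = PG_V | (writable ? PG_RW : 0);
		if (cpu_feature & CPUID_PGE)
			prot |= PG_G;
		return prot;
	}

	int main(void)
	{
		unsigned feat = CPUID_PGE;	/* pretend CPUID reported PGE */
		printf("text pte prot: %#x\n", kernel_pte_prot(feat, 0));
		printf("data pte prot: %#x\n", kernel_pte_prot(feat, 1));
		return 0;
	}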
diff --git a/sys/amd64/amd64/locore.s b/sys/amd64/amd64/locore.s
index 07cad14..13c042e 100644
--- a/sys/amd64/amd64/locore.s
+++ b/sys/amd64/amd64/locore.s
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
- * $Id: locore.s,v 1.74 1996/10/05 10:43:58 jkh Exp $
+ * $Id: locore.s,v 1.75 1996/10/09 19:47:16 bde Exp $
*
* originally from: locore.s, by William F. Jolitz
*
@@ -164,9 +164,10 @@ _bdb_exists: .long 0
* prot = protection bits
*/
#define fillkpt(base, prot) \
- shll $2, %ebx ; \
- addl base, %ebx ; \
- orl $PG_V+prot, %eax ; \
+ shll $2,%ebx ; \
+ addl base,%ebx ; \
+ orl $PG_V,%eax ; \
+ orl prot,%eax ; \
1: movl %eax,(%ebx) ; \
addl $PAGE_SIZE,%eax ; /* increment physical address */ \
addl $4,%ebx ; /* next pte */ \
@@ -699,6 +700,13 @@ identify_cpu:
create_pagetables:
+ testl $CPUID_PGE, R(_cpu_feature)
+ jz 1f
+ movl %cr4, %eax
+ orl $CR4_PGE, %eax
+ movl %eax, %cr4
+1:
+
/* Find end of kernel image (rounded up to a page boundary). */
movl $R(_end),%esi
@@ -744,70 +752,80 @@ over_symalloc:
cmpl $0,R(_bdb_exists)
jne map_read_write
#endif
- movl $R(_etext),%ecx
+ xorl %edx,%edx
+ testl $CPUID_PGE, R(_cpu_feature)
+ jz 2f
+ orl $PG_G,%edx
+
+2: movl $R(_etext),%ecx
addl $PAGE_MASK,%ecx
shrl $PAGE_SHIFT,%ecx
- fillkptphys(0)
+ fillkptphys(%edx)
/* Map read-write, data, bss and symbols */
movl $R(_etext),%eax
addl $PAGE_MASK, %eax
andl $~PAGE_MASK, %eax
map_read_write:
- movl R(_KERNend),%ecx
+ movl $PG_RW,%edx
+ testl $CPUID_PGE, R(_cpu_feature)
+ jz 1f
+ orl $PG_G,%edx
+
+1: movl R(_KERNend),%ecx
subl %eax,%ecx
shrl $PAGE_SHIFT,%ecx
- fillkptphys(PG_RW)
+ fillkptphys(%edx)
/* Map page directory. */
movl R(_IdlePTD), %eax
movl $1, %ecx
- fillkptphys(PG_RW)
+ fillkptphys($PG_RW)
/* Map proc0's page table for the UPAGES. */
movl R(p0upt), %eax
movl $1, %ecx
- fillkptphys(PG_RW)
+ fillkptphys($PG_RW)
/* Map proc0's UPAGES in the physical way ... */
movl R(p0upa), %eax
movl $UPAGES, %ecx
- fillkptphys(PG_RW)
+ fillkptphys($PG_RW)
/* Map ISA hole */
movl $ISA_HOLE_START, %eax
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
- fillkptphys(PG_RW|PG_N)
+ fillkptphys($PG_RW|PG_N)
/* Map proc0s UPAGES in the special page table for this purpose ... */
movl R(p0upa), %eax
movl $KSTKPTEOFF, %ebx
movl $UPAGES, %ecx
- fillkpt(R(p0upt), PG_RW)
+ fillkpt(R(p0upt), $PG_RW)
/* ... and put the page table in the pde. */
movl R(p0upt), %eax
movl $KSTKPTDI, %ebx
movl $1, %ecx
- fillkpt(R(_IdlePTD), PG_RW)
+ fillkpt(R(_IdlePTD), $PG_RW)
/* install a pde for temporary double map of bottom of VA */
movl R(_KPTphys), %eax
xorl %ebx, %ebx
movl $1, %ecx
- fillkpt(R(_IdlePTD), PG_RW)
+ fillkpt(R(_IdlePTD), $PG_RW)
/* install pde's for pt's */
movl R(_KPTphys), %eax
movl $KPTDI, %ebx
movl $NKPT, %ecx
- fillkpt(R(_IdlePTD), PG_RW)
+ fillkpt(R(_IdlePTD), $PG_RW)
/* install a pde recursively mapping page directory as a page table */
movl R(_IdlePTD), %eax
movl $PTDPTDI, %ebx
movl $1,%ecx
- fillkpt(R(_IdlePTD), PG_RW)
+ fillkpt(R(_IdlePTD), $PG_RW)
ret
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 0ab922c..58a079f 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.129 1996/11/03 03:40:47 dyson Exp $
+ * $Id: pmap.c,v 1.130 1996/11/07 14:44:01 joerg Exp $
*/
/*
@@ -97,6 +97,7 @@
#include <machine/pcb.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
+#include <machine/specialreg.h>
#define PMAP_KEEP_PDIRS
@@ -148,6 +149,7 @@ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static vm_offset_t vm_first_phys;
+static int pgeflag; /* PG_G or-in */
static int nkpt;
static vm_page_t nkpg;
@@ -162,23 +164,23 @@ extern int cpu_class;
* Data for the pv entry allocation mechanism
*/
static int pv_freelistcnt;
-TAILQ_HEAD (,pv_entry) pv_freelist;
+TAILQ_HEAD (,pv_entry) pv_freelist = {0};
static vm_offset_t pvva;
static int npvvapg;
/*
* All those kernel PT submaps that BSD is so fond of
*/
-pt_entry_t *CMAP1;
+pt_entry_t *CMAP1 = 0;
static pt_entry_t *CMAP2, *ptmmap;
static pv_table_t *pv_table;
-caddr_t CADDR1, ptvmmap;
+caddr_t CADDR1 = 0, ptvmmap = 0;
static caddr_t CADDR2;
static pt_entry_t *msgbufmap;
-struct msgbuf *msgbufp;
+struct msgbuf *msgbufp=0;
-pt_entry_t *PMAP1;
-unsigned *PADDR1;
+pt_entry_t *PMAP1 = 0;
+unsigned *PADDR1 = 0;
static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
static unsigned * get_ptbase __P((pmap_t pmap));
@@ -320,6 +322,10 @@ pmap_bootstrap(firstaddr, loadaddr)
*(int *) CMAP1 = *(int *) CMAP2 = *(int *) PTD = 0;
invltlb();
+ if (cpu_feature & CPUID_PGE)
+ pgeflag = PG_G;
+ else
+ pgeflag = 0;
}
@@ -572,7 +578,7 @@ pmap_qenter(va, m, count)
for (i = 0; i < count; i++) {
vm_offset_t tva = va + i * PAGE_SIZE;
- unsigned npte = VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V;
+ unsigned npte = VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V | pgeflag;
unsigned opte;
pte = (unsigned *)vtopte(tva);
opte = *pte;
@@ -615,7 +621,7 @@ pmap_kenter(va, pa)
register unsigned *pte;
unsigned npte, opte;
- npte = pa | PG_RW | PG_V;
+ npte = pa | PG_RW | PG_V | pgeflag;
pte = (unsigned *)vtopte(va);
opte = *pte;
*pte = npte;
@@ -873,18 +879,18 @@ retry:
* This routine unholds page table pages, and if the hold count
* drops to zero, then it decrements the wire count.
*/
-static int
-pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
+static int
+_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
int s;
- vm_page_unhold(m);
-
- s = splvm();
- while (m->flags & PG_BUSY) {
- m->flags |= PG_WANTED;
- tsleep(m, PVM, "pmuwpt", 0);
+ if (m->flags & PG_BUSY) {
+ s = splvm();
+ while (m->flags & PG_BUSY) {
+ m->flags |= PG_WANTED;
+ tsleep(m, PVM, "pmuwpt", 0);
+ }
+ splx(s);
}
- splx(s);
if (m->hold_count == 0) {
vm_offset_t pteva;
@@ -927,6 +933,15 @@ pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
return 0;
}
+__inline static int
+pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
+ vm_page_unhold(m);
+ if (m->hold_count == 0)
+ return _pmap_unwire_pte_hold(pmap, m);
+ else
+ return 0;
+}
+
/*
* After removing a page table entry, this routine is used to
* conditionally free the page, and manage the hold/wire counts.
@@ -1570,6 +1585,12 @@ pmap_remove_pte(pmap, ptq, va)
*ptq = 0;
if (oldpte & PG_W)
pmap->pm_stats.wired_count -= 1;
+ /*
+ * Machines that don't support invlpg, also don't support
+ * PG_G.
+ */
+ if (oldpte & PG_G)
+ invlpg(va);
pmap->pm_stats.resident_count -= 1;
if (oldpte & PG_MANAGED) {
ppv = pa_to_pvh(oldpte);
@@ -1998,6 +2019,8 @@ validate:
newpte |= PG_W;
if (va < UPT_MIN_ADDRESS)
newpte |= PG_U;
+ if (pmap == kernel_pmap)
+ newpte |= pgeflag;
/*
* if the mapping or permission bits are different, we need
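
The pmap_remove_pte hunk above is the subtle half of global-page support: a
%cr3 reload (invltlb) flushes ordinary TLB entries but deliberately skips PG_G
ones, so a removed global mapping must be shot down per-page with invlpg. A
standalone sketch of that rule, with printf stand-ins for the privileged
cpufunc.h helpers:

	#include <stdio.h>
	#include <stdint.h>

	#define PG_G 0x100	/* assumption: PTE global bit */

	/* Stand-ins for the inline-asm helpers; hypothetical here. */
	static void invlpg(uintptr_t va)
	{
		printf("invlpg %#lx\n", (unsigned long)va);
	}
	static void invltlb(void)
	{
		printf("reload %%cr3 (leaves PG_G entries alone)\n");
	}

	/* Why pmap_remove_pte gains the PG_G test: only invlpg evicts a
	 * global entry. Per the comment in the diff, CPUs lacking invlpg
	 * also lack PG_G, so the instruction is safe to use here. */
	static void flush_removed_pte(uint32_t oldpte, uintptr_t va)
	{
		if (oldpte & PG_G)
			invlpg(va);
		/* non-global entries fall to the pmap's batched invltlb */
	}

	int main(void)
	{
		flush_removed_pte(PG_G | 0x1, 0xf0100000u);
		invltlb();
		return 0;
	}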
diff --git a/sys/amd64/include/specialreg.h b/sys/amd64/include/specialreg.h
index 9c62438..25e5e63 100644
--- a/sys/amd64/include/specialreg.h
+++ b/sys/amd64/include/specialreg.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* from: @(#)specialreg.h 7.1 (Berkeley) 5/9/91
- * $Id: specialreg.h,v 1.8 1996/01/30 22:55:06 mpp Exp $
+ * $Id: specialreg.h,v 1.9 1996/06/03 19:37:38 sos Exp $
*/
#ifndef _MACHINE_SPECIALREG_H_
@@ -61,6 +61,39 @@
#define CR0_CD 0x40000000 /* Cache Disable */
/*
+ * Bits in PPro special registers
+ */
+#define CR4_VME 0x00000001 /* Virtual 8086 mode extensions */
+#define CR4_PVI 0x00000002 /* Protected-mode virtual interrupts */
+#define CR4_TSD 0x00000004 /* Time stamp disable */
+#define CR4_DE 0x00000008 /* Debugging extensions */
+#define CR4_PSE 0x00000010 /* Page size extensions */
+#define CR4_PAE 0x00000020 /* Physical address extension */
+#define CR4_MCE 0x00000040 /* Machine check enable */
+#define CR4_PGE 0x00000080 /* Page global enable */
+#define CR4_PCE 0x00000100 /* Performance monitoring counter enable */
+
+/*
+ * CPUID instruction features register
+ */
+#define CPUID_FPU 0x0001
+#define CPUID_VME 0x0002
+#define CPUID_DE 0x0004
+#define CPUID_PSE 0x0008
+#define CPUID_TSC 0x0010
+#define CPUID_MSR 0x0020
+#define CPUID_PAE 0x0040
+#define CPUID_MCE 0x0080
+#define CPUID_CX8 0x0100
+#define CPUID_APIC 0x0200
+#define CPUID_B10 0x0400
+#define CPUID_B11 0x0800
+#define CPUID_MTRR 0x1000
+#define CPUID_PGE 0x2000
+#define CPUID_MCA 0x4000
+#define CPUID_CMOV 0x8000
+
+/*
* Cyrix 486 DLC special registers, accessible as IO ports.
*/
#define CCR0 0xc0 /* configuration control register 0 */
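
The new CPUID_* defines describe the EDX feature word returned by CPUID leaf 1,
which the kernel stashes in cpu_feature during identify_cpu. A hedged user-land
sketch of reading that word with GCC-style inline asm (x86 only, and it assumes
the CPU actually implements CPUID -- 386 and early-486 parts do not):

	#include <stdio.h>

	#define CPUID_PGE 0x2000

	static unsigned cpuid_edx(void)
	{
		unsigned eax = 1, ebx, ecx, edx;	/* leaf 1: features */
		__asm__ volatile("cpuid"
		    : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx));
		(void)ebx; (void)ecx;
		return edx;
	}

	int main(void)
	{
		printf("PGE %ssupported\n",
		    (cpuid_edx() & CPUID_PGE) ? "" : "not ");
		return 0;
	}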
diff --git a/sys/i386/i386/locore.s b/sys/i386/i386/locore.s
index 07cad14..13c042e 100644
--- a/sys/i386/i386/locore.s
+++ b/sys/i386/i386/locore.s
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
- * $Id: locore.s,v 1.74 1996/10/05 10:43:58 jkh Exp $
+ * $Id: locore.s,v 1.75 1996/10/09 19:47:16 bde Exp $
*
* originally from: locore.s, by William F. Jolitz
*
@@ -164,9 +164,10 @@ _bdb_exists: .long 0
* prot = protection bits
*/
#define fillkpt(base, prot) \
- shll $2, %ebx ; \
- addl base, %ebx ; \
- orl $PG_V+prot, %eax ; \
+ shll $2,%ebx ; \
+ addl base,%ebx ; \
+ orl $PG_V,%eax ; \
+ orl prot,%eax ; \
1: movl %eax,(%ebx) ; \
addl $PAGE_SIZE,%eax ; /* increment physical address */ \
addl $4,%ebx ; /* next pte */ \
@@ -699,6 +700,13 @@ identify_cpu:
create_pagetables:
+ testl $CPUID_PGE, R(_cpu_feature)
+ jz 1f
+ movl %cr4, %eax
+ orl $CR4_PGE, %eax
+ movl %eax, %cr4
+1:
+
/* Find end of kernel image (rounded up to a page boundary). */
movl $R(_end),%esi
@@ -744,70 +752,80 @@ over_symalloc:
cmpl $0,R(_bdb_exists)
jne map_read_write
#endif
- movl $R(_etext),%ecx
+ xorl %edx,%edx
+ testl $CPUID_PGE, R(_cpu_feature)
+ jz 2f
+ orl $PG_G,%edx
+
+2: movl $R(_etext),%ecx
addl $PAGE_MASK,%ecx
shrl $PAGE_SHIFT,%ecx
- fillkptphys(0)
+ fillkptphys(%edx)
/* Map read-write, data, bss and symbols */
movl $R(_etext),%eax
addl $PAGE_MASK, %eax
andl $~PAGE_MASK, %eax
map_read_write:
- movl R(_KERNend),%ecx
+ movl $PG_RW,%edx
+ testl $CPUID_PGE, R(_cpu_feature)
+ jz 1f
+ orl $PG_G,%edx
+
+1: movl R(_KERNend),%ecx
subl %eax,%ecx
shrl $PAGE_SHIFT,%ecx
- fillkptphys(PG_RW)
+ fillkptphys(%edx)
/* Map page directory. */
movl R(_IdlePTD), %eax
movl $1, %ecx
- fillkptphys(PG_RW)
+ fillkptphys($PG_RW)
/* Map proc0's page table for the UPAGES. */
movl R(p0upt), %eax
movl $1, %ecx
- fillkptphys(PG_RW)
+ fillkptphys($PG_RW)
/* Map proc0's UPAGES in the physical way ... */
movl R(p0upa), %eax
movl $UPAGES, %ecx
- fillkptphys(PG_RW)
+ fillkptphys($PG_RW)
/* Map ISA hole */
movl $ISA_HOLE_START, %eax
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
- fillkptphys(PG_RW|PG_N)
+ fillkptphys($PG_RW|PG_N)
/* Map proc0s UPAGES in the special page table for this purpose ... */
movl R(p0upa), %eax
movl $KSTKPTEOFF, %ebx
movl $UPAGES, %ecx
- fillkpt(R(p0upt), PG_RW)
+ fillkpt(R(p0upt), $PG_RW)
/* ... and put the page table in the pde. */
movl R(p0upt), %eax
movl $KSTKPTDI, %ebx
movl $1, %ecx
- fillkpt(R(_IdlePTD), PG_RW)
+ fillkpt(R(_IdlePTD), $PG_RW)
/* install a pde for temporary double map of bottom of VA */
movl R(_KPTphys), %eax
xorl %ebx, %ebx
movl $1, %ecx
- fillkpt(R(_IdlePTD), PG_RW)
+ fillkpt(R(_IdlePTD), $PG_RW)
/* install pde's for pt's */
movl R(_KPTphys), %eax
movl $KPTDI, %ebx
movl $NKPT, %ecx
- fillkpt(R(_IdlePTD), PG_RW)
+ fillkpt(R(_IdlePTD), $PG_RW)
/* install a pde recursively mapping page directory as a page table */
movl R(_IdlePTD), %eax
movl $PTDPTDI, %ebx
movl $1,%ecx
- fillkpt(R(_IdlePTD), PG_RW)
+ fillkpt(R(_IdlePTD), $PG_RW)
ret
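
The seven new instructions at the top of create_pagetables, rendered in C: turn
on CR4.PGE if and only if CPUID reported PGE. rcr4()/load_cr4() are stand-ins
below (a shadow variable, not the privileged register), so this is a shape
sketch rather than kernel code:

	#include <stdio.h>

	#define CPUID_PGE 0x2000	/* from the specialreg.h hunks */
	#define CR4_PGE   0x00000080

	/* Stand-ins for privileged register access. */
	static unsigned cr4;
	static unsigned rcr4(void)       { return cr4; }
	static void load_cr4(unsigned v) { cr4 = v; }

	static void maybe_enable_pge(unsigned cpu_feature)
	{
		if (cpu_feature & CPUID_PGE)	/* testl $CPUID_PGE,... */
			load_cr4(rcr4() | CR4_PGE);	/* orl; movl %eax,%cr4 */
	}

	int main(void)
	{
		maybe_enable_pge(CPUID_PGE);
		printf("cr4 = %#x\n", cr4);	/* 0x80 when PGE advertised */
		return 0;
	}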
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 0ab922c..58a079f 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.129 1996/11/03 03:40:47 dyson Exp $
+ * $Id: pmap.c,v 1.130 1996/11/07 14:44:01 joerg Exp $
*/
/*
@@ -97,6 +97,7 @@
#include <machine/pcb.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
+#include <machine/specialreg.h>
#define PMAP_KEEP_PDIRS
@@ -148,6 +149,7 @@ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static vm_offset_t vm_first_phys;
+static int pgeflag; /* PG_G or-in */
static int nkpt;
static vm_page_t nkpg;
@@ -162,23 +164,23 @@ extern int cpu_class;
* Data for the pv entry allocation mechanism
*/
static int pv_freelistcnt;
-TAILQ_HEAD (,pv_entry) pv_freelist;
+TAILQ_HEAD (,pv_entry) pv_freelist = {0};
static vm_offset_t pvva;
static int npvvapg;
/*
* All those kernel PT submaps that BSD is so fond of
*/
-pt_entry_t *CMAP1;
+pt_entry_t *CMAP1 = 0;
static pt_entry_t *CMAP2, *ptmmap;
static pv_table_t *pv_table;
-caddr_t CADDR1, ptvmmap;
+caddr_t CADDR1 = 0, ptvmmap = 0;
static caddr_t CADDR2;
static pt_entry_t *msgbufmap;
-struct msgbuf *msgbufp;
+struct msgbuf *msgbufp=0;
-pt_entry_t *PMAP1;
-unsigned *PADDR1;
+pt_entry_t *PMAP1 = 0;
+unsigned *PADDR1 = 0;
static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
static unsigned * get_ptbase __P((pmap_t pmap));
@@ -320,6 +322,10 @@ pmap_bootstrap(firstaddr, loadaddr)
*(int *) CMAP1 = *(int *) CMAP2 = *(int *) PTD = 0;
invltlb();
+ if (cpu_feature & CPUID_PGE)
+ pgeflag = PG_G;
+ else
+ pgeflag = 0;
}
@@ -572,7 +578,7 @@ pmap_qenter(va, m, count)
for (i = 0; i < count; i++) {
vm_offset_t tva = va + i * PAGE_SIZE;
- unsigned npte = VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V;
+ unsigned npte = VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V | pgeflag;
unsigned opte;
pte = (unsigned *)vtopte(tva);
opte = *pte;
@@ -615,7 +621,7 @@ pmap_kenter(va, pa)
register unsigned *pte;
unsigned npte, opte;
- npte = pa | PG_RW | PG_V;
+ npte = pa | PG_RW | PG_V | pgeflag;
pte = (unsigned *)vtopte(va);
opte = *pte;
*pte = npte;
@@ -873,18 +879,18 @@ retry:
* This routine unholds page table pages, and if the hold count
* drops to zero, then it decrements the wire count.
*/
-static int
-pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
+static int
+_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
int s;
- vm_page_unhold(m);
-
- s = splvm();
- while (m->flags & PG_BUSY) {
- m->flags |= PG_WANTED;
- tsleep(m, PVM, "pmuwpt", 0);
+ if (m->flags & PG_BUSY) {
+ s = splvm();
+ while (m->flags & PG_BUSY) {
+ m->flags |= PG_WANTED;
+ tsleep(m, PVM, "pmuwpt", 0);
+ }
+ splx(s);
}
- splx(s);
if (m->hold_count == 0) {
vm_offset_t pteva;
@@ -927,6 +933,15 @@ pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
return 0;
}
+__inline static int
+pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
+ vm_page_unhold(m);
+ if (m->hold_count == 0)
+ return _pmap_unwire_pte_hold(pmap, m);
+ else
+ return 0;
+}
+
/*
* After removing a page table entry, this routine is used to
* conditionally free the page, and manage the hold/wire counts.
@@ -1570,6 +1585,12 @@ pmap_remove_pte(pmap, ptq, va)
*ptq = 0;
if (oldpte & PG_W)
pmap->pm_stats.wired_count -= 1;
+ /*
+ * Machines that don't support invlpg, also don't support
+ * PG_G.
+ */
+ if (oldpte & PG_G)
+ invlpg(va);
pmap->pm_stats.resident_count -= 1;
if (oldpte & PG_MANAGED) {
ppv = pa_to_pvh(oldpte);
@@ -1998,6 +2019,8 @@ validate:
newpte |= PG_W;
if (va < UPT_MIN_ADDRESS)
newpte |= PG_U;
+ if (pmap == kernel_pmap)
+ newpte |= pgeflag;
/*
* if the mapping or permission bits are different, we need
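
Apart from PG_G, the pmap.c hunks also split pmap_unwire_pte_hold() into an
__inline fast-path wrapper plus an out-of-line slow path, so the common case
(hold count still positive, page not busy) costs no function call and no
splvm(). A generic user-land sketch of that shape, with invented names:

	#include <stdio.h>

	struct page {
		int hold_count;
		int busy;	/* stands in for PG_BUSY */
	};

	/* Out-of-line slow path: reached only when the last hold drops. */
	static int unwire_slow(struct page *m)
	{
		while (m->busy)
			;	/* the kernel tsleep()s here instead */
		/* ... free the page table page, adjust the wire count ... */
		return 1;
	}

	/* Inline fast path, mirroring the new pmap_unwire_pte_hold(). */
	static inline int unwire(struct page *m)
	{
		m->hold_count--;	/* vm_page_unhold() */
		if (m->hold_count == 0)
			return unwire_slow(m);
		return 0;
	}

	int main(void)
	{
		struct page p = { 2, 0 };
		printf("%d %d\n", unwire(&p), unwire(&p));	/* 0 1 */
		return 0;
	}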
diff --git a/sys/i386/include/specialreg.h b/sys/i386/include/specialreg.h
index 9c62438..25e5e63 100644
--- a/sys/i386/include/specialreg.h
+++ b/sys/i386/include/specialreg.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* from: @(#)specialreg.h 7.1 (Berkeley) 5/9/91
- * $Id: specialreg.h,v 1.8 1996/01/30 22:55:06 mpp Exp $
+ * $Id: specialreg.h,v 1.9 1996/06/03 19:37:38 sos Exp $
*/
#ifndef _MACHINE_SPECIALREG_H_
@@ -61,6 +61,39 @@
#define CR0_CD 0x40000000 /* Cache Disable */
/*
+ * Bits in PPro special registers
+ */
+#define CR4_VME 0x00000001 /* Virtual 8086 mode extensions */
+#define CR4_PVI 0x00000002 /* Protected-mode virtual interrupts */
+#define CR4_TSD 0x00000004 /* Time stamp disable */
+#define CR4_DE 0x00000008 /* Debugging extensions */
+#define CR4_PSE 0x00000010 /* Page size extensions */
+#define CR4_PAE 0x00000020 /* Physical address extension */
+#define CR4_MCE 0x00000040 /* Machine check enable */
+#define CR4_PGE 0x00000080 /* Page global enable */
+#define CR4_PCE 0x00000100 /* Performance monitoring counter enable */
+
+/*
+ * CPUID instruction features register
+ */
+#define CPUID_FPU 0x0001
+#define CPUID_VME 0x0002
+#define CPUID_DE 0x0004
+#define CPUID_PSE 0x0008
+#define CPUID_TSC 0x0010
+#define CPUID_MSR 0x0020
+#define CPUID_PAE 0x0040
+#define CPUID_MCE 0x0080
+#define CPUID_CX8 0x0100
+#define CPUID_APIC 0x0200
+#define CPUID_B10 0x0400
+#define CPUID_B11 0x0800
+#define CPUID_MTRR 0x1000
+#define CPUID_PGE 0x2000
+#define CPUID_MCA 0x4000
+#define CPUID_CMOV 0x8000
+
+/*
* Cyrix 486 DLC special registers, accessible as IO ports.
*/
#define CCR0 0xc0 /* configuration control register 0 */