author     dyson <dyson@FreeBSD.org>    1997-12-14 02:11:23 +0000
committer  dyson <dyson@FreeBSD.org>    1997-12-14 02:11:23 +0000
commit     738872cad6b501578d2839e485ca5a73de1bcee5 (patch)
tree       03005c15fc08329aab24f55b693fb72f704b6dee /sys
parent     acf3f6c34674e4d5895a65c2500aa558ceeac52a (diff)
After one of my analysis passes to evaluate methods for SMP TLB management, I
noticed some major enhancements available for UP configurations. These changes
reduce the number of UP TLB flushes dramatically. Since a TLB flush appears to
cost a minimum of roughly 80 cycles, this is a "nice" enhancement, equivalent
to eliminating between 40 and 160 instructions per avoided flush (assuming the
CPU retires roughly 0.5 to 2 instructions per cycle).

The changes include making sure that kernel threads all use the same PTD, and
eliminating unneeded PTD switches at context-switch time.
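
In C terms, the heart of the UP optimization in the swtch.s changes below is: before loading %cr3 for the incoming process, compare it with the current value and skip the reload (and its implicit full TLB flush) when the page directory is unchanged. The following is a minimal userland sketch of that decision, including the two statistics counters this commit adds; the function and variable layout here is illustrative only, since the real logic lives in the cpu_switch()/idle paths of swtch.s shown in the diff.

#include <stdio.h>

/*
 * Userland sketch of the UP context-switch optimization.  The %cr3
 * register is modeled by a plain variable; in the kernel, reloading
 * %cr3 is what flushes the entire TLB on i386-class CPUs.
 */
static unsigned long cur_cr3;           /* stands in for the %cr3 register */
static unsigned long tlb_flush_count;   /* counts real reloads (flushes) */
static unsigned long swtch_optim_stats; /* counts reloads that were avoided */

static void
switch_address_space(unsigned long new_cr3)
{
	if (cur_cr3 == new_cr3) {
		swtch_optim_stats++;    /* same PTD: TLB contents stay valid */
		return;
	}
	tlb_flush_count++;              /* different PTD: must reload and flush */
	cur_cr3 = new_cr3;
}

int
main(void)
{
	switch_address_space(0x1000);   /* first switch: flush */
	switch_address_space(0x1000);   /* same PTD (e.g. two kernel threads): no flush */
	switch_address_space(0x2000);   /* different process: flush */
	printf("flushes=%lu avoided=%lu\n", tlb_flush_count, swtch_optim_stats);
	return 0;
}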
Diffstat (limited to 'sys')
-rw-r--r--   sys/amd64/amd64/cpu_switch.S | 57
-rw-r--r--   sys/amd64/amd64/machdep.c    | 10
-rw-r--r--   sys/amd64/amd64/pmap.c       | 27
-rw-r--r--   sys/amd64/amd64/support.S    |  5
-rw-r--r--   sys/amd64/amd64/support.s    |  5
-rw-r--r--   sys/amd64/amd64/swtch.s      | 57
-rw-r--r--   sys/amd64/include/cpufunc.h  |  8
-rw-r--r--   sys/i386/i386/machdep.c      | 10
-rw-r--r--   sys/i386/i386/pmap.c         | 27
-rw-r--r--   sys/i386/i386/support.s      |  5
-rw-r--r--   sys/i386/i386/swtch.s        | 57
-rw-r--r--   sys/i386/include/cpufunc.h   |  8
-rw-r--r--   sys/kern/init_main.c         |  4
-rw-r--r--   sys/vm/pmap.h                |  3
14 files changed, 241 insertions, 42 deletions
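
The machdep.c hunks below export the two new counters as read-only sysctls, debug.swtch_optim_stats and debug.tlb_flush_count, but only in kernels built with SWTCH_OPTIM_STATS defined. Assuming such a kernel (and that sysctlbyname(3) is available), they could be read from userland roughly as sketched here; `sysctl debug.tlb_flush_count` from the shell would show the same value.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

/*
 * Read the statistics counters added by this commit.  They exist only
 * in kernels compiled with SWTCH_OPTIM_STATS, so a failure here is
 * expected on a stock kernel.
 */
int
main(void)
{
	int flushes, avoided;
	size_t len = sizeof(int);

	if (sysctlbyname("debug.tlb_flush_count", &flushes, &len, NULL, 0) == -1 ||
	    sysctlbyname("debug.swtch_optim_stats", &avoided, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return 1;
	}
	printf("TLB flushes: %d, flushes avoided at context switch: %d\n",
	    flushes, avoided);
	return 0;
}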
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index 4d57b64..df0bda8 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: swtch.s,v 1.63 1997/09/21 15:03:58 peter Exp $
+ * $Id: swtch.s,v 1.64 1997/10/10 09:44:06 peter Exp $
  */
 
 #include "npx.h"
@@ -86,6 +86,11 @@ _hlt_vector:	.long	_default_halt	/* pointer to halt routine */
 	.globl	_want_resched
 _want_resched:	.long	0	/* we need to re-run the scheduler */
+#if defined(SWTCH_OPTIM_STATS)
+	.globl	_swtch_optim_stats, _tlb_flush_count
+_swtch_optim_stats:	.long	0	/* number of _swtch_optims */
+_tlb_flush_count:	.long	0
+#endif
 	.text
 /*
@@ -252,6 +257,9 @@ _idle:
 	/* use our idleproc's "context" */
 	movl	_my_idlePTD,%ecx
 	movl	%ecx,%cr3
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_tlb_flush_count
+#endif
 	movl	$_idlestack_top,%ecx
 	movl	%ecx,%esp
@@ -259,8 +267,7 @@ _idle:
 #ifdef VM86
 	movl	_my_tr, %esi
 #endif /* VM86 */
-	movl	$_common_tss, %eax
-	movl	%ecx, TSS_ESP0(%eax)
+	movl	%ecx, _common_tss + TSS_ESP0
 #ifdef VM86
 	btrl	%esi, _private_tss
@@ -302,6 +309,9 @@ _idle:
 	.globl	idle_loop
 idle_loop:
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_tlb_flush_count
+#endif
 	movl	%cr3,%eax	/* ouch! */
 	movl	%eax,%cr3
@@ -357,15 +367,27 @@ idle_loop:
 #else
 	xorl	%ebp,%ebp
 	movl	$HIDENAME(tmpstk),%esp
-	movl	_IdlePTD,%ecx
-	movl	%ecx,%cr3
+#if defined(OVERLY_CONSERVATIVE_PTD_MGMT)
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_swtch_optim_stats
+#endif
+	movl	_IdlePTD, %ecx
+	movl	%cr3, %eax
+	cmpl	%ecx, %eax
+	je	2f
+#if defined(SWTCH_OPTIM_STATS)
+	decl	_swtch_optim_stats
+	incl	_tlb_flush_count
+#endif
+	movl	%ecx, %cr3
+2:
+#endif
 	/* update common_tss.tss_esp0 pointer */
 #ifdef VM86
 	movl	_my_tr, %esi
 #endif /* VM86 */
-	movl	$_common_tss, %eax
-	movl	%esp, TSS_ESP0(%eax)
+	movl	%esp, _common_tss + TSS_ESP0
 #ifdef VM86
 	btrl	%esi, _private_tss
@@ -576,16 +598,28 @@ swtch_com:
 	movl	%eax,P_BACK(%ecx)	/* isolate process to run */
 	movl	P_ADDR(%ecx),%edx
-	movl	PCB_CR3(%edx),%ebx
 #ifdef SMP
+	movl	PCB_CR3(%edx),%ebx
 	/* Grab the private PT pointer from the outgoing process's PTD */
 	movl	$_PTD, %esi
 	movl	4*MPPTDI(%esi), %eax	/* fetch cpu's prv pt */
-#endif /* SMP */
-
+#else
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_swtch_optim_stats
+#endif
 	/* switch address space */
+	movl	%cr3,%ebx
+	cmpl	PCB_CR3(%edx),%ebx
+	je	4f
+#if defined(SWTCH_OPTIM_STATS)
+	decl	_swtch_optim_stats
+	incl	_tlb_flush_count
+#endif
+	movl	PCB_CR3(%edx),%ebx
+#endif /* SMP */
 	movl	%ebx,%cr3
+4:
 #ifdef SMP
 	/* Copy the private PT to the new process's PTD */
@@ -597,6 +631,9 @@ swtch_com:
 	 */
 	movl	%eax, 4*MPPTDI(%esi)	/* restore cpu's prv page */
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_tlb_flush_count
+#endif
 	/* XXX: we have just changed the page tables.. reload.. */
 	movl	%ebx, %cr3
 #endif /* SMP */
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index d6db6e6..0aeefb2 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -35,7 +35,7 @@
  * SUCH DAMAGE.
  *
  *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
- *	$Id: machdep.c,v 1.277 1997/12/04 14:35:39 jkh Exp $
+ *	$Id: machdep.c,v 1.278 1997/12/04 21:21:24 jmg Exp $
  */
 
 #include "apm.h"
@@ -150,6 +150,14 @@ int msgbufmapped = 0;	/* set when safe to use msgbuf */
 int _udatasel, _ucodesel;
 u_int	atdevbase;
+#if defined(SWTCH_OPTIM_STATS)
+extern int swtch_optim_stats;
+SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
+	CTLFLAG_RD, &swtch_optim_stats, 0, "");
+SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
+	CTLFLAG_RD, &tlb_flush_count, 0, "");
+#endif
+
 int physmem = 0;
 int cold = 1;
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 2a8561f..0286bdb 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
- *	$Id: pmap.c,v 1.172 1997/11/07 19:58:34 tegge Exp $
+ *	$Id: pmap.c,v 1.173 1997/11/20 19:30:31 bde Exp $
  */
 
 /*
@@ -1116,6 +1116,28 @@ pmap_unuse_pt(pmap, va, mpte)
 	return pmap_unwire_pte_hold(pmap, mpte);
 }
+#if !defined(SMP)
+void
+pmap_pinit0(pmap)
+	struct pmap *pmap;
+{
+	pmap->pm_pdir =
+		(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
+	pmap_kenter((vm_offset_t) pmap->pm_pdir, (vm_offset_t) IdlePTD);
+	pmap->pm_flags = 0;
+	pmap->pm_count = 1;
+	pmap->pm_ptphint = NULL;
+	TAILQ_INIT(&pmap->pm_pvlist);
+}
+#else
+void
+pmap_pinit0(pmap)
+	struct pmap *pmap;
+{
+	pmap_pinit(pmap);
+}
+#endif
+
 /*
  * Initialize a preallocated and zeroed pmap structure,
  * such as one in a vmspace structure.
@@ -3247,6 +3269,9 @@ pmap_mincore(pmap, addr)
 void
 pmap_activate(struct proc *p)
 {
+#if defined(SWTCH_OPTIM_STATS)
+	++tlb_flush_count;
+#endif
 	load_cr3(p->p_addr->u_pcb.pcb_cr3 =
 		vtophys(p->p_vmspace->vm_pmap.pm_pdir));
 }
diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
index 2ae3e20..3b9970f 100644
--- a/sys/amd64/amd64/support.S
+++ b/sys/amd64/amd64/support.S
@@ -30,7 +30,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *	$Id: support.s,v 1.56 1997/08/09 00:02:44 dyson Exp $
+ *	$Id: support.s,v 1.57 1997/09/02 20:05:30 bde Exp $
  */
 
 #include "npx.h"
@@ -1517,6 +1517,9 @@ ENTRY(rcr3)
 /* void load_cr3(caddr_t cr3) */
 ENTRY(load_cr3)
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_tlb_flush_count
+#endif
 	movl	4(%esp),%eax
 	movl	%eax,%cr3
 	ret
diff --git a/sys/amd64/amd64/support.s b/sys/amd64/amd64/support.s
index 2ae3e20..3b9970f 100644
--- a/sys/amd64/amd64/support.s
+++ b/sys/amd64/amd64/support.s
@@ -30,7 +30,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *	$Id: support.s,v 1.56 1997/08/09 00:02:44 dyson Exp $
+ *	$Id: support.s,v 1.57 1997/09/02 20:05:30 bde Exp $
  */
 
 #include "npx.h"
@@ -1517,6 +1517,9 @@ ENTRY(rcr3)
 /* void load_cr3(caddr_t cr3) */
 ENTRY(load_cr3)
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_tlb_flush_count
+#endif
 	movl	4(%esp),%eax
 	movl	%eax,%cr3
 	ret
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index 4d57b64..df0bda8 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: swtch.s,v 1.63 1997/09/21 15:03:58 peter Exp $
+ * $Id: swtch.s,v 1.64 1997/10/10 09:44:06 peter Exp $
  */
 
 #include "npx.h"
@@ -86,6 +86,11 @@ _hlt_vector:	.long	_default_halt	/* pointer to halt routine */
 	.globl	_want_resched
 _want_resched:	.long	0	/* we need to re-run the scheduler */
+#if defined(SWTCH_OPTIM_STATS)
+	.globl	_swtch_optim_stats, _tlb_flush_count
+_swtch_optim_stats:	.long	0	/* number of _swtch_optims */
+_tlb_flush_count:	.long	0
+#endif
 	.text
 /*
@@ -252,6 +257,9 @@ _idle:
 	/* use our idleproc's "context" */
 	movl	_my_idlePTD,%ecx
 	movl	%ecx,%cr3
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_tlb_flush_count
+#endif
 	movl	$_idlestack_top,%ecx
 	movl	%ecx,%esp
@@ -259,8 +267,7 @@ _idle:
 #ifdef VM86
 	movl	_my_tr, %esi
 #endif /* VM86 */
-	movl	$_common_tss, %eax
-	movl	%ecx, TSS_ESP0(%eax)
+	movl	%ecx, _common_tss + TSS_ESP0
 #ifdef VM86
 	btrl	%esi, _private_tss
@@ -302,6 +309,9 @@ _idle:
 	.globl	idle_loop
 idle_loop:
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_tlb_flush_count
+#endif
 	movl	%cr3,%eax	/* ouch! */
 	movl	%eax,%cr3
@@ -357,15 +367,27 @@ idle_loop:
 #else
 	xorl	%ebp,%ebp
 	movl	$HIDENAME(tmpstk),%esp
-	movl	_IdlePTD,%ecx
-	movl	%ecx,%cr3
+#if defined(OVERLY_CONSERVATIVE_PTD_MGMT)
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_swtch_optim_stats
+#endif
+	movl	_IdlePTD, %ecx
+	movl	%cr3, %eax
+	cmpl	%ecx, %eax
+	je	2f
+#if defined(SWTCH_OPTIM_STATS)
+	decl	_swtch_optim_stats
+	incl	_tlb_flush_count
+#endif
+	movl	%ecx, %cr3
+2:
+#endif
 	/* update common_tss.tss_esp0 pointer */
 #ifdef VM86
 	movl	_my_tr, %esi
 #endif /* VM86 */
-	movl	$_common_tss, %eax
-	movl	%esp, TSS_ESP0(%eax)
+	movl	%esp, _common_tss + TSS_ESP0
 #ifdef VM86
 	btrl	%esi, _private_tss
@@ -576,16 +598,28 @@ swtch_com:
 	movl	%eax,P_BACK(%ecx)	/* isolate process to run */
 	movl	P_ADDR(%ecx),%edx
-	movl	PCB_CR3(%edx),%ebx
 #ifdef SMP
+	movl	PCB_CR3(%edx),%ebx
 	/* Grab the private PT pointer from the outgoing process's PTD */
 	movl	$_PTD, %esi
 	movl	4*MPPTDI(%esi), %eax	/* fetch cpu's prv pt */
-#endif /* SMP */
-
+#else
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_swtch_optim_stats
+#endif
 	/* switch address space */
+	movl	%cr3,%ebx
+	cmpl	PCB_CR3(%edx),%ebx
+	je	4f
+#if defined(SWTCH_OPTIM_STATS)
+	decl	_swtch_optim_stats
+	incl	_tlb_flush_count
+#endif
+	movl	PCB_CR3(%edx),%ebx
+#endif /* SMP */
 	movl	%ebx,%cr3
+4:
 #ifdef SMP
 	/* Copy the private PT to the new process's PTD */
@@ -597,6 +631,9 @@ swtch_com:
 	 */
 	movl	%eax, 4*MPPTDI(%esi)	/* restore cpu's prv page */
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_tlb_flush_count
+#endif
 	/* XXX: we have just changed the page tables.. reload.. */
 	movl	%ebx, %cr3
 #endif /* SMP */
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index 3b7eb9c..4143dce 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -30,7 +30,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: cpufunc.h,v 1.3 1997/09/05 20:20:31 smp Exp smp $
+ * $Id: cpufunc.h,v 1.72 1997/09/07 22:01:27 fsmp Exp $
  */
 
 /*
@@ -45,6 +45,9 @@
 #include <machine/lock.h>
+#if defined(SWTCH_OPTIM_STATS)
+extern int tlb_flush_count;
+#endif
 #ifdef __GNUC__
@@ -234,6 +237,9 @@ invltlb(void)
 	 */
 	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3" : "=r" (temp) : : "memory");
+#if defined(SWTCH_OPTIM_STATS)
+	++tlb_flush_count;
+#endif
 }
 #endif	/* SMP */
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index d6db6e6..0aeefb2 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -35,7 +35,7 @@
  * SUCH DAMAGE.
  *
  *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
- *	$Id: machdep.c,v 1.277 1997/12/04 14:35:39 jkh Exp $
+ *	$Id: machdep.c,v 1.278 1997/12/04 21:21:24 jmg Exp $
  */
 
 #include "apm.h"
@@ -150,6 +150,14 @@ int msgbufmapped = 0;	/* set when safe to use msgbuf */
 int _udatasel, _ucodesel;
 u_int	atdevbase;
+#if defined(SWTCH_OPTIM_STATS)
+extern int swtch_optim_stats;
+SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
+	CTLFLAG_RD, &swtch_optim_stats, 0, "");
+SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
+	CTLFLAG_RD, &tlb_flush_count, 0, "");
+#endif
+
 int physmem = 0;
 int cold = 1;
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 2a8561f..0286bdb 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
- *	$Id: pmap.c,v 1.172 1997/11/07 19:58:34 tegge Exp $
+ *	$Id: pmap.c,v 1.173 1997/11/20 19:30:31 bde Exp $
  */
 
 /*
@@ -1116,6 +1116,28 @@ pmap_unuse_pt(pmap, va, mpte)
 	return pmap_unwire_pte_hold(pmap, mpte);
 }
+#if !defined(SMP)
+void
+pmap_pinit0(pmap)
+	struct pmap *pmap;
+{
+	pmap->pm_pdir =
+		(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
+	pmap_kenter((vm_offset_t) pmap->pm_pdir, (vm_offset_t) IdlePTD);
+	pmap->pm_flags = 0;
+	pmap->pm_count = 1;
+	pmap->pm_ptphint = NULL;
+	TAILQ_INIT(&pmap->pm_pvlist);
+}
+#else
+void
+pmap_pinit0(pmap)
+	struct pmap *pmap;
+{
+	pmap_pinit(pmap);
+}
+#endif
+
 /*
  * Initialize a preallocated and zeroed pmap structure,
  * such as one in a vmspace structure.
@@ -3247,6 +3269,9 @@ pmap_mincore(pmap, addr)
 void
 pmap_activate(struct proc *p)
 {
+#if defined(SWTCH_OPTIM_STATS)
+	++tlb_flush_count;
+#endif
 	load_cr3(p->p_addr->u_pcb.pcb_cr3 =
 		vtophys(p->p_vmspace->vm_pmap.pm_pdir));
 }
diff --git a/sys/i386/i386/support.s b/sys/i386/i386/support.s
index 2ae3e20..3b9970f 100644
--- a/sys/i386/i386/support.s
+++ b/sys/i386/i386/support.s
@@ -30,7 +30,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *	$Id: support.s,v 1.56 1997/08/09 00:02:44 dyson Exp $
+ *	$Id: support.s,v 1.57 1997/09/02 20:05:30 bde Exp $
  */
 
 #include "npx.h"
@@ -1517,6 +1517,9 @@ ENTRY(rcr3)
 /* void load_cr3(caddr_t cr3) */
 ENTRY(load_cr3)
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_tlb_flush_count
+#endif
 	movl	4(%esp),%eax
 	movl	%eax,%cr3
 	ret
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
index 4d57b64..df0bda8 100644
--- a/sys/i386/i386/swtch.s
+++ b/sys/i386/i386/swtch.s
@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: swtch.s,v 1.63 1997/09/21 15:03:58 peter Exp $
+ * $Id: swtch.s,v 1.64 1997/10/10 09:44:06 peter Exp $
  */
 
 #include "npx.h"
@@ -86,6 +86,11 @@ _hlt_vector:	.long	_default_halt	/* pointer to halt routine */
 	.globl	_want_resched
 _want_resched:	.long	0	/* we need to re-run the scheduler */
+#if defined(SWTCH_OPTIM_STATS)
+	.globl	_swtch_optim_stats, _tlb_flush_count
+_swtch_optim_stats:	.long	0	/* number of _swtch_optims */
+_tlb_flush_count:	.long	0
+#endif
 	.text
 /*
@@ -252,6 +257,9 @@ _idle:
 	/* use our idleproc's "context" */
 	movl	_my_idlePTD,%ecx
 	movl	%ecx,%cr3
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_tlb_flush_count
+#endif
 	movl	$_idlestack_top,%ecx
 	movl	%ecx,%esp
@@ -259,8 +267,7 @@ _idle:
 #ifdef VM86
 	movl	_my_tr, %esi
 #endif /* VM86 */
-	movl	$_common_tss, %eax
-	movl	%ecx, TSS_ESP0(%eax)
+	movl	%ecx, _common_tss + TSS_ESP0
 #ifdef VM86
 	btrl	%esi, _private_tss
@@ -302,6 +309,9 @@ _idle:
 	.globl	idle_loop
 idle_loop:
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_tlb_flush_count
+#endif
 	movl	%cr3,%eax	/* ouch! */
 	movl	%eax,%cr3
@@ -357,15 +367,27 @@ idle_loop:
 #else
 	xorl	%ebp,%ebp
 	movl	$HIDENAME(tmpstk),%esp
-	movl	_IdlePTD,%ecx
-	movl	%ecx,%cr3
+#if defined(OVERLY_CONSERVATIVE_PTD_MGMT)
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_swtch_optim_stats
+#endif
+	movl	_IdlePTD, %ecx
+	movl	%cr3, %eax
+	cmpl	%ecx, %eax
+	je	2f
+#if defined(SWTCH_OPTIM_STATS)
+	decl	_swtch_optim_stats
+	incl	_tlb_flush_count
+#endif
+	movl	%ecx, %cr3
+2:
+#endif
 	/* update common_tss.tss_esp0 pointer */
 #ifdef VM86
 	movl	_my_tr, %esi
 #endif /* VM86 */
-	movl	$_common_tss, %eax
-	movl	%esp, TSS_ESP0(%eax)
+	movl	%esp, _common_tss + TSS_ESP0
 #ifdef VM86
 	btrl	%esi, _private_tss
@@ -576,16 +598,28 @@ swtch_com:
 	movl	%eax,P_BACK(%ecx)	/* isolate process to run */
 	movl	P_ADDR(%ecx),%edx
-	movl	PCB_CR3(%edx),%ebx
 #ifdef SMP
+	movl	PCB_CR3(%edx),%ebx
 	/* Grab the private PT pointer from the outgoing process's PTD */
 	movl	$_PTD, %esi
 	movl	4*MPPTDI(%esi), %eax	/* fetch cpu's prv pt */
-#endif /* SMP */
-
+#else
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_swtch_optim_stats
+#endif
 	/* switch address space */
+	movl	%cr3,%ebx
+	cmpl	PCB_CR3(%edx),%ebx
+	je	4f
+#if defined(SWTCH_OPTIM_STATS)
+	decl	_swtch_optim_stats
+	incl	_tlb_flush_count
+#endif
+	movl	PCB_CR3(%edx),%ebx
+#endif /* SMP */
 	movl	%ebx,%cr3
+4:
 #ifdef SMP
 	/* Copy the private PT to the new process's PTD */
@@ -597,6 +631,9 @@ swtch_com:
 	 */
 	movl	%eax, 4*MPPTDI(%esi)	/* restore cpu's prv page */
+#if defined(SWTCH_OPTIM_STATS)
+	incl	_tlb_flush_count
+#endif
 	/* XXX: we have just changed the page tables.. reload.. */
 	movl	%ebx, %cr3
 #endif /* SMP */
diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h
index 3b7eb9c..4143dce 100644
--- a/sys/i386/include/cpufunc.h
+++ b/sys/i386/include/cpufunc.h
@@ -30,7 +30,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: cpufunc.h,v 1.3 1997/09/05 20:20:31 smp Exp smp $
+ * $Id: cpufunc.h,v 1.72 1997/09/07 22:01:27 fsmp Exp $
  */
 
 /*
@@ -45,6 +45,9 @@
 #include <machine/lock.h>
+#if defined(SWTCH_OPTIM_STATS)
+extern int tlb_flush_count;
+#endif
 #ifdef __GNUC__
@@ -234,6 +237,9 @@ invltlb(void)
 	 */
 	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3" : "=r" (temp) : : "memory");
+#if defined(SWTCH_OPTIM_STATS)
+	++tlb_flush_count;
+#endif
 }
 #endif	/* SMP */
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index d4ea42b..620e1b9 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)init_main.c	8.9 (Berkeley) 1/21/94
- * $Id: init_main.c,v 1.77 1997/12/06 04:11:09 sef Exp $
+ * $Id: init_main.c,v 1.78 1997/12/12 04:00:57 dyson Exp $
  */
 
 #include "opt_devfs.h"
@@ -401,7 +401,7 @@ proc0_init(dummy)
 	/* Allocate a prototype map so we have something to fork. */
 	p->p_vmspace = &vmspace0;
 	vmspace0.vm_refcnt = 1;
-	pmap_pinit(&vmspace0.vm_pmap);
+	pmap_pinit0(&vmspace0.vm_pmap);
 	vm_map_init(&vmspace0.vm_map, round_page(VM_MIN_ADDRESS),
 	    trunc_page(VM_MAXUSER_ADDRESS), TRUE);
 	vmspace0.vm_map.pmap = &vmspace0.vm_pmap;
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 2c9697a..291cb01 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: pmap.h,v 1.23 1997/08/05 22:07:21 dyson Exp $
+ * $Id: pmap.h,v 1.24 1997/08/05 23:03:24 dyson Exp $
  */
 
 /*
@@ -113,6 +113,7 @@ void		 pmap_pageable __P((pmap_t, vm_offset_t, vm_offset_t, boolean_t));
 vm_offset_t	 pmap_phys_address __P((int));
 void		 pmap_pinit __P((pmap_t));
+void		 pmap_pinit0 __P((pmap_t));
 void		 pmap_protect __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t));
 void		 pmap_qenter __P((vm_offset_t, vm_page_t *, int));
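
A note on how the two halves of the change fit together: the new pmap_pinit0() makes proc0's page directory, in the !SMP case, a kernel-virtual alias of IdlePTD rather than a freshly populated copy, so proc0 and the kernel threads that share its address space all run on the kernel's own page directory, and switching among them never requires a %cr3 reload; the comparison added in swtch.s then turns those switches into the cheap path. The counters make the effect measurable: if, for example, debug.swtch_optim_stats rose by 4,000 over some interval while debug.tlb_flush_count rose by 1,000 (hypothetical numbers for illustration), then roughly 80% of address-space switches skipped the flush, worth on the order of 4,000 × 80 ≈ 320,000 cycles at the commit message's 80-cycle estimate.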