diff options
author | jlemon <jlemon@FreeBSD.org> | 1999-06-01 18:20:36 +0000 |
---|---|---|
committer | jlemon <jlemon@FreeBSD.org> | 1999-06-01 18:20:36 +0000 |
commit | b5d4171ff60677d883a8f4fdf912b23d5de9a433 (patch) | |
tree | 37d000a80ae9f6ab9ca36152f3fd48600aa82ae6 /sys/amd64 | |
parent | d4d2c873635d9e513e6e5720150fd4e44067d68f (diff) | |
download | FreeBSD-src-b5d4171ff60677d883a8f4fdf912b23d5de9a433.zip FreeBSD-src-b5d4171ff60677d883a8f4fdf912b23d5de9a433.tar.gz |
Unifdef VM86.
Reviewed by: silence on -current
Diffstat (limited to 'sys/amd64')
-rw-r--r-- | sys/amd64/amd64/apic_vector.S | 4 | ||||
-rw-r--r-- | sys/amd64/amd64/cpu_switch.S | 15 | ||||
-rw-r--r-- | sys/amd64/amd64/exception.S | 5 | ||||
-rw-r--r-- | sys/amd64/amd64/exception.s | 5 | ||||
-rw-r--r-- | sys/amd64/amd64/genassym.c | 11 | ||||
-rw-r--r-- | sys/amd64/amd64/locore.S | 18 | ||||
-rw-r--r-- | sys/amd64/amd64/locore.s | 18 | ||||
-rw-r--r-- | sys/amd64/amd64/machdep.c | 648 | ||||
-rw-r--r-- | sys/amd64/amd64/mp_machdep.c | 5 | ||||
-rw-r--r-- | sys/amd64/amd64/mpboot.S | 6 | ||||
-rw-r--r-- | sys/amd64/amd64/mptable.c | 5 | ||||
-rw-r--r-- | sys/amd64/amd64/pmap.c | 3 | ||||
-rw-r--r-- | sys/amd64/amd64/swtch.s | 15 | ||||
-rw-r--r-- | sys/amd64/amd64/sys_machdep.c | 9 | ||||
-rw-r--r-- | sys/amd64/amd64/trap.c | 11 | ||||
-rw-r--r-- | sys/amd64/amd64/vm_machdep.c | 15 | ||||
-rw-r--r-- | sys/amd64/include/mptable.h | 5 | ||||
-rw-r--r-- | sys/amd64/include/pcb.h | 6 | ||||
-rw-r--r-- | sys/amd64/include/pcpu.h | 4 |
19 files changed, 381 insertions, 427 deletions
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S index 66d53a6..11d370e 100644 --- a/sys/amd64/amd64/apic_vector.S +++ b/sys/amd64/amd64/apic_vector.S @@ -1,6 +1,6 @@ /* * from: vector.s, 386BSD 0.1 unknown origin - * $Id: apic_vector.s,v 1.37 1999/04/28 01:04:12 luoqi Exp $ + * $Id: apic_vector.s,v 1.38 1999/05/28 14:08:57 bde Exp $ */ @@ -628,10 +628,8 @@ _Xcpucheckstate: andl $3, %eax cmpl $3, %eax je 1f -#ifdef VM86 testl $PSL_VM, 24(%esp) jne 1f -#endif incl %ebx /* system or interrupt */ #ifdef CPL_AND_CML cmpl $0, _inside_intr diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S index f3d7671..7578618 100644 --- a/sys/amd64/amd64/cpu_switch.S +++ b/sys/amd64/amd64/cpu_switch.S @@ -33,12 +33,11 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: swtch.s,v 1.80 1999/05/06 09:44:49 bde Exp $ + * $Id: swtch.s,v 1.81 1999/05/12 21:38:45 luoqi Exp $ */ #include "npx.h" #include "opt_user_ldt.h" -#include "opt_vm86.h" #include <sys/rtprio.h> @@ -277,7 +276,6 @@ _idle: /* update common_tss.tss_esp0 pointer */ movl %ecx, _common_tss + TSS_ESP0 -#ifdef VM86 movl _cpuid, %esi btrl %esi, _private_tss jae 1f @@ -294,7 +292,6 @@ _idle: movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */ ltr %si 1: -#endif /* VM86 */ sti @@ -397,7 +394,6 @@ idle_loop: /* update common_tss.tss_esp0 pointer */ movl %esp, _common_tss + TSS_ESP0 -#ifdef VM86 movl $0, %esi btrl %esi, _private_tss jae 1f @@ -413,7 +409,6 @@ idle_loop: movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */ ltr %si 1: -#endif /* VM86 */ sti @@ -630,7 +625,6 @@ swtch_com: movl %ebx,%cr3 4: -#ifdef VM86 #ifdef SMP movl _cpuid, %esi #else @@ -642,18 +636,12 @@ swtch_com: movl PCB_EXT(%edx), %edi /* new tss descriptor */ jmp 2f 1: -#endif /* update common_tss.tss_esp0 pointer */ movl %edx, %ebx /* pcb */ -#ifdef VM86 addl $(UPAGES * PAGE_SIZE - 16), %ebx -#else - addl $(UPAGES * PAGE_SIZE), %ebx -#endif /* VM86 */ movl %ebx, 
_common_tss + TSS_ESP0 -#ifdef VM86 btrl %esi, _private_tss jae 3f #ifdef SMP @@ -672,7 +660,6 @@ swtch_com: movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */ ltr %si 3: -#endif /* VM86 */ movl P_VMSPACE(%ecx), %ebx #ifdef SMP movl _cpuid, %eax diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S index 8a20142..9030216 100644 --- a/sys/amd64/amd64/exception.S +++ b/sys/amd64/amd64/exception.S @@ -30,11 +30,10 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: exception.s,v 1.59 1999/04/28 01:03:18 luoqi Exp $ + * $Id: exception.s,v 1.60 1999/05/06 09:44:49 bde Exp $ */ #include "npx.h" -#include "opt_vm86.h" #include <machine/asmacros.h> #include <machine/ipl.h> @@ -362,12 +361,10 @@ ENTRY(fork_trampoline) jmp _doreti -#ifdef VM86 /* * Include vm86 call routines, which want to call _doreti. */ #include "i386/i386/vm86bios.s" -#endif /* VM86 */ /* * Include what was once config+isa-dependent code. diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s index 8a20142..9030216 100644 --- a/sys/amd64/amd64/exception.s +++ b/sys/amd64/amd64/exception.s @@ -30,11 +30,10 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: exception.s,v 1.59 1999/04/28 01:03:18 luoqi Exp $ + * $Id: exception.s,v 1.60 1999/05/06 09:44:49 bde Exp $ */ #include "npx.h" -#include "opt_vm86.h" #include <machine/asmacros.h> #include <machine/ipl.h> @@ -362,12 +361,10 @@ ENTRY(fork_trampoline) jmp _doreti -#ifdef VM86 /* * Include vm86 call routines, which want to call _doreti. */ #include "i386/i386/vm86bios.s" -#endif /* VM86 */ /* * Include what was once config+isa-dependent code. diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c index ed1ecb0..337fa18 100644 --- a/sys/amd64/amd64/genassym.c +++ b/sys/amd64/amd64/genassym.c @@ -34,10 +34,9 @@ * SUCH DAMAGE. 
* * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 - * $Id: genassym.c,v 1.68 1999/05/12 21:30:49 luoqi Exp $ + * $Id: genassym.c,v 1.69 1999/05/12 21:38:40 luoqi Exp $ */ -#include "opt_vm86.h" #include "opt_user_ldt.h" #include <stddef.h> @@ -68,9 +67,7 @@ #endif #include <machine/segments.h> #include <machine/globaldata.h> -#ifdef VM86 #include <machine/vm86.h> -#endif #define OS(s, m) ((u_int)offsetof(struct s, m)) @@ -128,9 +125,7 @@ main() printf("#define\tTSS_ESP0 %#x\n", OS(i386tss, tss_esp0)); printf("#define\tPCB_USERLDT %#x\n", OS(pcb, pcb_ldt)); printf("#define\tPCB_GS %#x\n", OS(pcb, pcb_gs)); -#ifdef VM86 printf("#define\tPCB_EXT %#x\n", OS(pcb, pcb_ext)); -#endif #ifdef SMP printf("#define\tPCB_MPNEST %#x\n", OS(pcb, pcb_mpnest)); #endif @@ -201,10 +196,8 @@ main() printf("#define\tGD_COMMON_TSS %#x\n", OS(globaldata, gd_common_tss)); printf("#define\tGD_SWITCHTIME %#x\n", OS(globaldata, gd_switchtime)); printf("#define\tGD_SWITCHTICKS %#x\n", OS(globaldata, gd_switchticks)); -#ifdef VM86 printf("#define\tGD_COMMON_TSSD %#x\n", OS(globaldata, gd_common_tssd)); printf("#define\tGD_TSS_GDT %#x\n", OS(globaldata, gd_tss_gdt)); -#endif #ifdef USER_LDT printf("#define\tGD_CURRENTLDT %#x\n", OS(globaldata, gd_currentldt)); #endif @@ -233,9 +226,7 @@ main() printf("#define\tKPSEL %#x\n", GSEL(GPRIV_SEL, SEL_KPL)); #endif printf("#define\tGPROC0_SEL %#x\n", GPROC0_SEL); -#ifdef VM86 printf("#define\tVM86_FRAMESIZE %#x\n", sizeof(struct vm86frame)); -#endif return (0); } diff --git a/sys/amd64/amd64/locore.S b/sys/amd64/amd64/locore.S index df42eaa..4e29fd4 100644 --- a/sys/amd64/amd64/locore.S +++ b/sys/amd64/amd64/locore.S @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)locore.s 7.3 (Berkeley) 5/13/91 - * $Id: locore.s,v 1.121 1999/04/28 01:03:20 luoqi Exp $ + * $Id: locore.s,v 1.122 1999/05/09 19:01:49 peter Exp $ * * originally from: locore.s, by William F. 
Jolitz * @@ -48,7 +48,6 @@ #include "opt_ddb.h" #include "opt_nfsroot.h" #include "opt_userconfig.h" -#include "opt_vm86.h" #include <sys/syscall.h> #include <sys/reboot.h> @@ -134,13 +133,11 @@ _KPTphys: .long 0 /* phys addr of kernel page tables */ _proc0paddr: .long 0 /* address of proc 0 address space */ p0upa: .long 0 /* phys addr of proc0's UPAGES */ -#ifdef VM86 vm86phystk: .long 0 /* PA of vm86/bios stack */ .globl _vm86paddr, _vm86pa _vm86paddr: .long 0 /* address of vm86 region */ _vm86pa: .long 0 /* phys addr of vm86 region */ -#endif #ifdef BDE_DEBUGGER .globl _bdb_exists /* flag to indicate BDE debugger is present */ @@ -311,18 +308,10 @@ NON_GPROF_ENTRY(btext) stosb #if NAPM > 0 -#ifndef VM86 -/* - * XXX it's not clear that APM can live in the current environonment. - * Only pc-relative addressing works. - */ - call _apm_setup -#endif #endif call create_pagetables -#ifdef VM86 /* * If the CPU has support for VME, turn it on. */ @@ -332,7 +321,6 @@ NON_GPROF_ENTRY(btext) orl $CR4_VME, %eax movl %eax, %cr4 1: -#endif /* VM86 */ #ifdef BDE_DEBUGGER /* @@ -786,7 +774,6 @@ no_kernend: addl $KERNBASE, %esi movl %esi, R(_proc0paddr) -#ifdef VM86 ALLOCPAGES(1) /* vm86/bios stack */ movl %esi,R(vm86phystk) @@ -794,7 +781,6 @@ no_kernend: movl %esi,R(_vm86pa) addl $KERNBASE, %esi movl %esi, R(_vm86paddr) -#endif /* VM86 */ #ifdef SMP /* Allocate cpu0's private data page */ @@ -862,7 +848,6 @@ map_read_write: movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx fillkptphys($PG_RW) -#ifdef VM86 /* Map space for the vm86 region */ movl R(vm86phystk), %eax movl $4, %ecx @@ -879,7 +864,6 @@ map_read_write: movl $ISA_HOLE_START>>PAGE_SHIFT, %ebx movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx fillkpt(R(_vm86pa), $PG_RW|PG_U) -#endif /* VM86 */ #ifdef SMP /* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */ diff --git a/sys/amd64/amd64/locore.s b/sys/amd64/amd64/locore.s index df42eaa..4e29fd4 100644 --- a/sys/amd64/amd64/locore.s +++ b/sys/amd64/amd64/locore.s @@ -34,7 +34,7 
@@ * SUCH DAMAGE. * * from: @(#)locore.s 7.3 (Berkeley) 5/13/91 - * $Id: locore.s,v 1.121 1999/04/28 01:03:20 luoqi Exp $ + * $Id: locore.s,v 1.122 1999/05/09 19:01:49 peter Exp $ * * originally from: locore.s, by William F. Jolitz * @@ -48,7 +48,6 @@ #include "opt_ddb.h" #include "opt_nfsroot.h" #include "opt_userconfig.h" -#include "opt_vm86.h" #include <sys/syscall.h> #include <sys/reboot.h> @@ -134,13 +133,11 @@ _KPTphys: .long 0 /* phys addr of kernel page tables */ _proc0paddr: .long 0 /* address of proc 0 address space */ p0upa: .long 0 /* phys addr of proc0's UPAGES */ -#ifdef VM86 vm86phystk: .long 0 /* PA of vm86/bios stack */ .globl _vm86paddr, _vm86pa _vm86paddr: .long 0 /* address of vm86 region */ _vm86pa: .long 0 /* phys addr of vm86 region */ -#endif #ifdef BDE_DEBUGGER .globl _bdb_exists /* flag to indicate BDE debugger is present */ @@ -311,18 +308,10 @@ NON_GPROF_ENTRY(btext) stosb #if NAPM > 0 -#ifndef VM86 -/* - * XXX it's not clear that APM can live in the current environonment. - * Only pc-relative addressing works. - */ - call _apm_setup -#endif #endif call create_pagetables -#ifdef VM86 /* * If the CPU has support for VME, turn it on. 
*/ @@ -332,7 +321,6 @@ NON_GPROF_ENTRY(btext) orl $CR4_VME, %eax movl %eax, %cr4 1: -#endif /* VM86 */ #ifdef BDE_DEBUGGER /* @@ -786,7 +774,6 @@ no_kernend: addl $KERNBASE, %esi movl %esi, R(_proc0paddr) -#ifdef VM86 ALLOCPAGES(1) /* vm86/bios stack */ movl %esi,R(vm86phystk) @@ -794,7 +781,6 @@ no_kernend: movl %esi,R(_vm86pa) addl $KERNBASE, %esi movl %esi, R(_vm86paddr) -#endif /* VM86 */ #ifdef SMP /* Allocate cpu0's private data page */ @@ -862,7 +848,6 @@ map_read_write: movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx fillkptphys($PG_RW) -#ifdef VM86 /* Map space for the vm86 region */ movl R(vm86phystk), %eax movl $4, %ecx @@ -879,7 +864,6 @@ map_read_write: movl $ISA_HOLE_START>>PAGE_SHIFT, %ebx movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx fillkpt(R(_vm86pa), $PG_RW|PG_U) -#endif /* VM86 */ #ifdef SMP /* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */ diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c index 006f3fd..22c7290 100644 --- a/sys/amd64/amd64/machdep.c +++ b/sys/amd64/amd64/machdep.c @@ -35,7 +35,7 @@ * SUCH DAMAGE. * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 - * $Id: machdep.c,v 1.335 1999/05/12 21:38:42 luoqi Exp $ + * $Id: machdep.c,v 1.336 1999/05/31 18:35:53 dfr Exp $ */ #include "apm.h" @@ -53,7 +53,6 @@ #include "opt_sysvipc.h" #include "opt_user_ldt.h" #include "opt_userconfig.h" -#include "opt_vm86.h" #include <sys/param.h> #include <sys/systm.h> @@ -124,9 +123,8 @@ #include <i386/isa/isa_device.h> #endif #include <i386/isa/intr_machdep.h> -#ifndef VM86 #include <isa/rtc.h> -#endif +#include <machine/vm86.h> #include <machine/random.h> #include <sys/ptrace.h> @@ -568,7 +566,6 @@ sendsig(catcher, sig, mask, code) sf.sf_sc.sc_trapno = regs->tf_trapno; sf.sf_sc.sc_err = regs->tf_err; -#ifdef VM86 /* * If we're a vm86 process, we want to save the segment registers. 
* We also change eflags to be our emulated eflags, not the actual @@ -600,7 +597,6 @@ sendsig(catcher, sig, mask, code) */ tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); } -#endif /* VM86 */ /* * Copy the sigframe out to the user's stack. @@ -657,7 +653,6 @@ sigreturn(p, uap) return(EFAULT); eflags = scp->sc_ps; -#ifdef VM86 if (eflags & PSL_VM) { struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; struct vm86_kernel *vm86; @@ -691,7 +686,6 @@ sigreturn(p, uap) tf->tf_es = _udatasel; tf->tf_fs = _udatasel; } else { -#endif /* VM86 */ /* * Don't allow users to change privileged or reserved flags. */ @@ -729,9 +723,7 @@ sigreturn(p, uap) regs->tf_ds = scp->sc_ds; regs->tf_es = scp->sc_es; regs->tf_fs = scp->sc_fs; -#ifdef VM86 } -#endif /* restore scratch registers */ regs->tf_eax = scp->sc_eax; @@ -902,12 +894,10 @@ union descriptor ldt[NLDT]; /* local descriptor table */ struct region_descriptor r_gdt, r_idt; #endif -#ifdef VM86 #ifndef SMP extern struct segment_descriptor common_tssd, *tss_gdt; #endif int private_tss; /* flag indicating private tss */ -#endif /* VM86 */ #if defined(I586_CPU) && !defined(NO_F00F_HACK) struct gate_descriptor *t_idt; @@ -1138,26 +1128,375 @@ sdtossd(sd, ssd) ssd->ssd_gran = sd->sd_gran; } +#define PHYSMAP_SIZE (2 * 8) + +static void +getmemsize(int first) +{ + int i, physmap_idx, pa_indx; + u_int basemem, extmem; + int speculative_mprobe = FALSE; + struct vm86frame vmf; + struct vm86context vmc; + vm_offset_t pa, physmap[PHYSMAP_SIZE]; + pt_entry_t pte; + struct { + u_int64_t base; + u_int64_t length; + u_int32_t type; + } *smap; +#if NNPX > 0 + int msize; +#endif + + bzero(&vmf, sizeof(struct vm86frame)); + bzero(physmap, sizeof(physmap)); + + vm86_intcall(0x12, &vmf); + basemem = vmf.vmf_ax; + if (basemem > 640) { + printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", + basemem); + basemem = 640; + } + + /* + * XXX if biosbasemem is now < 640, there is `hole' + * between the end of base memory 
and the start of + * ISA memory. The hole may be empty or it may + * contain BIOS code or data. Map it read/write so + * that the BIOS can write to it. (Memory from 0 to + * the physical end of the kernel is mapped read-only + * to begin with and then parts of it are remapped. + * The parts that aren't remapped form holes that + * remain read-only and are unused by the kernel. + * The base memory area is below the physical end of + * the kernel and right now forms a read-only hole. + * The part of it from PAGE_SIZE to + * (trunc_page(biosbasemem * 1024) - 1) will be + * remapped and used by the kernel later.) + * + * This code is similar to the code used in + * pmap_mapdev, but since no memory needs to be + * allocated we simply change the mapping. + */ + for (pa = trunc_page(basemem * 1024); + pa < ISA_HOLE_START; pa += PAGE_SIZE) { + pte = (pt_entry_t)vtopte(pa + KERNBASE); + *pte = pa | PG_RW | PG_V; + } + + /* + * if basemem != 640, map pages r/w into vm86 page table so + * that the bios can scribble on it. + */ + pte = (pt_entry_t)vm86paddr; + for (i = basemem / 4; i < 160; i++) + pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; + + /* + * map page 1 R/W into the kernel page table so we can use it + * as a buffer. The kernel will unmap this page later. 
+ */ + pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT)); + *pte = (1 << PAGE_SHIFT) | PG_RW | PG_V; + + /* + * get memory map with INT 15:E820 + */ +#define SMAPSIZ sizeof(*smap) +#define SMAP_SIG 0x534D4150 /* 'SMAP' */ + + vmc.npages = 0; + smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT)); + vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di); + + physmap_idx = 0; + vmf.vmf_ebx = 0; + do { + vmf.vmf_eax = 0xE820; + vmf.vmf_edx = SMAP_SIG; + vmf.vmf_ecx = SMAPSIZ; + i = vm86_datacall(0x15, &vmf, &vmc); + if (i || vmf.vmf_eax != SMAP_SIG) + break; + if (boothowto & RB_VERBOSE) + printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n", + smap->type, + *(u_int32_t *)((char *)&smap->base + 4), + (u_int32_t)smap->base, + *(u_int32_t *)((char *)&smap->length + 4), + (u_int32_t)smap->length); + + if (smap->type != 0x01) + goto next_run; + + if (smap->length == 0) + goto next_run; + + for (i = 0; i <= physmap_idx; i += 2) { + if (smap->base < physmap[i + 1]) { + if (boothowto & RB_VERBOSE) + printf( + "Overlapping or non-montonic memory region, ignoring second region\n"); + goto next_run; + } + } + + if (smap->base == physmap[physmap_idx + 1]) { + physmap[physmap_idx + 1] += smap->length; + goto next_run; + } + + physmap_idx += 2; + if (physmap_idx == PHYSMAP_SIZE) { + printf( + "Too many segments in the physical address map, giving up\n"); + break; + } + physmap[physmap_idx] = smap->base; + physmap[physmap_idx + 1] = smap->base + smap->length; +next_run: + } while (vmf.vmf_ebx != 0); + + if (physmap[1] != 0) + goto physmap_done; + + /* + * try memory map with INT 15:E801 + */ + vmf.vmf_ax = 0xE801; + if (vm86_intcall(0x15, &vmf) == 0) { + extmem = vmf.vmf_cx + vmf.vmf_dx * 64; + } else { +#if 0 + vmf.vmf_ah = 0x88; + vm86_intcall(0x15, &vmf); + extmem = vmf.vmf_ax; +#else + /* + * Prefer the RTC value for extended memory. 
+ */ + extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8); +#endif + } + + /* + * Only perform calculations in this section if there is no system + * map; any system new enough that supports SMAP probably does not + * need these workarounds. + */ + /* + * Special hack for chipsets that still remap the 384k hole when + * there's 16MB of memory - this really confuses people that + * are trying to use bus mastering ISA controllers with the + * "16MB limit"; they only have 16MB, but the remapping puts + * them beyond the limit. + */ + /* + * If extended memory is between 15-16MB (16-17MB phys address range), + * chop it to 15MB. + */ + if ((extmem > 15 * 1024) && (extmem < 16 * 1024)) + extmem = 15 * 1024; + + physmap[0] = 0; + physmap[1] = basemem * 1024; + physmap_idx = 2; + physmap[physmap_idx] = 0x100000; + physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024; + + /* + * Indicate that we wish to do a speculative search for memory + * beyond the end of the reported size if the indicated amount + * is 64M (or more). + * + * XXX we should only do this in the RTC / 0x88 case + */ + if (extmem >= 16 * 1024) + speculative_mprobe = TRUE; + +physmap_done: + /* + * Now, physmap contains a map of physical memory. + */ + +#ifdef SMP + /* make hole for AP bootstrap code */ + physmap[1] = mp_bootaddress(physmap[1] / 1024); +#endif + + /* + * Maxmem isn't the "maximum memory", it's one larger than the + * highest page of the physical address space. It should be + * called something like "Maxphyspage". + */ + Maxmem = physmap[physmap_idx + 1] / PAGE_SIZE; + + /* + * If a specific amount of memory is indicated via the MAXMEM + * option or the npx0 "msize", then don't do the speculative + * memory probe. 
+ */ +#ifdef MAXMEM + Maxmem = MAXMEM / 4; + speculative_mprobe = FALSE; +#endif + +#if NNPX > 0 + if (resource_int_value("npx", 0, "msize", &msize) == 0) { + if (msize != 0) { + Maxmem = msize / 4; + speculative_mprobe = FALSE; + } + } +#endif + +/* XXX former point of mp_probe() and pmap_bootstrap() */ + + /* + * Size up each available chunk of physical memory. + */ + physmap[0] = PAGE_SIZE; /* mask off page 0 */ + pa_indx = 0; + phys_avail[pa_indx++] = physmap[0]; + phys_avail[pa_indx] = physmap[0]; + pte = (pt_entry_t)vtopte(KERNBASE); + *pte = (1 << PAGE_SHIFT) | PG_RW | PG_V; + + /* + * physmap is in bytes, so when converting to page boundaries, + * round up the start address and round down the end address. + */ + for (i = 0; i <= physmap_idx; i += 2) { + int end; + + end = ptoa(Maxmem); + if (physmap[i + 1] < end) + end = trunc_page(physmap[i + 1]); + for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { + int tmp, page_bad; + int *ptr = 0; + + /* + * block out kernel memory as not available. + */ + if (pa >= 0x100000 && pa < first) + continue; + + page_bad = FALSE; + + /* + * map page into kernel: valid, read/write,non-cacheable + */ + *pte = pa | PG_V | PG_RW | PG_N; + invltlb(); + + tmp = *(int *)ptr; + /* + * Test for alternating 1's and 0's + */ + *(volatile int *)ptr = 0xaaaaaaaa; + if (*(volatile int *)ptr != 0xaaaaaaaa) { + page_bad = TRUE; + } + /* + * Test for alternating 0's and 1's + */ + *(volatile int *)ptr = 0x55555555; + if (*(volatile int *)ptr != 0x55555555) { + page_bad = TRUE; + } + /* + * Test for all 1's + */ + *(volatile int *)ptr = 0xffffffff; + if (*(volatile int *)ptr != 0xffffffff) { + page_bad = TRUE; + } + /* + * Test for all 0's + */ + *(volatile int *)ptr = 0x0; + if (*(volatile int *)ptr != 0x0) { + page_bad = TRUE; + } + /* + * Restore original value. + */ + *(int *)ptr = tmp; + + /* + * Adjust array of valid/good pages. 
+ */ + if (page_bad == TRUE) { + continue; + } + /* + * If this good page is a continuation of the + * previous set of good pages, then just increase + * the end pointer. Otherwise start a new chunk. + * Note that "end" points one higher than end, + * making the range >= start and < end. + * If we're also doing a speculative memory + * test and we at or past the end, bump up Maxmem + * so that we keep going. The first bad page + * will terminate the loop. + */ + if (phys_avail[pa_indx] == pa) { + phys_avail[pa_indx] += PAGE_SIZE; + if (speculative_mprobe == TRUE && + phys_avail[pa_indx] >= (64*1024*1024)) + end += PAGE_SIZE; + } else { + pa_indx++; + if (pa_indx == PHYS_AVAIL_ARRAY_END) { + printf("Too many holes in the physical address space, giving up\n"); + pa_indx--; + break; + } + phys_avail[pa_indx++] = pa; /* start */ + phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ + } + physmem++; + } + } + *pte = 0; + invltlb(); + + /* + * XXX + * The last chunk must contain at least one page plus the message + * buffer to avoid complicating other code (message buffer address + * calculation, etc.). + */ + while (phys_avail[pa_indx - 1] + PAGE_SIZE + + round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { + physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); + phys_avail[pa_indx--] = 0; + phys_avail[pa_indx--] = 0; + } + + Maxmem = atop(phys_avail[pa_indx]); + + /* Trim off space for the message buffer. */ + phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); + + avail_end = phys_avail[pa_indx]; +} + void init386(first) int first; { int x; - unsigned biosbasemem, biosextmem; struct gate_descriptor *gdp; int gsel_tss; -#if NNPX > 0 - int msize; -#endif - #ifndef SMP /* table descriptors - used to load tables by microp */ struct region_descriptor r_gdt, r_idt; #endif - int pagesinbase, pagesinext; - vm_offset_t target_page; - int pa_indx, off; - int speculative_mprobe; + int off; /* * Prevent lowering of the ipl if we call tsleep() early. 
@@ -1286,20 +1625,14 @@ init386(first) initializecpu(); /* Initialize CPU registers */ /* make an initial tss so cpu can get interrupt stack on syscall! */ -#ifdef VM86 common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16; -#else - common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE; -#endif /* VM86 */ common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ; common_tss.tss_ioopt = (sizeof common_tss) << 16; gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); ltr(gsel_tss); -#ifdef VM86 private_tss = 0; tss_gdt = &gdt[GPROC0_SEL].sd; common_tssd = *tss_gdt; -#endif dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 = dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)]; @@ -1314,138 +1647,8 @@ init386(first) dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL); dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL); -#ifdef VM86 - initial_bioscalls(&biosbasemem, &biosextmem); -#else - - /* Use BIOS values stored in RTC CMOS RAM, since probing - * breaks certain 386 AT relics. - */ - biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8); - biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8); -#endif - - /* - * If BIOS tells us that it has more than 640k in the basemem, - * don't believe it - set it to 640k. - */ - if (biosbasemem > 640) { - printf("Preposterous RTC basemem of %uK, truncating to 640K\n", - biosbasemem); - biosbasemem = 640; - } - if (bootinfo.bi_memsizes_valid && bootinfo.bi_basemem > 640) { - printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", - bootinfo.bi_basemem); - bootinfo.bi_basemem = 640; - } - - /* - * Warn if the official BIOS interface disagrees with the RTC - * interface used above about the amount of base memory or the - * amount of extended memory. Prefer the BIOS value for the base - * memory. This is necessary for machines that `steal' base - * memory for use as BIOS memory, at least if we are going to use - * the BIOS for apm. Prefer the RTC value for extended memory. 
- * Eventually the hackish interface shouldn't even be looked at. - */ - if (bootinfo.bi_memsizes_valid) { - if (bootinfo.bi_basemem != biosbasemem) { - vm_offset_t pa; - - printf( - "BIOS basemem (%uK) != RTC basemem (%uK), setting to BIOS value\n", - bootinfo.bi_basemem, biosbasemem); - biosbasemem = bootinfo.bi_basemem; - - /* - * XXX if biosbasemem is now < 640, there is `hole' - * between the end of base memory and the start of - * ISA memory. The hole may be empty or it may - * contain BIOS code or data. Map it read/write so - * that the BIOS can write to it. (Memory from 0 to - * the physical end of the kernel is mapped read-only - * to begin with and then parts of it are remapped. - * The parts that aren't remapped form holes that - * remain read-only and are unused by the kernel. - * The base memory area is below the physical end of - * the kernel and right now forms a read-only hole. - * The part of it from PAGE_SIZE to - * (trunc_page(biosbasemem * 1024) - 1) will be - * remapped and used by the kernel later.) - * - * This code is similar to the code used in - * pmap_mapdev, but since no memory needs to be - * allocated we simply change the mapping. 
- */ - for (pa = trunc_page(biosbasemem * 1024); - pa < ISA_HOLE_START; pa += PAGE_SIZE) { - unsigned *pte; - - pte = (unsigned *)vtopte(pa + KERNBASE); - *pte = pa | PG_RW | PG_V; - } - } - if (bootinfo.bi_extmem != biosextmem) - printf("BIOS extmem (%uK) != RTC extmem (%uK)\n", - bootinfo.bi_extmem, biosextmem); - } - -#ifdef SMP - /* make hole for AP bootstrap code */ - pagesinbase = mp_bootaddress(biosbasemem) / PAGE_SIZE; -#else - pagesinbase = biosbasemem * 1024 / PAGE_SIZE; -#endif - - pagesinext = biosextmem * 1024 / PAGE_SIZE; - - /* - * Special hack for chipsets that still remap the 384k hole when - * there's 16MB of memory - this really confuses people that - * are trying to use bus mastering ISA controllers with the - * "16MB limit"; they only have 16MB, but the remapping puts - * them beyond the limit. - */ - /* - * If extended memory is between 15-16MB (16-17MB phys address range), - * chop it to 15MB. - */ - if ((pagesinext > 3840) && (pagesinext < 4096)) - pagesinext = 3840; - - /* - * Maxmem isn't the "maximum memory", it's one larger than the - * highest page of the physical address space. It should be - * called something like "Maxphyspage". - */ - Maxmem = pagesinext + 0x100000/PAGE_SIZE; - /* - * Indicate that we wish to do a speculative search for memory beyond - * the end of the reported size if the indicated amount is 64MB (0x4000 - * pages) - which is the largest amount that the BIOS/bootblocks can - * currently report. If a specific amount of memory is indicated via - * the MAXMEM option or the npx0 "msize", then don't do the speculative - * memory probe. 
- */ - if (Maxmem >= 0x4000) - speculative_mprobe = TRUE; - else - speculative_mprobe = FALSE; - -#ifdef MAXMEM - Maxmem = MAXMEM/4; - speculative_mprobe = FALSE; -#endif - -#if NNPX > 0 - if (resource_int_value("npx", 0, "msize", &msize) == 0) { - if (msize != 0) { - Maxmem = msize / 4; - speculative_mprobe = FALSE; - } - } -#endif + vm86_initialize(); + getmemsize(first); #ifdef SMP /* look for the MP hardware - needed for apic addresses */ @@ -1453,130 +1656,7 @@ init386(first) #endif /* call pmap initialization to make new kernel address space */ - pmap_bootstrap (first, 0); - - /* - * Size up each available chunk of physical memory. - */ - - /* - * We currently don't bother testing base memory. - * XXX ...but we probably should. - */ - pa_indx = 0; - if (pagesinbase > 1) { - phys_avail[pa_indx++] = PAGE_SIZE; /* skip first page of memory */ - phys_avail[pa_indx] = ptoa(pagesinbase);/* memory up to the ISA hole */ - physmem = pagesinbase - 1; - } else { - /* point at first chunk end */ - pa_indx++; - } - - for (target_page = avail_start; target_page < ptoa(Maxmem); target_page += PAGE_SIZE) { - int tmp, page_bad; - - page_bad = FALSE; - - /* - * map page into kernel: valid, read/write, non-cacheable - */ - *(int *)CMAP1 = PG_V | PG_RW | PG_N | target_page; - invltlb(); - - tmp = *(int *)CADDR1; - /* - * Test for alternating 1's and 0's - */ - *(volatile int *)CADDR1 = 0xaaaaaaaa; - if (*(volatile int *)CADDR1 != 0xaaaaaaaa) { - page_bad = TRUE; - } - /* - * Test for alternating 0's and 1's - */ - *(volatile int *)CADDR1 = 0x55555555; - if (*(volatile int *)CADDR1 != 0x55555555) { - page_bad = TRUE; - } - /* - * Test for all 1's - */ - *(volatile int *)CADDR1 = 0xffffffff; - if (*(volatile int *)CADDR1 != 0xffffffff) { - page_bad = TRUE; - } - /* - * Test for all 0's - */ - *(volatile int *)CADDR1 = 0x0; - if (*(volatile int *)CADDR1 != 0x0) { - /* - * test of page failed - */ - page_bad = TRUE; - } - /* - * Restore original value. 
- */ - *(int *)CADDR1 = tmp; - - /* - * Adjust array of valid/good pages. - */ - if (page_bad == FALSE) { - /* - * If this good page is a continuation of the - * previous set of good pages, then just increase - * the end pointer. Otherwise start a new chunk. - * Note that "end" points one higher than end, - * making the range >= start and < end. - * If we're also doing a speculative memory - * test and we at or past the end, bump up Maxmem - * so that we keep going. The first bad page - * will terminate the loop. - */ - if (phys_avail[pa_indx] == target_page) { - phys_avail[pa_indx] += PAGE_SIZE; - if (speculative_mprobe == TRUE && - phys_avail[pa_indx] >= (64*1024*1024)) - Maxmem++; - } else { - pa_indx++; - if (pa_indx == PHYS_AVAIL_ARRAY_END) { - printf("Too many holes in the physical address space, giving up\n"); - pa_indx--; - break; - } - phys_avail[pa_indx++] = target_page; /* start */ - phys_avail[pa_indx] = target_page + PAGE_SIZE; /* end */ - } - physmem++; - } - } - - *(int *)CMAP1 = 0; - invltlb(); - - /* - * XXX - * The last chunk must contain at least one page plus the message - * buffer to avoid complicating other code (message buffer address - * calculation, etc.). - */ - while (phys_avail[pa_indx - 1] + PAGE_SIZE + - round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { - physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); - phys_avail[pa_indx--] = 0; - phys_avail[pa_indx--] = 0; - } - - Maxmem = atop(phys_avail[pa_indx]); - - /* Trim off space for the message buffer. 
*/ - phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); - - avail_end = phys_avail[pa_indx]; + pmap_bootstrap(first, 0); /* now running on new page tables, configured,and u/iom is accessible */ @@ -1614,9 +1694,7 @@ init386(first) #ifdef SMP proc0.p_addr->u_pcb.pcb_mpnest = 1; #endif -#ifdef VM86 proc0.p_addr->u_pcb.pcb_ext = 0; -#endif /* Sigh, relocate physical addresses left from bootstrap */ if (bootinfo.bi_modulep) { diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c index ad9e6c6..3365d30 100644 --- a/sys/amd64/amd64/mp_machdep.c +++ b/sys/amd64/amd64/mp_machdep.c @@ -22,11 +22,10 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: mp_machdep.c,v 1.100 1999/05/08 17:48:22 peter Exp $ + * $Id: mp_machdep.c,v 1.101 1999/05/12 21:38:43 luoqi Exp $ */ #include "opt_smp.h" -#include "opt_vm86.h" #include "opt_cpu.h" #include "opt_user_ldt.h" @@ -482,10 +481,8 @@ init_secondary(void) common_tss.tss_esp0 = 0; /* not used until after switch */ common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL); common_tss.tss_ioopt = (sizeof common_tss) << 16; -#ifdef VM86 tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd; common_tssd = *tss_gdt; -#endif ltr(gsel_tss); load_cr0(0x8005003b); /* XXX! */ diff --git a/sys/amd64/amd64/mpboot.S b/sys/amd64/amd64/mpboot.S index 8be76d7..962bafc 100644 --- a/sys/amd64/amd64/mpboot.S +++ b/sys/amd64/amd64/mpboot.S @@ -31,11 +31,9 @@ * mpboot.s: FreeBSD machine support for the Intel MP Spec * multiprocessor systems. * - * $Id: mpboot.s,v 1.9 1999/04/10 22:58:29 tegge Exp $ + * $Id: mpboot.s,v 1.10 1999/04/28 01:03:22 luoqi Exp $ */ -#include "opt_vm86.h" - #include <machine/asmacros.h> /* miscellaneous asm macros */ #include <machine/apic.h> #include <machine/specialreg.h> @@ -94,7 +92,6 @@ mp_begin: /* now running relocated at KERNBASE */ call _init_secondary /* load i386 tables */ CHECKPOINT(0x38, 5) -#ifdef VM86 /* * If the [BSP] CPU has support for VME, turn it on. 
*/ @@ -104,7 +101,6 @@ mp_begin: /* now running relocated at KERNBASE */ orl $CR4_VME, %eax movl %eax, %cr4 1: -#endif /* disable the APIC, just to be SURE */ movl lapic_svr, %eax /* get spurious vector reg. */ diff --git a/sys/amd64/amd64/mptable.c b/sys/amd64/amd64/mptable.c index ad9e6c6..3365d30 100644 --- a/sys/amd64/amd64/mptable.c +++ b/sys/amd64/amd64/mptable.c @@ -22,11 +22,10 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: mp_machdep.c,v 1.100 1999/05/08 17:48:22 peter Exp $ + * $Id: mp_machdep.c,v 1.101 1999/05/12 21:38:43 luoqi Exp $ */ #include "opt_smp.h" -#include "opt_vm86.h" #include "opt_cpu.h" #include "opt_user_ldt.h" @@ -482,10 +481,8 @@ init_secondary(void) common_tss.tss_esp0 = 0; /* not used until after switch */ common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL); common_tss.tss_ioopt = (sizeof common_tss) << 16; -#ifdef VM86 tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd; common_tssd = *tss_gdt; -#endif ltr(gsel_tss); load_cr0(0x8005003b); /* XXX! */ diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c index 570c728..a85754a 100644 --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -39,7 +39,7 @@ * SUCH DAMAGE. * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 - * $Id: pmap.c,v 1.235 1999/05/18 06:01:49 alc Exp $ + * $Id: pmap.c,v 1.236 1999/05/28 05:38:56 alc Exp $ */ /* @@ -71,7 +71,6 @@ #include "opt_disable_pse.h" #include "opt_pmap.h" #include "opt_msgbuf.h" -#include "opt_vm86.h" #include "opt_user_ldt.h" #include <sys/param.h> diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s index f3d7671..7578618 100644 --- a/sys/amd64/amd64/swtch.s +++ b/sys/amd64/amd64/swtch.s @@ -33,12 +33,11 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $Id: swtch.s,v 1.80 1999/05/06 09:44:49 bde Exp $ + * $Id: swtch.s,v 1.81 1999/05/12 21:38:45 luoqi Exp $ */ #include "npx.h" #include "opt_user_ldt.h" -#include "opt_vm86.h" #include <sys/rtprio.h> @@ -277,7 +276,6 @@ _idle: /* update common_tss.tss_esp0 pointer */ movl %ecx, _common_tss + TSS_ESP0 -#ifdef VM86 movl _cpuid, %esi btrl %esi, _private_tss jae 1f @@ -294,7 +292,6 @@ _idle: movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */ ltr %si 1: -#endif /* VM86 */ sti @@ -397,7 +394,6 @@ idle_loop: /* update common_tss.tss_esp0 pointer */ movl %esp, _common_tss + TSS_ESP0 -#ifdef VM86 movl $0, %esi btrl %esi, _private_tss jae 1f @@ -413,7 +409,6 @@ idle_loop: movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */ ltr %si 1: -#endif /* VM86 */ sti @@ -630,7 +625,6 @@ swtch_com: movl %ebx,%cr3 4: -#ifdef VM86 #ifdef SMP movl _cpuid, %esi #else @@ -642,18 +636,12 @@ swtch_com: movl PCB_EXT(%edx), %edi /* new tss descriptor */ jmp 2f 1: -#endif /* update common_tss.tss_esp0 pointer */ movl %edx, %ebx /* pcb */ -#ifdef VM86 addl $(UPAGES * PAGE_SIZE - 16), %ebx -#else - addl $(UPAGES * PAGE_SIZE), %ebx -#endif /* VM86 */ movl %ebx, _common_tss + TSS_ESP0 -#ifdef VM86 btrl %esi, _private_tss jae 3f #ifdef SMP @@ -672,7 +660,6 @@ swtch_com: movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */ ltr %si 3: -#endif /* VM86 */ movl P_VMSPACE(%ecx), %ebx #ifdef SMP movl _cpuid, %eax diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c index 4c40ed4..76b45ff 100644 --- a/sys/amd64/amd64/sys_machdep.c +++ b/sys/amd64/amd64/sys_machdep.c @@ -31,12 +31,11 @@ * SUCH DAMAGE. 
* * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91 - * $Id: sys_machdep.c,v 1.40 1999/04/27 11:14:33 phk Exp $ + * $Id: sys_machdep.c,v 1.41 1999/04/28 01:03:25 luoqi Exp $ * */ #include "opt_user_ldt.h" -#include "opt_vm86.h" #include "opt_smp.h" #include <sys/param.h> @@ -71,11 +70,9 @@ void set_user_ldt __P((struct pcb *pcb)); static int i386_get_ldt __P((struct proc *, char *)); static int i386_set_ldt __P((struct proc *, char *)); #endif -#ifdef VM86 static int i386_get_ioperm __P((struct proc *, char *)); static int i386_set_ioperm __P((struct proc *, char *)); int i386_extend_pcb __P((struct proc *)); -#endif #ifndef _SYS_SYSPROTO_H_ struct sysarch_args { @@ -101,7 +98,6 @@ sysarch(p, uap) error = i386_set_ldt(p, uap->parms); break; #endif -#ifdef VM86 case I386_GET_IOPERM: error = i386_get_ioperm(p, uap->parms); break; @@ -111,7 +107,6 @@ sysarch(p, uap) case I386_VM86: error = vm86_sysarch(p, uap->parms); break; -#endif default: error = EINVAL; break; @@ -119,7 +114,6 @@ sysarch(p, uap) return (error); } -#ifdef VM86 int i386_extend_pcb(struct proc *p) { @@ -251,7 +245,6 @@ done: error = copyout(&ua, args, sizeof(struct i386_ioperm_args)); return (error); } -#endif /* VM86 */ #ifdef USER_LDT /* diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c index 411d67f..df6e295 100644 --- a/sys/amd64/amd64/trap.c +++ b/sys/amd64/amd64/trap.c @@ -35,7 +35,7 @@ * SUCH DAMAGE. 
* * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 - * $Id: trap.c,v 1.136 1999/04/28 01:03:26 luoqi Exp $ + * $Id: trap.c,v 1.137 1999/05/06 18:12:17 peter Exp $ */ /* @@ -47,7 +47,6 @@ #include "opt_ktrace.h" #include "opt_clock.h" #include "opt_trap.h" -#include "opt_vm86.h" #include <sys/param.h> #include <sys/systm.h> @@ -90,9 +89,7 @@ #include <machine/clock.h> #endif -#ifdef VM86 #include <machine/vm86.h> -#endif #ifdef DDB extern int in_Debugger, debugger_on_panic; @@ -266,7 +263,6 @@ restart: type = frame.tf_trapno; code = frame.tf_err; -#ifdef VM86 if (in_vm86call) { if (frame.tf_eflags & PSL_VM && (type == T_PROTFLT || type == T_STKFLT)) { @@ -293,7 +289,6 @@ restart: } goto kernel_trap; /* normal kernel trap handling */ } -#endif if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) { /* user trap */ @@ -335,14 +330,12 @@ restart: */ case T_PROTFLT: /* general protection fault */ case T_STKFLT: /* stack fault */ -#ifdef VM86 if (frame.tf_eflags & PSL_VM) { i = vm86_emulate((struct vm86frame *)&frame); if (i == 0) goto out; break; } -#endif /* VM86 */ /* FALL THROUGH */ case T_SEGNPFLT: /* segment not present fault */ @@ -426,9 +419,7 @@ restart: break; } } else { -#ifdef VM86 kernel_trap: -#endif /* kernel trap */ switch (type) { diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c index 48a248f..0451ccb 100644 --- a/sys/amd64/amd64/vm_machdep.c +++ b/sys/amd64/amd64/vm_machdep.c @@ -38,12 +38,11 @@ * * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ - * $Id: vm_machdep.c,v 1.120 1999/02/19 14:25:33 luoqi Exp $ + * $Id: vm_machdep.c,v 1.121 1999/04/19 14:14:13 peter Exp $ */ #include "npx.h" #include "opt_user_ldt.h" -#include "opt_vm86.h" #ifdef PC98 #include "opt_pc98.h" #endif @@ -64,10 +63,8 @@ #ifdef SMP #include <machine/smp.h> #endif -#ifdef VM86 #include <machine/pcb_ext.h> #include <machine/vm86.h> -#endif #include <vm/vm.h> #include <vm/vm_param.h> @@ -133,11 +130,7 @@ 
cpu_fork(p1, p2) * syscall. This copies the user mode register values. */ p2->p_md.md_regs = (struct trapframe *) -#ifdef VM86 ((int)p2->p_addr + UPAGES * PAGE_SIZE - 16) - 1; -#else - ((int)p2->p_addr + UPAGES * PAGE_SIZE) - 1; -#endif /* VM86 */ *p2->p_md.md_regs = *p1->p_md.md_regs; /* @@ -162,12 +155,10 @@ cpu_fork(p1, p2) #ifdef SMP pcb2->pcb_mpnest = 1; #endif -#ifdef VM86 /* * XXX don't copy the i/o pages. this should probably be fixed. */ pcb2->pcb_ext = 0; -#endif #ifdef USER_LDT /* Copy the LDT, if necessary. */ @@ -216,14 +207,11 @@ void cpu_exit(p) register struct proc *p; { -#if defined(USER_LDT) || defined(VM86) struct pcb *pcb = &p->p_addr->u_pcb; -#endif #if NNPX > 0 npxexit(p); #endif /* NNPX */ -#ifdef VM86 if (pcb->pcb_ext != 0) { /* * XXX do we need to move the TSS off the allocated pages @@ -233,7 +221,6 @@ cpu_exit(p) ctob(IOPAGES + 1)); pcb->pcb_ext = 0; } -#endif #ifdef USER_LDT if (pcb->pcb_ldt != 0) { if (pcb == curpcb) { diff --git a/sys/amd64/include/mptable.h b/sys/amd64/include/mptable.h index ad9e6c6..3365d30 100644 --- a/sys/amd64/include/mptable.h +++ b/sys/amd64/include/mptable.h @@ -22,11 +22,10 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: mp_machdep.c,v 1.100 1999/05/08 17:48:22 peter Exp $ + * $Id: mp_machdep.c,v 1.101 1999/05/12 21:38:43 luoqi Exp $ */ #include "opt_smp.h" -#include "opt_vm86.h" #include "opt_cpu.h" #include "opt_user_ldt.h" @@ -482,10 +481,8 @@ init_secondary(void) common_tss.tss_esp0 = 0; /* not used until after switch */ common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL); common_tss.tss_ioopt = (sizeof common_tss) << 16; -#ifdef VM86 tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd; common_tssd = *tss_gdt; -#endif ltr(gsel_tss); load_cr0(0x8005003b); /* XXX! */ diff --git a/sys/amd64/include/pcb.h b/sys/amd64/include/pcb.h index 9ab0856..2dbc707 100644 --- a/sys/amd64/include/pcb.h +++ b/sys/amd64/include/pcb.h @@ -34,7 +34,7 @@ * SUCH DAMAGE. 
* * from: @(#)pcb.h 5.10 (Berkeley) 5/12/91 - * $Id: pcb.h,v 1.26 1998/02/03 21:27:50 bde Exp $ + * $Id: pcb.h,v 1.27 1999/04/28 01:04:05 luoqi Exp $ */ #ifndef _I386_PCB_H_ @@ -66,11 +66,7 @@ struct pcb { u_long pcb_mpnest_dontuse; #endif int pcb_gs; -#ifdef VM86 struct pcb_ext *pcb_ext; /* optional pcb extension */ -#else - struct pcb_ext *pcb_ext_dontuse; -#endif u_long __pcb_spare[2]; /* adjust to avoid core dump size changes */ }; diff --git a/sys/amd64/include/pcpu.h b/sys/amd64/include/pcpu.h index 456c502..c02009b 100644 --- a/sys/amd64/include/pcpu.h +++ b/sys/amd64/include/pcpu.h @@ -23,7 +23,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: globaldata.h,v 1.8 1999/04/28 01:04:00 luoqi Exp $ + * $Id: globaldata.h,v 1.9 1999/05/12 21:39:00 luoqi Exp $ */ /* @@ -46,10 +46,8 @@ struct globaldata { struct timeval gd_switchtime; struct i386tss gd_common_tss; int gd_switchticks; -#ifdef VM86 struct segment_descriptor gd_common_tssd; struct segment_descriptor *gd_tss_gdt; -#endif #ifdef USER_LDT int gd_currentldt; #endif |