Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/exception.S    14
-rw-r--r--  sys/amd64/amd64/exception.s    14
-rw-r--r--  sys/amd64/amd64/locore.S       53
-rw-r--r--  sys/amd64/amd64/locore.s       53
-rw-r--r--  sys/amd64/amd64/machdep.c      65
-rw-r--r--  sys/amd64/amd64/sys_machdep.c   5
-rw-r--r--  sys/amd64/amd64/trap.c         34
-rw-r--r--  sys/amd64/amd64/vm_machdep.c   10
8 files changed, 208 insertions, 40 deletions
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index 596f4db..0b16ebe 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.46 1997/10/27 16:35:34 bde Exp $
+ * $Id: exception.s,v 1.47 1997/10/27 17:19:20 bde Exp $
*/
#include "npx.h"
@@ -224,6 +224,10 @@ calltrap:
#ifndef SMP
subl %eax,%eax
#endif
+#ifdef VM86
+ cmpl $1,_in_vm86call
+ je 2f /* keep kernel cpl */
+#endif
testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
jne 1f
#ifdef VM86
@@ -231,6 +235,7 @@ calltrap:
jne 1f
#endif /* VM86 */
+2:
#ifdef SMP
ECPL_LOCK
#ifdef CPL_AND_CML
@@ -362,6 +367,13 @@ ENTRY(fork_trampoline)
jmp _doreti
+#ifdef VM86
+/*
+ * Include vm86 call routines, which want to call _doreti.
+ */
+#include "i386/i386/vm86bios.s"
+#endif /* VM86 */
+
/*
* Include what was once config+isa-dependent code.
* XXX it should be in a stand-alone file. It's still icu-dependent and
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
index 596f4db..0b16ebe 100644
--- a/sys/amd64/amd64/exception.s
+++ b/sys/amd64/amd64/exception.s
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: exception.s,v 1.46 1997/10/27 16:35:34 bde Exp $
+ * $Id: exception.s,v 1.47 1997/10/27 17:19:20 bde Exp $
*/
#include "npx.h"
@@ -224,6 +224,10 @@ calltrap:
#ifndef SMP
subl %eax,%eax
#endif
+#ifdef VM86
+ cmpl $1,_in_vm86call
+ je 2f /* keep kernel cpl */
+#endif
testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
jne 1f
#ifdef VM86
@@ -231,6 +235,7 @@ calltrap:
jne 1f
#endif /* VM86 */
+2:
#ifdef SMP
ECPL_LOCK
#ifdef CPL_AND_CML
@@ -362,6 +367,13 @@ ENTRY(fork_trampoline)
jmp _doreti
+#ifdef VM86
+/*
+ * Include vm86 call routines, which want to call _doreti.
+ */
+#include "i386/i386/vm86bios.s"
+#endif /* VM86 */
+
/*
* Include what was once config+isa-dependent code.
* XXX it should be in a stand-alone file. It's still icu-dependent and
diff --git a/sys/amd64/amd64/locore.S b/sys/amd64/amd64/locore.S
index e5db5cc..a7f0436 100644
--- a/sys/amd64/amd64/locore.S
+++ b/sys/amd64/amd64/locore.S
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
- * $Id: locore.s,v 1.103 1998/01/09 03:20:58 eivind Exp $
+ * $Id: locore.s,v 1.104 1998/01/31 02:53:41 eivind Exp $
*
* originally from: locore.s, by William F. Jolitz
*
@@ -185,6 +185,12 @@ _KPTphys: .long 0 /* phys addr of kernel page tables */
_proc0paddr: .long 0 /* address of proc 0 address space */
p0upa: .long 0 /* phys addr of proc0's UPAGES */
+#ifdef VM86
+ .globl _vm86paddr, _vm86pa
+_vm86paddr: .long 0 /* address of vm86 region */
+_vm86pa: .long 0 /* phys addr of vm86 region */
+#endif
+
#ifdef BDE_DEBUGGER
.globl _bdb_exists /* flag to indicate BDE debugger is present */
_bdb_exists: .long 0
@@ -828,6 +834,13 @@ over_symalloc:
addl $KERNBASE, %esi
movl %esi, R(_proc0paddr)
+#ifdef VM86
+ ALLOCPAGES(4) /* IOPAGES + ext + stack */
+ movl %esi,R(_vm86pa)
+ addl $KERNBASE, %esi
+ movl %esi, R(_vm86paddr)
+#endif /* VM86 */
+
#ifdef SMP
/* Allocate cpu0's private data page */
ALLOCPAGES(1)
@@ -894,6 +907,25 @@ map_read_write:
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
fillkptphys($PG_RW)
+#ifdef VM86
+/* Map space for the vm86 region */
+ movl R(_vm86pa), %eax
+ movl $4, %ecx
+ fillkptphys($PG_RW)
+
+/* Map page 0 into the vm86 page table */
+ movl $0, %eax
+ movl $0, %ebx
+ movl $1, %ecx
+ fillkpt(R(_vm86pa), $PG_RW|PG_U)
+
+/* ...likewise for the ISA hole */
+ movl $ISA_HOLE_START, %eax
+ movl $ISA_HOLE_START>>PAGE_SHIFT, %ebx
+ movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
+ fillkpt(R(_vm86pa), $PG_RW|PG_U)
+#endif /* VM86 */
+
#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
movl R(cpu0pp), %eax
@@ -922,6 +954,25 @@ map_read_write:
movl $MPPTDI, %ebx
movl $1, %ecx
fillkpt(R(_IdlePTD), $PG_RW)
+
+/* Fakeup VA for the local apic to allow early traps. */
+ ALLOCPAGES(1)
+ movl %esi, %eax
+ movl $2, %ebx /* pte offset = 2 */
+ movl $1, %ecx /* one private pt coming right up */
+ fillkpt(R(cpu0pt), $PG_RW)
+
+/* Initialize mp lock to allow early traps */
+ movl $1, R(_mp_lock)
+
+/* Initialize curproc to &proc0 */
+ movl R(cpu0pp), %eax
+ movl $CNAME(proc0), 4(%eax)
+
+/* Initialize my_idlePTD to IdlePTD */
+ movl R(_IdlePTD), %ecx
+ movl %ecx,32(%eax)
+
#endif /* SMP */
/* install a pde for temporary double map of bottom of VA */
diff --git a/sys/amd64/amd64/locore.s b/sys/amd64/amd64/locore.s
index e5db5cc..a7f0436 100644
--- a/sys/amd64/amd64/locore.s
+++ b/sys/amd64/amd64/locore.s
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
- * $Id: locore.s,v 1.103 1998/01/09 03:20:58 eivind Exp $
+ * $Id: locore.s,v 1.104 1998/01/31 02:53:41 eivind Exp $
*
* originally from: locore.s, by William F. Jolitz
*
@@ -185,6 +185,12 @@ _KPTphys: .long 0 /* phys addr of kernel page tables */
_proc0paddr: .long 0 /* address of proc 0 address space */
p0upa: .long 0 /* phys addr of proc0's UPAGES */
+#ifdef VM86
+ .globl _vm86paddr, _vm86pa
+_vm86paddr: .long 0 /* address of vm86 region */
+_vm86pa: .long 0 /* phys addr of vm86 region */
+#endif
+
#ifdef BDE_DEBUGGER
.globl _bdb_exists /* flag to indicate BDE debugger is present */
_bdb_exists: .long 0
@@ -828,6 +834,13 @@ over_symalloc:
addl $KERNBASE, %esi
movl %esi, R(_proc0paddr)
+#ifdef VM86
+ ALLOCPAGES(4) /* IOPAGES + ext + stack */
+ movl %esi,R(_vm86pa)
+ addl $KERNBASE, %esi
+ movl %esi, R(_vm86paddr)
+#endif /* VM86 */
+
#ifdef SMP
/* Allocate cpu0's private data page */
ALLOCPAGES(1)
@@ -894,6 +907,25 @@ map_read_write:
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
fillkptphys($PG_RW)
+#ifdef VM86
+/* Map space for the vm86 region */
+ movl R(_vm86pa), %eax
+ movl $4, %ecx
+ fillkptphys($PG_RW)
+
+/* Map page 0 into the vm86 page table */
+ movl $0, %eax
+ movl $0, %ebx
+ movl $1, %ecx
+ fillkpt(R(_vm86pa), $PG_RW|PG_U)
+
+/* ...likewise for the ISA hole */
+ movl $ISA_HOLE_START, %eax
+ movl $ISA_HOLE_START>>PAGE_SHIFT, %ebx
+ movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
+ fillkpt(R(_vm86pa), $PG_RW|PG_U)
+#endif /* VM86 */
+
#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
movl R(cpu0pp), %eax
@@ -922,6 +954,25 @@ map_read_write:
movl $MPPTDI, %ebx
movl $1, %ecx
fillkpt(R(_IdlePTD), $PG_RW)
+
+/* Fakeup VA for the local apic to allow early traps. */
+ ALLOCPAGES(1)
+ movl %esi, %eax
+ movl $2, %ebx /* pte offset = 2 */
+ movl $1, %ecx /* one private pt coming right up */
+ fillkpt(R(cpu0pt), $PG_RW)
+
+/* Initialize mp lock to allow early traps */
+ movl $1, R(_mp_lock)
+
+/* Initialize curproc to &proc0 */
+ movl R(cpu0pp), %eax
+ movl $CNAME(proc0), 4(%eax)
+
+/* Initialize my_idlePTD to IdlePTD */
+ movl R(_IdlePTD), %ecx
+ movl %ecx,32(%eax)
+
#endif /* SMP */
/* install a pde for temporary double map of bottom of VA */
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 14f0edc..1ecb116 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
- * $Id: machdep.c,v 1.291 1998/03/05 19:37:03 tegge Exp $
+ * $Id: machdep.c,v 1.292 1998/03/07 20:16:47 tegge Exp $
*/
#include "apm.h"
@@ -645,8 +645,6 @@ sigreturn(p, uap)
if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
trapsignal(p, SIGBUS, 0);
-#define VM_USERCHANGE (PSL_USERCHANGE | PSL_RF)
-#define VME_USERCHANGE (VM_USERCHANGE | PSL_VIP | PSL_VIF)
if (vm86->vm86_has_vme) {
eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
(eflags & VME_USERCHANGE) | PSL_VM;
@@ -1239,11 +1237,43 @@ init386(first)
setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
initializecpu(); /* Initialize CPU registers */
+ /* make an initial tss so cpu can get interrupt stack on syscall! */
+#ifdef VM86
+ common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
+#else
+ common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
+#endif /* VM86 */
+ common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
+ common_tss.tss_ioopt = (sizeof common_tss) << 16;
+ gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
+ ltr(gsel_tss);
+#ifdef VM86
+ private_tss = 0;
+ my_tr = GPROC0_SEL;
+#endif
+
+ dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
+ dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
+ dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
+ dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
+ dblfault_tss.tss_cr3 = (int)IdlePTD;
+ dblfault_tss.tss_eip = (int) dblfault_handler;
+ dblfault_tss.tss_eflags = PSL_KERNEL;
+ dblfault_tss.tss_ds = dblfault_tss.tss_es = dblfault_tss.tss_fs =
+ dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
+ dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
+ dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
+
+#ifdef VM86
+ initial_bioscalls(&biosbasemem, &biosextmem);
+#else
+
/* Use BIOS values stored in RTC CMOS RAM, since probing
* breaks certain 386 AT relics.
*/
biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
+#endif
/*
* If BIOS tells us that it has more than 640k in the basemem,
@@ -1290,7 +1320,7 @@ init386(first)
* remain read-only and are unused by the kernel.
* The base memory area is below the physical end of
* the kernel and right now forms a read-only hole.
- * The part of it from 0 to
+ * The part of it from PAGE_SIZE to
* (trunc_page(biosbasemem * 1024) - 1) will be
* remapped and used by the kernel later.)
*
@@ -1505,33 +1535,6 @@ init386(first)
avail_end + off, VM_PROT_ALL, TRUE);
msgbufmapped = 1;
- /* make an initial tss so cpu can get interrupt stack on syscall! */
-#ifdef VM86
- common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
-#else
- common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
-#endif /* VM86 */
- common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
- common_tss.tss_ioopt = (sizeof common_tss) << 16;
- gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
- ltr(gsel_tss);
-#ifdef VM86
- private_tss = 0;
- my_tr = GPROC0_SEL;
-#endif
-
- dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
- dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
- dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
- dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
- dblfault_tss.tss_cr3 = (int)IdlePTD;
- dblfault_tss.tss_eip = (int) dblfault_handler;
- dblfault_tss.tss_eflags = PSL_KERNEL;
- dblfault_tss.tss_ds = dblfault_tss.tss_es = dblfault_tss.tss_fs =
- dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
- dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
- dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
-
/* make a call gate to reenter kernel with */
gdp = &ldt[LSYS5CALLS_SEL].gd;
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
index aed0483..af0ec8e 100644
--- a/sys/amd64/amd64/sys_machdep.c
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
- * $Id: sys_machdep.c,v 1.32 1998/02/09 06:08:18 eivind Exp $
+ * $Id: sys_machdep.c,v 1.33 1998/02/13 05:25:37 bde Exp $
*
*/
@@ -140,7 +140,7 @@ i386_extend_pcb(struct proc *p)
if (ext == 0)
return (ENOMEM);
p->p_addr->u_pcb.pcb_ext = ext;
- bzero(&ext->ext_tss, sizeof(struct i386tss));
+ bzero(ext, sizeof(struct pcb_ext));
ext->ext_tss.tss_esp0 = (unsigned)p->p_addr + ctob(UPAGES) - 16;
ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
/*
@@ -153,7 +153,6 @@ i386_extend_pcb(struct proc *p)
(offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
ext->ext_iomap = (caddr_t)ext + offset;
ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;
- ext->ext_vm86.vm86_inited = 0;
addr = (u_long *)ext->ext_vm86.vm86_intmap;
for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 79715f5..5e1bb32 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
- * $Id: trap.c,v 1.121 1998/02/04 22:32:12 eivind Exp $
+ * $Id: trap.c,v 1.122 1998/02/06 12:13:10 eivind Exp $
*/
/*
@@ -225,6 +225,35 @@ restart:
type = frame.tf_trapno;
code = frame.tf_err;
+#ifdef VM86
+ if (in_vm86call) {
+ if (frame.tf_eflags & PSL_VM &&
+ (type == T_PROTFLT || type == T_STKFLT)) {
+ i = vm86_emulate((struct vm86frame *)&frame);
+ if (i != 0)
+ /*
+ * returns to original process
+ */
+ vm86_trap((struct vm86frame *)&frame);
+ return;
+ }
+ switch (type) {
+ /*
+ * these traps want either a process context, or
+ * assume a normal userspace trap.
+ */
+ case T_PROTFLT:
+ case T_SEGNPFLT:
+ trap_fatal(&frame);
+ return;
+ case T_TRCTRAP:
+ type = T_BPTFLT; /* kernel breakpoint */
+ /* FALL THROUGH */
+ }
+ goto kernel_trap; /* normal kernel trap handling */
+ }
+#endif
+
if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
/* user trap */
@@ -356,6 +385,9 @@ restart:
break;
}
} else {
+#ifdef VM86
+kernel_trap:
+#endif
/* kernel trap */
switch (type) {
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 8ac19b1..b157bb6 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- * $Id: vm_machdep.c,v 1.103 1998/03/14 03:02:15 tegge Exp $
+ * $Id: vm_machdep.c,v 1.104 1998/03/17 09:10:05 kato Exp $
*/
#include "npx.h"
@@ -65,6 +65,10 @@
#ifdef SMP
#include <machine/smp.h>
#endif
+#ifdef VM86
+#include <machine/pcb_ext.h>
+#include <machine/vm86.h>
+#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -599,7 +603,11 @@ cpu_fork(p1, p2)
* syscall. This copies the user mode register values.
*/
p2->p_md.md_regs = (struct trapframe *)
+#ifdef VM86
+ ((int)p2->p_addr + UPAGES * PAGE_SIZE - 16) - 1;
+#else
((int)p2->p_addr + UPAGES * PAGE_SIZE) - 1;
+#endif /* VM86 */
*p2->p_md.md_regs = *p1->p_md.md_regs;
/*