| field | value | |
|---|---|---|
| author | dyson <dyson@FreeBSD.org> | 1997-08-09 00:04:06 +0000 |
| committer | dyson <dyson@FreeBSD.org> | 1997-08-09 00:04:06 +0000 |
| commit | ad0649e2b977efaa77b68c699a1f44b12e7429d1 (patch) | |
| tree | 925573ddb83b4a0cf21a7d136de8f44817243b6c | |
| parent | 56b351207af0157d77bdd770e72c7ff038d57164 (diff) | |
VM86 kernel support.
Work done by BSDI, Jonathan Lemon <jlemon@americantv.com>,
Mike Smith <msmith@gsoft.com.au>, Sean Eric Fagan <sef@kithrup.com>,
and probably a lot of others.
Submitted by: Jonathan Lemon <jlemon@americantv.com>
41 files changed, 987 insertions, 245 deletions
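
Among other things, the diff below adds `I386_GET_IOPERM`/`I386_SET_IOPERM` subcommands to sysarch(2) and a `struct i386_ioperm_args` (`start`, `length`, `enable`) in `sys_machdep.c`, letting a privileged process open ranges of the per-process I/O permission bitmap carried in the new pcb extension. A minimal userland sketch of driving that interface is shown here; the `sysarch()` prototype, header location, and port range are assumptions for illustration and are not part of this commit.

```c
/*
 * Hypothetical sketch: request access to I/O ports 0x378-0x37a
 * through the I386_SET_IOPERM sysarch(2) subcommand added in this
 * commit.  Must run as root (the kernel side calls suser()).
 */
#include <sys/types.h>
#include <machine/sysarch.h>	/* I386_SET_IOPERM; sysarch() prototype assumed here */
#include <stdio.h>
#include <stdlib.h>

/* mirrors struct i386_ioperm_args from i386/i386/sys_machdep.c */
struct ioperm_req {
	u_short	start;		/* first port in the range */
	u_short	length;		/* number of ports */
	u_char	enable;		/* non-zero: grant access */
};

int
main(void)
{
	struct ioperm_req req;

	req.start = 0x378;	/* example: parallel port registers */
	req.length = 3;
	req.enable = 1;

	if (sysarch(I386_SET_IOPERM, (char *)&req) < 0) {
		perror("sysarch(I386_SET_IOPERM)");
		exit(1);
	}
	printf("ports 0x378-0x37a enabled\n");
	return (0);
}
```

On the first such call the kernel allocates the pcb extension (`i386_extend_pcb()`), which also carries the private TSS and vm86 state that the cpu_switch and trap changes below depend on.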
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S index 14d0a8a..61b1dfa 100644 --- a/sys/amd64/amd64/cpu_switch.S +++ b/sys/amd64/amd64/cpu_switch.S @@ -33,7 +33,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: swtch.s,v 1.5 1997/08/04 17:17:29 smp Exp smp $ + * $Id: swtch.s,v 1.58 1997/08/04 17:31:43 fsmp Exp $ */ #include "npx.h" @@ -257,16 +257,27 @@ _idle: movl %ecx,%cr3 /* update common_tss.tss_esp0 pointer */ +#ifdef VM86 + movl $GPROC0_SEL, %esi +#endif /* VM86 */ movl $_common_tss, %eax movl %esp, TSS_ESP0(%eax) -#ifdef TSS_IS_CACHED /* example only */ - /* Reload task register to force reload of selector */ - movl _tssptr, %ebx - andb $~0x02, 5(%ebx) /* Flip 386BSY -> 386TSS */ - movl _gsel_tss, %ebx - ltr %bx -#endif +#ifdef VM86 + btrl %esi, _private_tss + je 1f + movl $_common_tssd, %edi + + /* move correct tss descriptor into GDT slot, then reload tr */ + leal _gdt(,%esi,8), %ebx /* entry in GDT */ + movl 0(%edi), %eax + movl %eax, 0(%ebx) + movl 4(%edi), %eax + movl %eax, 4(%ebx) + shll $3, %esi /* GSEL(entry, SEL_KPL) */ + ltr %si +1: +#endif /* VM86 */ sti @@ -472,33 +483,41 @@ swtch_com: movl %ebx, %cr3 #endif /* SMP */ -#ifdef HOW_TO_SWITCH_TSS /* example only */ - /* Fix up tss pointer to floating pcb/stack structure */ - /* XXX probably lots faster to store the 64 bits of tss entry - * in the pcb somewhere and copy them on activation. - */ - movl _tssptr, %ebx - movl %edx, %eax /* edx = pcb/tss */ - movw %ax, 2(%ebx) /* store bits 0->15 */ - roll $16, %eax /* swap upper and lower */ - movb %al, 4(%ebx) /* store bits 16->23 */ - movb %ah, 7(%ebx) /* store bits 24->31 */ - andb $~0x02, 5(%ebx) /* Flip 386BSY -> 386TSS */ +#ifdef VM86 + movl $GPROC0_SEL, %esi + cmpl $0, PCB_EXT(%edx) /* has pcb extension? */ + je 1f + btsl %esi, _private_tss /* mark use of private tss */ + movl PCB_EXT(%edx), %edi /* new tss descriptor */ + jmp 2f +1: #endif /* update common_tss.tss_esp0 pointer */ movl $_common_tss, %eax movl %edx, %ebx /* pcb */ +#ifdef VM86 + addl $(UPAGES * PAGE_SIZE - 16), %ebx +#else addl $(UPAGES * PAGE_SIZE), %ebx +#endif /* VM86 */ movl %ebx, TSS_ESP0(%eax) -#ifdef TSS_IS_CACHED /* example only */ - /* Reload task register to force reload of selector */ - movl _tssptr, %ebx - andb $~0x02, 5(%ebx) /* Flip 386BSY -> 386TSS */ - movl _gsel_tss, %ebx - ltr %bx -#endif +#ifdef VM86 + btrl %esi, _private_tss + je 3f + movl $_common_tssd, %edi +2: + /* move correct tss descriptor into GDT slot, then reload tr */ + leal _gdt(,%esi,8), %ebx /* entry in GDT */ + movl 0(%edi), %eax + movl %eax, 0(%ebx) + movl 4(%edi), %eax + movl %eax, 4(%ebx) + shll $3, %esi /* GSEL(entry, SEL_KPL) */ + ltr %si +3: +#endif /* VM86 */ /* restore context */ movl PCB_EBX(%edx),%ebx diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S index 18715d2..f51cd0f 100644 --- a/sys/amd64/amd64/exception.S +++ b/sys/amd64/amd64/exception.S @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $Id: exception.s,v 1.3 1997/07/30 22:51:11 smp Exp smp $ + * $Id: exception.s,v 1.34 1997/07/31 05:43:02 fsmp Exp $ */ #include "npx.h" /* NNPX */ @@ -220,6 +220,10 @@ calltrap: subl %eax,%eax testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp) jne 1f +#ifdef VM86 + testl $PSL_VM,TF_EFLAGS(%esp) + jne 1f +#endif /* VM86 */ movl _cpl,%eax 1: /* diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s index 18715d2..f51cd0f 100644 --- a/sys/amd64/amd64/exception.s +++ b/sys/amd64/amd64/exception.s @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: exception.s,v 1.3 1997/07/30 22:51:11 smp Exp smp $ + * $Id: exception.s,v 1.34 1997/07/31 05:43:02 fsmp Exp $ */ #include "npx.h" /* NNPX */ @@ -220,6 +220,10 @@ calltrap: subl %eax,%eax testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp) jne 1f +#ifdef VM86 + testl $PSL_VM,TF_EFLAGS(%esp) + jne 1f +#endif /* VM86 */ movl _cpl,%eax 1: /* diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c index 4dd1e3c..32fa57a 100644 --- a/sys/amd64/amd64/fpu.c +++ b/sys/amd64/amd64/fpu.c @@ -32,7 +32,7 @@ * SUCH DAMAGE. * * from: @(#)npx.c 7.2 (Berkeley) 5/12/91 - * $Id: npx.c,v 1.3 1997/07/20 23:30:38 smp Exp smp $ + * $Id: npx.c,v 1.49 1997/07/21 07:57:50 fsmp Exp $ */ #include "npx.h" @@ -515,7 +515,7 @@ npxintr(unit) * Pass exception to process. */ frame = (struct intrframe *)&unit; /* XXX */ - if (ISPL(frame->if_cs) == SEL_UPL) { + if ((ISPL(frame->if_cs) == SEL_UPL) || (frame->if_eflags & PSL_VM)) { /* * Interrupt is essentially a trap, so we can afford to call * the SIGFPE handler (if any) as soon as the interrupt diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c index d9473cf..38bb5c0 100644 --- a/sys/amd64/amd64/genassym.c +++ b/sys/amd64/amd64/genassym.c @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 - * $Id: genassym.c,v 1.46 1997/05/10 08:01:13 bde Exp $ + * $Id: genassym.c,v 1.47 1997/06/07 04:36:09 bde Exp $ */ #include <stdio.h> @@ -129,6 +129,9 @@ main() printf("#define\tPCB_USERLDT %p\n", &pcb->pcb_ldt); printf("#define\tPCB_FS %p\n", &pcb->pcb_fs); printf("#define\tPCB_GS %p\n", &pcb->pcb_gs); +#ifdef VM86 + printf("#define\tPCB_EXT %p\n", &pcb->pcb_ext); +#endif /* VM86 */ #ifdef SMP printf("#define\tPCB_MPNEST %p\n", &pcb->pcb_mpnest); #endif @@ -184,5 +187,7 @@ main() printf("#define\tBI_SYMTAB %p\n", &bootinfo->bi_symtab); printf("#define\tBI_ESYMTAB %p\n", &bootinfo->bi_esymtab); + printf("#define\tGPROC0_SEL %d\n", GPROC0_SEL); + return (0); } diff --git a/sys/amd64/amd64/locore.S b/sys/amd64/amd64/locore.S index 57af042..a420cbc 100644 --- a/sys/amd64/amd64/locore.S +++ b/sys/amd64/amd64/locore.S @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)locore.s 7.3 (Berkeley) 5/13/91 - * $Id: locore.s,v 1.92 1997/07/17 19:44:53 dyson Exp $ + * $Id: locore.s,v 1.93 1997/07/20 08:37:18 bde Exp $ * * originally from: locore.s, by William F. Jolitz * @@ -335,6 +335,18 @@ _pc98_system_parameter: call create_pagetables +#ifdef VM86 +/* + * If the CPU has support for VME, turn it on. + */ + testl $CPUID_VME, R(_cpu_feature) + jz 1f + movl %cr4, %eax + orl $CR4_VME, %eax + movl %eax, %cr4 +1: +#endif /* VM86 */ + #ifdef BDE_DEBUGGER /* * Adjust as much as possible for paging before enabling paging so that the diff --git a/sys/amd64/amd64/locore.s b/sys/amd64/amd64/locore.s index 57af042..a420cbc 100644 --- a/sys/amd64/amd64/locore.s +++ b/sys/amd64/amd64/locore.s @@ -34,7 +34,7 @@ * SUCH DAMAGE. 
* * from: @(#)locore.s 7.3 (Berkeley) 5/13/91 - * $Id: locore.s,v 1.92 1997/07/17 19:44:53 dyson Exp $ + * $Id: locore.s,v 1.93 1997/07/20 08:37:18 bde Exp $ * * originally from: locore.s, by William F. Jolitz * @@ -335,6 +335,18 @@ _pc98_system_parameter: call create_pagetables +#ifdef VM86 +/* + * If the CPU has support for VME, turn it on. + */ + testl $CPUID_VME, R(_cpu_feature) + jz 1f + movl %cr4, %eax + orl $CR4_VME, %eax + movl %eax, %cr4 +1: +#endif /* VM86 */ + #ifdef BDE_DEBUGGER /* * Adjust as much as possible for paging before enabling paging so that the diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c index dbaacef..0205647 100644 --- a/sys/amd64/amd64/machdep.c +++ b/sys/amd64/amd64/machdep.c @@ -35,7 +35,7 @@ * SUCH DAMAGE. * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 - * $Id: machdep.c,v 1.253 1997/07/20 08:37:19 bde Exp $ + * $Id: machdep.c,v 1.254 1997/08/05 00:01:10 dyson Exp $ */ #include "apm.h" @@ -106,6 +106,7 @@ #include <machine/cons.h> #include <machine/bootinfo.h> #include <machine/md_var.h> +#include <machine/pcb_ext.h> #ifdef SMP #include <machine/smp.h> #endif @@ -476,7 +477,7 @@ sendsig(catcher, sig, mask, code) * if access is denied. */ if ((grow(p, (int)fp) == FALSE) || - (useracc((caddr_t)fp, sizeof (struct sigframe), B_WRITE) == FALSE)) { + (useracc((caddr_t)fp, sizeof(struct sigframe), B_WRITE) == FALSE)) { /* * Process has trashed its stack; give it an illegal * instruction to halt it in its tracks. @@ -527,6 +528,34 @@ sendsig(catcher, sig, mask, code) sf.sf_sc.sc_fp = regs->tf_ebp; sf.sf_sc.sc_pc = regs->tf_eip; sf.sf_sc.sc_ps = regs->tf_eflags; + sf.sf_sc.sc_trapno = regs->tf_trapno; + sf.sf_sc.sc_err = regs->tf_err; + + /* + * If we're a vm86 process, we want to save the segment registers. + * We also change eflags to be our emulated eflags, not the actual + * eflags. + */ + if (regs->tf_eflags & PSL_VM) { + struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; + struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; + + sf.sf_sc.sc_gs = tf->tf_vm86_gs; + sf.sf_sc.sc_fs = tf->tf_vm86_fs; + sf.sf_sc.sc_es = tf->tf_vm86_es; + sf.sf_sc.sc_ds = tf->tf_vm86_ds; + + if (vm86->vm86_has_vme == 0) + sf.sf_sc.sc_ps = (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) + | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); + + /* + * We should never have PSL_T set when returning from vm86 + * mode. It may be set here if we deliver a signal before + * getting to vm86 mode, so turn it off. + */ + tf->tf_eflags &= ~(PSL_VM | PSL_T | PSL_VIF | PSL_VIP); + } /* * Copy the sigframe out to the user's stack. @@ -537,11 +566,10 @@ sendsig(catcher, sig, mask, code) * ...Kill the process. */ sigexit(p, SIGILL); - }; + } regs->tf_esp = (int)fp; regs->tf_eip = (int)(((char *)PS_STRINGS) - *(p->p_sysent->sv_szsigcode)); - regs->tf_eflags &= ~PSL_VM; regs->tf_cs = _ucodesel; regs->tf_ds = _udatasel; regs->tf_es = _udatasel; @@ -583,42 +611,78 @@ sigreturn(p, uap, retval) if (useracc((caddr_t)fp, sizeof (*fp), B_WRITE) == 0) return(EFAULT); - /* - * Don't allow users to change privileged or reserved flags. - */ -#define EFLAGS_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) eflags = scp->sc_ps; - /* - * XXX do allow users to change the privileged flag PSL_RF. The - * cpu sets PSL_RF in tf_eflags for faults. Debuggers should - * sometimes set it there too. tf_eflags is kept in the signal - * context during signal handling and there is no other place - * to remember it, so the PSL_RF bit may be corrupted by the - * signal handler without us knowing. 
Corruption of the PSL_RF - * bit at worst causes one more or one less debugger trap, so - * allowing it is fairly harmless. - */ - if (!EFLAGS_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { + if (eflags & PSL_VM) { + struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; + struct vm86_kernel *vm86; + + /* + * if pcb_ext == 0 or vm86_inited == 0, the user hasn't + * set up the vm86 area, and we can't enter vm86 mode. + */ + if (p->p_addr->u_pcb.pcb_ext == 0) + return (EINVAL); + vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; + if (vm86->vm86_inited == 0) + return (EINVAL); + + /* go back to user mode if both flags are set */ + if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) + trapsignal(p, SIGBUS, 0); + +#define VM_USERCHANGE (PSL_USERCHANGE | PSL_RF) +#define VME_USERCHANGE (VM_USERCHANGE | PSL_VIP | PSL_VIF) + if (vm86->vm86_has_vme) { + eflags = (tf->tf_eflags & ~VME_USERCHANGE) | + (eflags & VME_USERCHANGE) | PSL_VM; + } else { + vm86->vm86_eflags = eflags; /* save VIF, VIP */ + eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; + } + tf->tf_vm86_ds = scp->sc_ds; + tf->tf_vm86_es = scp->sc_es; + tf->tf_vm86_fs = scp->sc_fs; + tf->tf_vm86_gs = scp->sc_gs; + tf->tf_ds = _udatasel; + tf->tf_es = _udatasel; + } else { + /* + * Don't allow users to change privileged or reserved flags. + */ +#define EFLAGS_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) + /* + * XXX do allow users to change the privileged flag PSL_RF. + * The cpu sets PSL_RF in tf_eflags for faults. Debuggers + * should sometimes set it there too. tf_eflags is kept in + * the signal context during signal handling and there is no + * other place to remember it, so the PSL_RF bit may be + * corrupted by the signal handler without us knowing. + * Corruption of the PSL_RF bit at worst causes one more or + * one less debugger trap, so allowing it is fairly harmless. + */ + if (!EFLAGS_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { #ifdef DEBUG - printf("sigreturn: eflags = 0x%x\n", eflags); + printf("sigreturn: eflags = 0x%x\n", eflags); #endif - return(EINVAL); - } + return(EINVAL); + } - /* - * Don't allow users to load a valid privileged %cs. Let the - * hardware check for invalid selectors, excess privilege in - * other selectors, invalid %eip's and invalid %esp's. - */ + /* + * Don't allow users to load a valid privileged %cs. Let the + * hardware check for invalid selectors, excess privilege in + * other selectors, invalid %eip's and invalid %esp's. 
+ */ #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) - if (!CS_SECURE(scp->sc_cs)) { + if (!CS_SECURE(scp->sc_cs)) { #ifdef DEBUG - printf("sigreturn: cs = 0x%x\n", scp->sc_cs); + printf("sigreturn: cs = 0x%x\n", scp->sc_cs); #endif - trapsignal(p, SIGBUS, T_PROTFLT); - return(EINVAL); + trapsignal(p, SIGBUS, T_PROTFLT); + return(EINVAL); + } + regs->tf_ds = scp->sc_ds; + regs->tf_es = scp->sc_es; } - /* restore scratch registers */ regs->tf_eax = scp->sc_eax; regs->tf_ebx = scp->sc_ebx; @@ -627,8 +691,6 @@ sigreturn(p, uap, retval) regs->tf_esi = scp->sc_esi; regs->tf_edi = scp->sc_edi; regs->tf_cs = scp->sc_cs; - regs->tf_ds = scp->sc_ds; - regs->tf_es = scp->sc_es; regs->tf_ss = scp->sc_ss; regs->tf_isp = scp->sc_isp; @@ -787,6 +849,10 @@ struct region_descriptor r_gdt, r_idt; extern struct i386tss common_tss; /* One tss per cpu */ #else struct i386tss common_tss; +#ifdef VM86 +struct segment_descriptor common_tssd; +u_int private_tss = 0; /* flag indicating private tss */ +#endif /* VM86 */ #endif static struct i386tss dblfault_tss; @@ -794,10 +860,6 @@ static char dblfault_stack[PAGE_SIZE]; extern struct user *proc0paddr; -#ifdef TSS_IS_CACHED /* cpu_switch helper */ -struct segment_descriptor *tssptr; -int gsel_tss; -#endif /* software prototypes -- in more palatable form */ struct soft_segment_descriptor gdt_segs[ @@ -1005,9 +1067,8 @@ init386(first) int x; unsigned biosbasemem, biosextmem; struct gate_descriptor *gdp; -#ifndef TSS_IS_CACHED int gsel_tss; -#endif + struct isa_device *idp; #ifndef SMP /* table descriptors - used to load tables by microp */ @@ -1044,6 +1105,9 @@ init386(first) #endif for (x = 0; x < NGDT1; x++) ssdtosd(&gdt_segs[x], &gdt[x].sd); +#ifdef VM86 + common_tssd = gdt[GPROC0_SEL].sd; +#endif /* VM86 */ #ifdef SMP /* @@ -1380,7 +1444,11 @@ init386(first) msgbufmapped = 1; /* make an initial tss so cpu can get interrupt stack on syscall! */ +#ifdef VM86 + common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16; +#else common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE; +#endif /* VM86 */ common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ; common_tss.tss_ioopt = (sizeof common_tss) << 16; gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); @@ -1398,10 +1466,6 @@ init386(first) dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL); dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL); -#ifdef TSS_IS_CACHED /* cpu_switch helper */ - tssptr = &gdt[GPROC0_SEL].sd; -#endif - /* make a call gate to reenter kernel with */ gdp = &ldt[LSYS5CALLS_SEL].gd; @@ -1426,6 +1490,7 @@ init386(first) proc0.p_addr->u_pcb.pcb_flags = 0; proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD; proc0.p_addr->u_pcb.pcb_mpnest = 1; + proc0.p_addr->u_pcb.pcb_ext = 0; } int diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S index 3f5c5fb..811e068 100644 --- a/sys/amd64/amd64/support.S +++ b/sys/amd64/amd64/support.S @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: support.s,v 1.54 1997/06/22 16:03:33 peter Exp $ + * $Id: support.s,v 1.55 1997/07/17 04:33:40 dyson Exp $ */ #include "npx.h" @@ -1527,7 +1527,7 @@ ENTRY(rcr4) movl %cr4,%eax ret -/* void load_cr3(caddr_t cr3) */ +/* void load_cr4(caddr_t cr4) */ ENTRY(load_cr4) movl 4(%esp),%eax movl %eax,%cr4 diff --git a/sys/amd64/amd64/support.s b/sys/amd64/amd64/support.s index 3f5c5fb..811e068 100644 --- a/sys/amd64/amd64/support.s +++ b/sys/amd64/amd64/support.s @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $Id: support.s,v 1.54 1997/06/22 16:03:33 peter Exp $ + * $Id: support.s,v 1.55 1997/07/17 04:33:40 dyson Exp $ */ #include "npx.h" @@ -1527,7 +1527,7 @@ ENTRY(rcr4) movl %cr4,%eax ret -/* void load_cr3(caddr_t cr3) */ +/* void load_cr4(caddr_t cr4) */ ENTRY(load_cr4) movl 4(%esp),%eax movl %eax,%cr4 diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s index 14d0a8a..61b1dfa 100644 --- a/sys/amd64/amd64/swtch.s +++ b/sys/amd64/amd64/swtch.s @@ -33,7 +33,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: swtch.s,v 1.5 1997/08/04 17:17:29 smp Exp smp $ + * $Id: swtch.s,v 1.58 1997/08/04 17:31:43 fsmp Exp $ */ #include "npx.h" @@ -257,16 +257,27 @@ _idle: movl %ecx,%cr3 /* update common_tss.tss_esp0 pointer */ +#ifdef VM86 + movl $GPROC0_SEL, %esi +#endif /* VM86 */ movl $_common_tss, %eax movl %esp, TSS_ESP0(%eax) -#ifdef TSS_IS_CACHED /* example only */ - /* Reload task register to force reload of selector */ - movl _tssptr, %ebx - andb $~0x02, 5(%ebx) /* Flip 386BSY -> 386TSS */ - movl _gsel_tss, %ebx - ltr %bx -#endif +#ifdef VM86 + btrl %esi, _private_tss + je 1f + movl $_common_tssd, %edi + + /* move correct tss descriptor into GDT slot, then reload tr */ + leal _gdt(,%esi,8), %ebx /* entry in GDT */ + movl 0(%edi), %eax + movl %eax, 0(%ebx) + movl 4(%edi), %eax + movl %eax, 4(%ebx) + shll $3, %esi /* GSEL(entry, SEL_KPL) */ + ltr %si +1: +#endif /* VM86 */ sti @@ -472,33 +483,41 @@ swtch_com: movl %ebx, %cr3 #endif /* SMP */ -#ifdef HOW_TO_SWITCH_TSS /* example only */ - /* Fix up tss pointer to floating pcb/stack structure */ - /* XXX probably lots faster to store the 64 bits of tss entry - * in the pcb somewhere and copy them on activation. - */ - movl _tssptr, %ebx - movl %edx, %eax /* edx = pcb/tss */ - movw %ax, 2(%ebx) /* store bits 0->15 */ - roll $16, %eax /* swap upper and lower */ - movb %al, 4(%ebx) /* store bits 16->23 */ - movb %ah, 7(%ebx) /* store bits 24->31 */ - andb $~0x02, 5(%ebx) /* Flip 386BSY -> 386TSS */ +#ifdef VM86 + movl $GPROC0_SEL, %esi + cmpl $0, PCB_EXT(%edx) /* has pcb extension? */ + je 1f + btsl %esi, _private_tss /* mark use of private tss */ + movl PCB_EXT(%edx), %edi /* new tss descriptor */ + jmp 2f +1: #endif /* update common_tss.tss_esp0 pointer */ movl $_common_tss, %eax movl %edx, %ebx /* pcb */ +#ifdef VM86 + addl $(UPAGES * PAGE_SIZE - 16), %ebx +#else addl $(UPAGES * PAGE_SIZE), %ebx +#endif /* VM86 */ movl %ebx, TSS_ESP0(%eax) -#ifdef TSS_IS_CACHED /* example only */ - /* Reload task register to force reload of selector */ - movl _tssptr, %ebx - andb $~0x02, 5(%ebx) /* Flip 386BSY -> 386TSS */ - movl _gsel_tss, %ebx - ltr %bx -#endif +#ifdef VM86 + btrl %esi, _private_tss + je 3f + movl $_common_tssd, %edi +2: + /* move correct tss descriptor into GDT slot, then reload tr */ + leal _gdt(,%esi,8), %ebx /* entry in GDT */ + movl 0(%edi), %eax + movl %eax, 0(%ebx) + movl 4(%edi), %eax + movl %eax, 4(%ebx) + shll $3, %esi /* GSEL(entry, SEL_KPL) */ + ltr %si +3: +#endif /* VM86 */ /* restore context */ movl PCB_EBX(%edx),%ebx diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c index 4fc52a2..be355bb 100644 --- a/sys/amd64/amd64/sys_machdep.c +++ b/sys/amd64/amd64/sys_machdep.c @@ -31,7 +31,7 @@ * SUCH DAMAGE. 
* * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91 - * $Id: sys_machdep.c,v 1.21 1997/02/22 09:32:53 peter Exp $ + * $Id: sys_machdep.c,v 1.22 1997/07/20 08:37:23 bde Exp $ * */ @@ -51,6 +51,7 @@ #include <machine/cpu.h> #include <machine/sysarch.h> +#include <machine/pcb_ext.h> #include <vm/vm_kern.h> /* for kernel_map */ @@ -66,6 +67,12 @@ void set_user_ldt __P((struct pcb *pcb)); static int i386_get_ldt __P((struct proc *, char *, int *)); static int i386_set_ldt __P((struct proc *, char *, int *)); #endif +#ifdef VM86 +static int i386_get_ioperm __P((struct proc *, char *, int *)); +static int i386_set_ioperm __P((struct proc *, char *, int *)); +int i386_extend_pcb __P((struct proc *)); +int (*vm86_sysarch) __P((struct proc *, char *, int *)); +#endif #ifndef _SYS_SYSPROTO_H_ struct sysarch_args { @@ -92,12 +99,160 @@ sysarch(p, uap, retval) error = i386_set_ldt(p, uap->parms, retval); break; #endif +#ifdef VM86 + case I386_GET_IOPERM: + error = i386_get_ioperm(p, uap->parms, retval); + break; + case I386_SET_IOPERM: + error = i386_set_ioperm(p, uap->parms, retval); + break; + case I386_VM86: + if (vm86_sysarch) { + error = (*vm86_sysarch)(p, uap->parms, retval); + break; + } + /* FALL THROUGH */ +#endif default: error = EINVAL; break; } - return(error); + return (error); +} + +#ifdef VM86 +int +i386_extend_pcb(struct proc *p) +{ + int i, offset; + u_long *addr; + struct pcb_ext *ext; + struct segment_descriptor sd; + struct soft_segment_descriptor ssd = { + 0, /* segment base address (overwritten) */ + ctob(IOPAGES + 1) - 1, /* length */ + SDT_SYS386TSS, /* segment type */ + 0, /* priority level */ + 1, /* descriptor present */ + 0, 0, + 0, /* default 32 size */ + 0 /* granularity */ + }; + + ext = (struct pcb_ext *)kmem_alloc(kernel_map, ctob(IOPAGES+1)); + if (ext == 0) + return (ENOMEM); + p->p_addr->u_pcb.pcb_ext = ext; + bzero(&ext->ext_tss, sizeof(struct i386tss)); + ext->ext_tss.tss_esp0 = (unsigned)p->p_addr + ctob(UPAGES) - 16; + ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL); + /* + * The last byte of the i/o map must be followed by an 0xff byte. + * We arbitrarily allocate 16 bytes here, to keep the starting + * address on a doubleword boundary. + */ + offset = PAGE_SIZE - 16; + ext->ext_tss.tss_ioopt = + (offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16; + ext->ext_iomap = (caddr_t)ext + offset; + ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32; + ext->ext_vm86.vm86_inited = 0; + + addr = (u_long *)ext->ext_vm86.vm86_intmap; + for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++) + *addr++ = ~0; + + ssd.ssd_base = (unsigned)&ext->ext_tss; + ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext); + ssdtosd(&ssd, &ext->ext_tssd); + + /* switch to the new TSS after syscall completes */ + need_resched(); + + return 0; +} + +struct i386_ioperm_args { + u_short start; + u_short length; + u_char enable; +}; + +static int +i386_set_ioperm(p, args, retval) + struct proc *p; + char *args; + int *retval; +{ + int i, error = 0; + struct i386_ioperm_args ua; + char *iomap; + + if (error = copyin(args, &ua, sizeof(struct i386_ioperm_args))) + return (error); + + /* Only root can do this */ + if (error = suser(p->p_ucred, &p->p_acflag)) + return (error); + /* + * XXX + * While this is restricted to root, we should probably figure out + * whether any other driver is using this i/o address, as so not to + * cause confusion. This probably requires a global 'usage registry'. 
+ */ + + if (p->p_addr->u_pcb.pcb_ext == 0) + if (error = i386_extend_pcb(p)) + return (error); + iomap = (char *)p->p_addr->u_pcb.pcb_ext->ext_iomap; + + if ((int)(ua.start + ua.length) > 0xffff) + return (EINVAL); + + for (i = ua.start; i < (int)(ua.start + ua.length) + 1; i++) { + if (ua.enable) + iomap[i >> 3] &= ~(1 << (i & 7)); + else + iomap[i >> 3] |= (1 << (i & 7)); + } + return (error); +} + +static int +i386_get_ioperm(p, args, retval) + struct proc *p; + char *args; + int *retval; +{ + int i, state, error = 0; + struct i386_ioperm_args ua; + char *iomap; + + if (error = copyin(args, &ua, sizeof(struct i386_ioperm_args))) + return (error); + + if (p->p_addr->u_pcb.pcb_ext == 0) { + ua.length = 0; + goto done; + } + + iomap = (char *)p->p_addr->u_pcb.pcb_ext->ext_iomap; + + state = (iomap[i >> 3] >> (i & 7)) & 1; + ua.enable = !state; + ua.length = 1; + + for (i = ua.start + 1; i < 0x10000; i++) { + if (state != ((iomap[i >> 3] >> (i & 7)) & 1)) + break; + ua.length++; + } + +done: + error = copyout(&ua, args, sizeof(struct i386_ioperm_args)); + return (error); } +#endif /* VM86 */ #ifdef USER_LDT /* diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c index 6e8c0c3..7110933 100644 --- a/sys/amd64/amd64/trap.c +++ b/sys/amd64/amd64/trap.c @@ -35,7 +35,7 @@ * SUCH DAMAGE. * * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 - * $Id: trap.c,v 1.100 1997/06/22 16:03:37 peter Exp $ + * $Id: trap.c,v 1.101 1997/07/20 08:37:23 bde Exp $ */ /* @@ -85,6 +85,7 @@ extern struct i386tss common_tss; +int (*vm86_emulate) __P((struct vm86frame *)); int (*pmath_emulate) __P((struct trapframe *)); extern void trap __P((struct trapframe frame)); @@ -192,7 +193,7 @@ trap(frame) type = frame.tf_trapno; code = frame.tf_err; - if (ISPL(frame.tf_cs) == SEL_UPL) { + if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) { /* user trap */ sticks = p->p_sticks; @@ -225,9 +226,22 @@ trap(frame) } goto out; + /* + * The following two traps can happen in + * vm86 mode, and, if so, we want to handle + * them specially. + */ case T_PROTFLT: /* general protection fault */ - case T_SEGNPFLT: /* segment not present fault */ case T_STKFLT: /* stack fault */ + if (vm86_emulate && (frame.tf_eflags & PSL_VM)) { + i = (*vm86_emulate)((struct vm86frame *)&frame); + if (i == 0) + goto out; + break; + } + /* FALL THROUGH */ + + case T_SEGNPFLT: /* segment not present fault */ case T_TSSFLT: /* invalid TSS fault */ case T_DOUBLEFLT: /* double fault */ default: @@ -694,6 +708,7 @@ trap_fatal(frame) if (type <= MAX_TRAP_MSG) printf("\n\nFatal trap %d: %s while in %s mode\n", type, trap_msg[type], + frame->tf_eflags & PSL_VM ? "vm86" : ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel"); #ifdef SMP printf("cpuid = %d\n", cpuid); @@ -707,7 +722,7 @@ trap_fatal(frame) } printf("instruction pointer = 0x%x:0x%x\n", frame->tf_cs & 0xffff, frame->tf_eip); - if (ISPL(frame->tf_cs) == SEL_UPL) { + if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) { ss = frame->tf_ss & 0xffff; esp = frame->tf_esp; } else { @@ -946,7 +961,7 @@ bad: break; } - if (frame.tf_eflags & PSL_T) { + if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) { /* Traced syscall. 
*/ frame.tf_eflags &= ~PSL_T; trapsignal(p, SIGTRAP, 0); diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c index 23198d9..2db2844 100644 --- a/sys/amd64/amd64/vm_machdep.c +++ b/sys/amd64/amd64/vm_machdep.c @@ -38,7 +38,7 @@ * * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ - * $Id: vm_machdep.c,v 1.83 1997/06/26 02:04:34 tegge Exp $ + * $Id: vm_machdep.c,v 1.84 1997/07/20 08:37:24 bde Exp $ */ #include "npx.h" @@ -55,6 +55,7 @@ #include <machine/clock.h> #include <machine/cpu.h> #include <machine/md_var.h> +#include <machine/pcb_ext.h> #include <vm/vm.h> #include <vm/vm_param.h> @@ -603,6 +604,13 @@ cpu_fork(p1, p2) * pcb2->pcb_onfault: cloned above (always NULL here?). */ +#ifdef VM86 + /* + * XXX don't copy the i/o pages. this should probably be fixed. + */ + pcb2->pcb_ext = 0; +#endif + #ifdef USER_LDT /* Copy the LDT, if necessary. */ if (pcb2->pcb_ldt != 0) { @@ -650,15 +658,25 @@ void cpu_exit(p) register struct proc *p; { -#ifdef USER_LDT - struct pcb *pcb; +#if defined(USER_LDT) || defined(VM86) + struct pcb *pcb = &p->p_addr->u_pcb; #endif #if NNPX > 0 npxexit(p); #endif /* NNPX */ +#ifdef VM86 + if (pcb->pcb_ext != 0) { + /* + * XXX do we need to move the TSS off the allocated pages + * before freeing them? (not done here) + */ + kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext, + ctob(IOPAGES + 1)); + pcb->pcb_ext = 0; + } +#endif #ifdef USER_LDT - pcb = &p->p_addr->u_pcb; if (pcb->pcb_ldt != 0) { if (pcb == curpcb) lldt(GSEL(GUSERLDT_SEL, SEL_KPL)); diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h index d28079a..0aedc2a 100644 --- a/sys/amd64/include/cpu.h +++ b/sys/amd64/include/cpu.h @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)cpu.h 5.4 (Berkeley) 5/9/91 - * $Id: cpu.h,v 1.28 1997/02/22 09:34:04 peter Exp $ + * $Id: cpu.h,v 1.29 1997/04/07 07:15:58 peter Exp $ */ #ifndef _MACHINE_CPU_H_ @@ -43,6 +43,7 @@ /* * Definitions unique to i386 cpu support. */ +#include <machine/psl.h> #include <machine/frame.h> #include <machine/segments.h> @@ -57,7 +58,9 @@ #define cpu_setstack(p, ap) ((p)->p_md.md_regs[SP] = (ap)) #define cpu_set_init_frame(p, fp) ((p)->p_md.md_regs = (fp)) -#define CLKF_USERMODE(framep) (ISPL((framep)->cf_cs) == SEL_UPL) +#define CLKF_USERMODE(framep) \ + ((ISPL((framep)->cf_cs) == SEL_UPL) || (framep->cf_eflags & PSL_VM)) + #define CLKF_INTR(framep) (intr_nesting_level >= 2) #if 0 /* diff --git a/sys/amd64/include/frame.h b/sys/amd64/include/frame.h index 43201f2..05092c2 100644 --- a/sys/amd64/include/frame.h +++ b/sys/amd64/include/frame.h @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)frame.h 5.2 (Berkeley) 1/18/91 - * $Id$ + * $Id: frame.h,v 1.14 1997/02/22 09:34:38 peter Exp $ */ #ifndef _MACHINE_FRAME_H_ @@ -72,6 +72,35 @@ struct trapframe { int tf_ss; }; +/* Superset of trap frame, for traps from virtual-8086 mode */ + +struct trapframe_vm86 { + int tf_es; + int tf_ds; + int tf_edi; + int tf_esi; + int tf_ebp; + int tf_isp; + int tf_ebx; + int tf_edx; + int tf_ecx; + int tf_eax; + int tf_trapno; + /* below portion defined in 386 hardware */ + int tf_err; + int tf_eip; + int tf_cs; + int tf_eflags; + /* below only when crossing rings (e.g. 
user to kernel) */ + int tf_esp; + int tf_ss; + /* below only when switching out of VM86 mode */ + int tf_vm86_es; + int tf_vm86_ds; + int tf_vm86_fs; + int tf_vm86_gs; +}; + /* Interrupt stack frame */ struct intrframe { diff --git a/sys/amd64/include/md_var.h b/sys/amd64/include/md_var.h index 337c0c4..b5f6625 100644 --- a/sys/amd64/include/md_var.h +++ b/sys/amd64/include/md_var.h @@ -26,7 +26,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: md_var.h,v 1.14 1997/06/07 04:36:05 bde Exp $ + * $Id: md_var.h,v 1.15 1997/06/15 02:02:55 wollman Exp $ */ #ifndef _MACHINE_MD_VAR_H_ @@ -68,7 +68,6 @@ void doreti_popl_es __P((void)) __asm(__STRING(doreti_popl_es)); void doreti_popl_es_fault __P((void)) __asm(__STRING(doreti_popl_es_fault)); int fill_regs __P((struct proc *p, struct reg *regs)); void fillw __P((int /*u_short*/ pat, void *base, size_t cnt)); -int fusword __P((void *base)); u_long kvtop __P((void *addr)); void setidt __P((int idx, alias_for_inthand_t *func, int typ, int dpl, int selec)); diff --git a/sys/amd64/include/pcb.h b/sys/amd64/include/pcb.h index 18a9af1..26acab9 100644 --- a/sys/amd64/include/pcb.h +++ b/sys/amd64/include/pcb.h @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)pcb.h 5.10 (Berkeley) 5/12/91 - * $Id: pcb.h,v 1.22 1997/06/07 04:36:05 bde Exp $ + * $Id: pcb.h,v 1.23 1997/06/22 16:03:51 peter Exp $ */ #ifndef _I386_PCB_H_ @@ -45,6 +45,7 @@ */ #include <machine/tss.h> #include <machine/npx.h> +#include <machine/pcb_ext.h> struct pcb { int pcb_cr3; @@ -63,11 +64,8 @@ struct pcb { u_long pcb_mpnest; int pcb_fs; int pcb_gs; - u_long __pcb_spare[5]; /* adjust to avoid core dump size changes */ -#if 0 /* some day we may switch between procs that have their own i386tss */ - struct i386tss pcb_tss; - u_char pcb_iomap[NPORT/sizeof(u_char)]; /* i/o port bitmap */ -#endif + struct pcb_ext *pcb_ext; /* optional pcb extension */ + u_long __pcb_spare[1]; /* adjust to avoid core dump size changes */ }; /* diff --git a/sys/amd64/include/signal.h b/sys/amd64/include/signal.h index 0e860ba..51dc196 100644 --- a/sys/amd64/include/signal.h +++ b/sys/amd64/include/signal.h @@ -31,7 +31,7 @@ * SUCH DAMAGE. * * @(#)signal.h 8.1 (Berkeley) 6/11/93 - * $Id$ + * $Id: signal.h,v 1.7 1997/02/22 09:35:12 peter Exp $ */ #ifndef _MACHINE_SIGNAL_H_ @@ -72,10 +72,15 @@ struct sigcontext { int sc_edx; int sc_ecx; int sc_eax; + int sc_gs; + int sc_fs; # define sc_sp sc_esp # define sc_fp sc_ebp # define sc_pc sc_eip # define sc_ps sc_efl +# define sc_eflags sc_efl + int sc_trapno; + int sc_err; }; #endif /* !_ANSI_SOURCE && !_POSIX_SOURCE */ diff --git a/sys/amd64/include/sysarch.h b/sys/amd64/include/sysarch.h index 746cef1..80694a4 100644 --- a/sys/amd64/include/sysarch.h +++ b/sys/amd64/include/sysarch.h @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id$ + * $Id: sysarch.h,v 1.6 1997/02/22 09:35:18 peter Exp $ */ /* @@ -43,6 +43,11 @@ #define I386_GET_LDT 0 #define I386_SET_LDT 1 + /* I386_IOPL */ +#define I386_GET_IOPERM 3 +#define I386_SET_IOPERM 4 + /* xxxxx */ +#define I386_VM86 6 #ifdef KERNEL /* nothing here yet... */ diff --git a/sys/amd64/isa/npx.c b/sys/amd64/isa/npx.c index 4dd1e3c..32fa57a 100644 --- a/sys/amd64/isa/npx.c +++ b/sys/amd64/isa/npx.c @@ -32,7 +32,7 @@ * SUCH DAMAGE. 
* * from: @(#)npx.c 7.2 (Berkeley) 5/12/91 - * $Id: npx.c,v 1.3 1997/07/20 23:30:38 smp Exp smp $ + * $Id: npx.c,v 1.49 1997/07/21 07:57:50 fsmp Exp $ */ #include "npx.h" @@ -515,7 +515,7 @@ npxintr(unit) * Pass exception to process. */ frame = (struct intrframe *)&unit; /* XXX */ - if (ISPL(frame->if_cs) == SEL_UPL) { + if ((ISPL(frame->if_cs) == SEL_UPL) || (frame->if_eflags & PSL_VM)) { /* * Interrupt is essentially a trap, so we can afford to call * the SIGFPE handler (if any) as soon as the interrupt diff --git a/sys/i386/i386/exception.s b/sys/i386/i386/exception.s index 18715d2..f51cd0f 100644 --- a/sys/i386/i386/exception.s +++ b/sys/i386/i386/exception.s @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: exception.s,v 1.3 1997/07/30 22:51:11 smp Exp smp $ + * $Id: exception.s,v 1.34 1997/07/31 05:43:02 fsmp Exp $ */ #include "npx.h" /* NNPX */ @@ -220,6 +220,10 @@ calltrap: subl %eax,%eax testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp) jne 1f +#ifdef VM86 + testl $PSL_VM,TF_EFLAGS(%esp) + jne 1f +#endif /* VM86 */ movl _cpl,%eax 1: /* diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c index d9473cf..38bb5c0 100644 --- a/sys/i386/i386/genassym.c +++ b/sys/i386/i386/genassym.c @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 - * $Id: genassym.c,v 1.46 1997/05/10 08:01:13 bde Exp $ + * $Id: genassym.c,v 1.47 1997/06/07 04:36:09 bde Exp $ */ #include <stdio.h> @@ -129,6 +129,9 @@ main() printf("#define\tPCB_USERLDT %p\n", &pcb->pcb_ldt); printf("#define\tPCB_FS %p\n", &pcb->pcb_fs); printf("#define\tPCB_GS %p\n", &pcb->pcb_gs); +#ifdef VM86 + printf("#define\tPCB_EXT %p\n", &pcb->pcb_ext); +#endif /* VM86 */ #ifdef SMP printf("#define\tPCB_MPNEST %p\n", &pcb->pcb_mpnest); #endif @@ -184,5 +187,7 @@ main() printf("#define\tBI_SYMTAB %p\n", &bootinfo->bi_symtab); printf("#define\tBI_ESYMTAB %p\n", &bootinfo->bi_esymtab); + printf("#define\tGPROC0_SEL %d\n", GPROC0_SEL); + return (0); } diff --git a/sys/i386/i386/locore.s b/sys/i386/i386/locore.s index 57af042..a420cbc 100644 --- a/sys/i386/i386/locore.s +++ b/sys/i386/i386/locore.s @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)locore.s 7.3 (Berkeley) 5/13/91 - * $Id: locore.s,v 1.92 1997/07/17 19:44:53 dyson Exp $ + * $Id: locore.s,v 1.93 1997/07/20 08:37:18 bde Exp $ * * originally from: locore.s, by William F. Jolitz * @@ -335,6 +335,18 @@ _pc98_system_parameter: call create_pagetables +#ifdef VM86 +/* + * If the CPU has support for VME, turn it on. + */ + testl $CPUID_VME, R(_cpu_feature) + jz 1f + movl %cr4, %eax + orl $CR4_VME, %eax + movl %eax, %cr4 +1: +#endif /* VM86 */ + #ifdef BDE_DEBUGGER /* * Adjust as much as possible for paging before enabling paging so that the diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c index dbaacef..0205647 100644 --- a/sys/i386/i386/machdep.c +++ b/sys/i386/i386/machdep.c @@ -35,7 +35,7 @@ * SUCH DAMAGE. * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 - * $Id: machdep.c,v 1.253 1997/07/20 08:37:19 bde Exp $ + * $Id: machdep.c,v 1.254 1997/08/05 00:01:10 dyson Exp $ */ #include "apm.h" @@ -106,6 +106,7 @@ #include <machine/cons.h> #include <machine/bootinfo.h> #include <machine/md_var.h> +#include <machine/pcb_ext.h> #ifdef SMP #include <machine/smp.h> #endif @@ -476,7 +477,7 @@ sendsig(catcher, sig, mask, code) * if access is denied. 
*/ if ((grow(p, (int)fp) == FALSE) || - (useracc((caddr_t)fp, sizeof (struct sigframe), B_WRITE) == FALSE)) { + (useracc((caddr_t)fp, sizeof(struct sigframe), B_WRITE) == FALSE)) { /* * Process has trashed its stack; give it an illegal * instruction to halt it in its tracks. @@ -527,6 +528,34 @@ sendsig(catcher, sig, mask, code) sf.sf_sc.sc_fp = regs->tf_ebp; sf.sf_sc.sc_pc = regs->tf_eip; sf.sf_sc.sc_ps = regs->tf_eflags; + sf.sf_sc.sc_trapno = regs->tf_trapno; + sf.sf_sc.sc_err = regs->tf_err; + + /* + * If we're a vm86 process, we want to save the segment registers. + * We also change eflags to be our emulated eflags, not the actual + * eflags. + */ + if (regs->tf_eflags & PSL_VM) { + struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; + struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; + + sf.sf_sc.sc_gs = tf->tf_vm86_gs; + sf.sf_sc.sc_fs = tf->tf_vm86_fs; + sf.sf_sc.sc_es = tf->tf_vm86_es; + sf.sf_sc.sc_ds = tf->tf_vm86_ds; + + if (vm86->vm86_has_vme == 0) + sf.sf_sc.sc_ps = (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) + | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); + + /* + * We should never have PSL_T set when returning from vm86 + * mode. It may be set here if we deliver a signal before + * getting to vm86 mode, so turn it off. + */ + tf->tf_eflags &= ~(PSL_VM | PSL_T | PSL_VIF | PSL_VIP); + } /* * Copy the sigframe out to the user's stack. @@ -537,11 +566,10 @@ sendsig(catcher, sig, mask, code) * ...Kill the process. */ sigexit(p, SIGILL); - }; + } regs->tf_esp = (int)fp; regs->tf_eip = (int)(((char *)PS_STRINGS) - *(p->p_sysent->sv_szsigcode)); - regs->tf_eflags &= ~PSL_VM; regs->tf_cs = _ucodesel; regs->tf_ds = _udatasel; regs->tf_es = _udatasel; @@ -583,42 +611,78 @@ sigreturn(p, uap, retval) if (useracc((caddr_t)fp, sizeof (*fp), B_WRITE) == 0) return(EFAULT); - /* - * Don't allow users to change privileged or reserved flags. - */ -#define EFLAGS_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) eflags = scp->sc_ps; - /* - * XXX do allow users to change the privileged flag PSL_RF. The - * cpu sets PSL_RF in tf_eflags for faults. Debuggers should - * sometimes set it there too. tf_eflags is kept in the signal - * context during signal handling and there is no other place - * to remember it, so the PSL_RF bit may be corrupted by the - * signal handler without us knowing. Corruption of the PSL_RF - * bit at worst causes one more or one less debugger trap, so - * allowing it is fairly harmless. - */ - if (!EFLAGS_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { + if (eflags & PSL_VM) { + struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; + struct vm86_kernel *vm86; + + /* + * if pcb_ext == 0 or vm86_inited == 0, the user hasn't + * set up the vm86 area, and we can't enter vm86 mode. 
+ */ + if (p->p_addr->u_pcb.pcb_ext == 0) + return (EINVAL); + vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; + if (vm86->vm86_inited == 0) + return (EINVAL); + + /* go back to user mode if both flags are set */ + if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) + trapsignal(p, SIGBUS, 0); + +#define VM_USERCHANGE (PSL_USERCHANGE | PSL_RF) +#define VME_USERCHANGE (VM_USERCHANGE | PSL_VIP | PSL_VIF) + if (vm86->vm86_has_vme) { + eflags = (tf->tf_eflags & ~VME_USERCHANGE) | + (eflags & VME_USERCHANGE) | PSL_VM; + } else { + vm86->vm86_eflags = eflags; /* save VIF, VIP */ + eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; + } + tf->tf_vm86_ds = scp->sc_ds; + tf->tf_vm86_es = scp->sc_es; + tf->tf_vm86_fs = scp->sc_fs; + tf->tf_vm86_gs = scp->sc_gs; + tf->tf_ds = _udatasel; + tf->tf_es = _udatasel; + } else { + /* + * Don't allow users to change privileged or reserved flags. + */ +#define EFLAGS_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) + /* + * XXX do allow users to change the privileged flag PSL_RF. + * The cpu sets PSL_RF in tf_eflags for faults. Debuggers + * should sometimes set it there too. tf_eflags is kept in + * the signal context during signal handling and there is no + * other place to remember it, so the PSL_RF bit may be + * corrupted by the signal handler without us knowing. + * Corruption of the PSL_RF bit at worst causes one more or + * one less debugger trap, so allowing it is fairly harmless. + */ + if (!EFLAGS_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { #ifdef DEBUG - printf("sigreturn: eflags = 0x%x\n", eflags); + printf("sigreturn: eflags = 0x%x\n", eflags); #endif - return(EINVAL); - } + return(EINVAL); + } - /* - * Don't allow users to load a valid privileged %cs. Let the - * hardware check for invalid selectors, excess privilege in - * other selectors, invalid %eip's and invalid %esp's. - */ + /* + * Don't allow users to load a valid privileged %cs. Let the + * hardware check for invalid selectors, excess privilege in + * other selectors, invalid %eip's and invalid %esp's. 
+ */ #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) - if (!CS_SECURE(scp->sc_cs)) { + if (!CS_SECURE(scp->sc_cs)) { #ifdef DEBUG - printf("sigreturn: cs = 0x%x\n", scp->sc_cs); + printf("sigreturn: cs = 0x%x\n", scp->sc_cs); #endif - trapsignal(p, SIGBUS, T_PROTFLT); - return(EINVAL); + trapsignal(p, SIGBUS, T_PROTFLT); + return(EINVAL); + } + regs->tf_ds = scp->sc_ds; + regs->tf_es = scp->sc_es; } - /* restore scratch registers */ regs->tf_eax = scp->sc_eax; regs->tf_ebx = scp->sc_ebx; @@ -627,8 +691,6 @@ sigreturn(p, uap, retval) regs->tf_esi = scp->sc_esi; regs->tf_edi = scp->sc_edi; regs->tf_cs = scp->sc_cs; - regs->tf_ds = scp->sc_ds; - regs->tf_es = scp->sc_es; regs->tf_ss = scp->sc_ss; regs->tf_isp = scp->sc_isp; @@ -787,6 +849,10 @@ struct region_descriptor r_gdt, r_idt; extern struct i386tss common_tss; /* One tss per cpu */ #else struct i386tss common_tss; +#ifdef VM86 +struct segment_descriptor common_tssd; +u_int private_tss = 0; /* flag indicating private tss */ +#endif /* VM86 */ #endif static struct i386tss dblfault_tss; @@ -794,10 +860,6 @@ static char dblfault_stack[PAGE_SIZE]; extern struct user *proc0paddr; -#ifdef TSS_IS_CACHED /* cpu_switch helper */ -struct segment_descriptor *tssptr; -int gsel_tss; -#endif /* software prototypes -- in more palatable form */ struct soft_segment_descriptor gdt_segs[ @@ -1005,9 +1067,8 @@ init386(first) int x; unsigned biosbasemem, biosextmem; struct gate_descriptor *gdp; -#ifndef TSS_IS_CACHED int gsel_tss; -#endif + struct isa_device *idp; #ifndef SMP /* table descriptors - used to load tables by microp */ @@ -1044,6 +1105,9 @@ init386(first) #endif for (x = 0; x < NGDT1; x++) ssdtosd(&gdt_segs[x], &gdt[x].sd); +#ifdef VM86 + common_tssd = gdt[GPROC0_SEL].sd; +#endif /* VM86 */ #ifdef SMP /* @@ -1380,7 +1444,11 @@ init386(first) msgbufmapped = 1; /* make an initial tss so cpu can get interrupt stack on syscall! */ +#ifdef VM86 + common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16; +#else common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE; +#endif /* VM86 */ common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ; common_tss.tss_ioopt = (sizeof common_tss) << 16; gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); @@ -1398,10 +1466,6 @@ init386(first) dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL); dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL); -#ifdef TSS_IS_CACHED /* cpu_switch helper */ - tssptr = &gdt[GPROC0_SEL].sd; -#endif - /* make a call gate to reenter kernel with */ gdp = &ldt[LSYS5CALLS_SEL].gd; @@ -1426,6 +1490,7 @@ init386(first) proc0.p_addr->u_pcb.pcb_flags = 0; proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD; proc0.p_addr->u_pcb.pcb_mpnest = 1; + proc0.p_addr->u_pcb.pcb_ext = 0; } int diff --git a/sys/i386/i386/support.s b/sys/i386/i386/support.s index 3f5c5fb..811e068 100644 --- a/sys/i386/i386/support.s +++ b/sys/i386/i386/support.s @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: support.s,v 1.54 1997/06/22 16:03:33 peter Exp $ + * $Id: support.s,v 1.55 1997/07/17 04:33:40 dyson Exp $ */ #include "npx.h" @@ -1527,7 +1527,7 @@ ENTRY(rcr4) movl %cr4,%eax ret -/* void load_cr3(caddr_t cr3) */ +/* void load_cr4(caddr_t cr4) */ ENTRY(load_cr4) movl 4(%esp),%eax movl %eax,%cr4 diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s index 14d0a8a..61b1dfa 100644 --- a/sys/i386/i386/swtch.s +++ b/sys/i386/i386/swtch.s @@ -33,7 +33,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $Id: swtch.s,v 1.5 1997/08/04 17:17:29 smp Exp smp $ + * $Id: swtch.s,v 1.58 1997/08/04 17:31:43 fsmp Exp $ */ #include "npx.h" @@ -257,16 +257,27 @@ _idle: movl %ecx,%cr3 /* update common_tss.tss_esp0 pointer */ +#ifdef VM86 + movl $GPROC0_SEL, %esi +#endif /* VM86 */ movl $_common_tss, %eax movl %esp, TSS_ESP0(%eax) -#ifdef TSS_IS_CACHED /* example only */ - /* Reload task register to force reload of selector */ - movl _tssptr, %ebx - andb $~0x02, 5(%ebx) /* Flip 386BSY -> 386TSS */ - movl _gsel_tss, %ebx - ltr %bx -#endif +#ifdef VM86 + btrl %esi, _private_tss + je 1f + movl $_common_tssd, %edi + + /* move correct tss descriptor into GDT slot, then reload tr */ + leal _gdt(,%esi,8), %ebx /* entry in GDT */ + movl 0(%edi), %eax + movl %eax, 0(%ebx) + movl 4(%edi), %eax + movl %eax, 4(%ebx) + shll $3, %esi /* GSEL(entry, SEL_KPL) */ + ltr %si +1: +#endif /* VM86 */ sti @@ -472,33 +483,41 @@ swtch_com: movl %ebx, %cr3 #endif /* SMP */ -#ifdef HOW_TO_SWITCH_TSS /* example only */ - /* Fix up tss pointer to floating pcb/stack structure */ - /* XXX probably lots faster to store the 64 bits of tss entry - * in the pcb somewhere and copy them on activation. - */ - movl _tssptr, %ebx - movl %edx, %eax /* edx = pcb/tss */ - movw %ax, 2(%ebx) /* store bits 0->15 */ - roll $16, %eax /* swap upper and lower */ - movb %al, 4(%ebx) /* store bits 16->23 */ - movb %ah, 7(%ebx) /* store bits 24->31 */ - andb $~0x02, 5(%ebx) /* Flip 386BSY -> 386TSS */ +#ifdef VM86 + movl $GPROC0_SEL, %esi + cmpl $0, PCB_EXT(%edx) /* has pcb extension? */ + je 1f + btsl %esi, _private_tss /* mark use of private tss */ + movl PCB_EXT(%edx), %edi /* new tss descriptor */ + jmp 2f +1: #endif /* update common_tss.tss_esp0 pointer */ movl $_common_tss, %eax movl %edx, %ebx /* pcb */ +#ifdef VM86 + addl $(UPAGES * PAGE_SIZE - 16), %ebx +#else addl $(UPAGES * PAGE_SIZE), %ebx +#endif /* VM86 */ movl %ebx, TSS_ESP0(%eax) -#ifdef TSS_IS_CACHED /* example only */ - /* Reload task register to force reload of selector */ - movl _tssptr, %ebx - andb $~0x02, 5(%ebx) /* Flip 386BSY -> 386TSS */ - movl _gsel_tss, %ebx - ltr %bx -#endif +#ifdef VM86 + btrl %esi, _private_tss + je 3f + movl $_common_tssd, %edi +2: + /* move correct tss descriptor into GDT slot, then reload tr */ + leal _gdt(,%esi,8), %ebx /* entry in GDT */ + movl 0(%edi), %eax + movl %eax, 0(%ebx) + movl 4(%edi), %eax + movl %eax, 4(%ebx) + shll $3, %esi /* GSEL(entry, SEL_KPL) */ + ltr %si +3: +#endif /* VM86 */ /* restore context */ movl PCB_EBX(%edx),%ebx diff --git a/sys/i386/i386/sys_machdep.c b/sys/i386/i386/sys_machdep.c index 4fc52a2..be355bb 100644 --- a/sys/i386/i386/sys_machdep.c +++ b/sys/i386/i386/sys_machdep.c @@ -31,7 +31,7 @@ * SUCH DAMAGE. 
* * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91 - * $Id: sys_machdep.c,v 1.21 1997/02/22 09:32:53 peter Exp $ + * $Id: sys_machdep.c,v 1.22 1997/07/20 08:37:23 bde Exp $ * */ @@ -51,6 +51,7 @@ #include <machine/cpu.h> #include <machine/sysarch.h> +#include <machine/pcb_ext.h> #include <vm/vm_kern.h> /* for kernel_map */ @@ -66,6 +67,12 @@ void set_user_ldt __P((struct pcb *pcb)); static int i386_get_ldt __P((struct proc *, char *, int *)); static int i386_set_ldt __P((struct proc *, char *, int *)); #endif +#ifdef VM86 +static int i386_get_ioperm __P((struct proc *, char *, int *)); +static int i386_set_ioperm __P((struct proc *, char *, int *)); +int i386_extend_pcb __P((struct proc *)); +int (*vm86_sysarch) __P((struct proc *, char *, int *)); +#endif #ifndef _SYS_SYSPROTO_H_ struct sysarch_args { @@ -92,12 +99,160 @@ sysarch(p, uap, retval) error = i386_set_ldt(p, uap->parms, retval); break; #endif +#ifdef VM86 + case I386_GET_IOPERM: + error = i386_get_ioperm(p, uap->parms, retval); + break; + case I386_SET_IOPERM: + error = i386_set_ioperm(p, uap->parms, retval); + break; + case I386_VM86: + if (vm86_sysarch) { + error = (*vm86_sysarch)(p, uap->parms, retval); + break; + } + /* FALL THROUGH */ +#endif default: error = EINVAL; break; } - return(error); + return (error); +} + +#ifdef VM86 +int +i386_extend_pcb(struct proc *p) +{ + int i, offset; + u_long *addr; + struct pcb_ext *ext; + struct segment_descriptor sd; + struct soft_segment_descriptor ssd = { + 0, /* segment base address (overwritten) */ + ctob(IOPAGES + 1) - 1, /* length */ + SDT_SYS386TSS, /* segment type */ + 0, /* priority level */ + 1, /* descriptor present */ + 0, 0, + 0, /* default 32 size */ + 0 /* granularity */ + }; + + ext = (struct pcb_ext *)kmem_alloc(kernel_map, ctob(IOPAGES+1)); + if (ext == 0) + return (ENOMEM); + p->p_addr->u_pcb.pcb_ext = ext; + bzero(&ext->ext_tss, sizeof(struct i386tss)); + ext->ext_tss.tss_esp0 = (unsigned)p->p_addr + ctob(UPAGES) - 16; + ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL); + /* + * The last byte of the i/o map must be followed by an 0xff byte. + * We arbitrarily allocate 16 bytes here, to keep the starting + * address on a doubleword boundary. + */ + offset = PAGE_SIZE - 16; + ext->ext_tss.tss_ioopt = + (offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16; + ext->ext_iomap = (caddr_t)ext + offset; + ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32; + ext->ext_vm86.vm86_inited = 0; + + addr = (u_long *)ext->ext_vm86.vm86_intmap; + for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++) + *addr++ = ~0; + + ssd.ssd_base = (unsigned)&ext->ext_tss; + ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext); + ssdtosd(&ssd, &ext->ext_tssd); + + /* switch to the new TSS after syscall completes */ + need_resched(); + + return 0; +} + +struct i386_ioperm_args { + u_short start; + u_short length; + u_char enable; +}; + +static int +i386_set_ioperm(p, args, retval) + struct proc *p; + char *args; + int *retval; +{ + int i, error = 0; + struct i386_ioperm_args ua; + char *iomap; + + if (error = copyin(args, &ua, sizeof(struct i386_ioperm_args))) + return (error); + + /* Only root can do this */ + if (error = suser(p->p_ucred, &p->p_acflag)) + return (error); + /* + * XXX + * While this is restricted to root, we should probably figure out + * whether any other driver is using this i/o address, as so not to + * cause confusion. This probably requires a global 'usage registry'. 
+ */ + + if (p->p_addr->u_pcb.pcb_ext == 0) + if (error = i386_extend_pcb(p)) + return (error); + iomap = (char *)p->p_addr->u_pcb.pcb_ext->ext_iomap; + + if ((int)(ua.start + ua.length) > 0xffff) + return (EINVAL); + + for (i = ua.start; i < (int)(ua.start + ua.length) + 1; i++) { + if (ua.enable) + iomap[i >> 3] &= ~(1 << (i & 7)); + else + iomap[i >> 3] |= (1 << (i & 7)); + } + return (error); +} + +static int +i386_get_ioperm(p, args, retval) + struct proc *p; + char *args; + int *retval; +{ + int i, state, error = 0; + struct i386_ioperm_args ua; + char *iomap; + + if (error = copyin(args, &ua, sizeof(struct i386_ioperm_args))) + return (error); + + if (p->p_addr->u_pcb.pcb_ext == 0) { + ua.length = 0; + goto done; + } + + iomap = (char *)p->p_addr->u_pcb.pcb_ext->ext_iomap; + + state = (iomap[i >> 3] >> (i & 7)) & 1; + ua.enable = !state; + ua.length = 1; + + for (i = ua.start + 1; i < 0x10000; i++) { + if (state != ((iomap[i >> 3] >> (i & 7)) & 1)) + break; + ua.length++; + } + +done: + error = copyout(&ua, args, sizeof(struct i386_ioperm_args)); + return (error); } +#endif /* VM86 */ #ifdef USER_LDT /* diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c index 6e8c0c3..7110933 100644 --- a/sys/i386/i386/trap.c +++ b/sys/i386/i386/trap.c @@ -35,7 +35,7 @@ * SUCH DAMAGE. * * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 - * $Id: trap.c,v 1.100 1997/06/22 16:03:37 peter Exp $ + * $Id: trap.c,v 1.101 1997/07/20 08:37:23 bde Exp $ */ /* @@ -85,6 +85,7 @@ extern struct i386tss common_tss; +int (*vm86_emulate) __P((struct vm86frame *)); int (*pmath_emulate) __P((struct trapframe *)); extern void trap __P((struct trapframe frame)); @@ -192,7 +193,7 @@ trap(frame) type = frame.tf_trapno; code = frame.tf_err; - if (ISPL(frame.tf_cs) == SEL_UPL) { + if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) { /* user trap */ sticks = p->p_sticks; @@ -225,9 +226,22 @@ trap(frame) } goto out; + /* + * The following two traps can happen in + * vm86 mode, and, if so, we want to handle + * them specially. + */ case T_PROTFLT: /* general protection fault */ - case T_SEGNPFLT: /* segment not present fault */ case T_STKFLT: /* stack fault */ + if (vm86_emulate && (frame.tf_eflags & PSL_VM)) { + i = (*vm86_emulate)((struct vm86frame *)&frame); + if (i == 0) + goto out; + break; + } + /* FALL THROUGH */ + + case T_SEGNPFLT: /* segment not present fault */ case T_TSSFLT: /* invalid TSS fault */ case T_DOUBLEFLT: /* double fault */ default: @@ -694,6 +708,7 @@ trap_fatal(frame) if (type <= MAX_TRAP_MSG) printf("\n\nFatal trap %d: %s while in %s mode\n", type, trap_msg[type], + frame->tf_eflags & PSL_VM ? "vm86" : ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel"); #ifdef SMP printf("cpuid = %d\n", cpuid); @@ -707,7 +722,7 @@ trap_fatal(frame) } printf("instruction pointer = 0x%x:0x%x\n", frame->tf_cs & 0xffff, frame->tf_eip); - if (ISPL(frame->tf_cs) == SEL_UPL) { + if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) { ss = frame->tf_ss & 0xffff; esp = frame->tf_esp; } else { @@ -946,7 +961,7 @@ bad: break; } - if (frame.tf_eflags & PSL_T) { + if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) { /* Traced syscall. 
*/ frame.tf_eflags &= ~PSL_T; trapsignal(p, SIGTRAP, 0); diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c index 23198d9..2db2844 100644 --- a/sys/i386/i386/vm_machdep.c +++ b/sys/i386/i386/vm_machdep.c @@ -38,7 +38,7 @@ * * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ - * $Id: vm_machdep.c,v 1.83 1997/06/26 02:04:34 tegge Exp $ + * $Id: vm_machdep.c,v 1.84 1997/07/20 08:37:24 bde Exp $ */ #include "npx.h" @@ -55,6 +55,7 @@ #include <machine/clock.h> #include <machine/cpu.h> #include <machine/md_var.h> +#include <machine/pcb_ext.h> #include <vm/vm.h> #include <vm/vm_param.h> @@ -603,6 +604,13 @@ cpu_fork(p1, p2) * pcb2->pcb_onfault: cloned above (always NULL here?). */ +#ifdef VM86 + /* + * XXX don't copy the i/o pages. this should probably be fixed. + */ + pcb2->pcb_ext = 0; +#endif + #ifdef USER_LDT /* Copy the LDT, if necessary. */ if (pcb2->pcb_ldt != 0) { @@ -650,15 +658,25 @@ void cpu_exit(p) register struct proc *p; { -#ifdef USER_LDT - struct pcb *pcb; +#if defined(USER_LDT) || defined(VM86) + struct pcb *pcb = &p->p_addr->u_pcb; #endif #if NNPX > 0 npxexit(p); #endif /* NNPX */ +#ifdef VM86 + if (pcb->pcb_ext != 0) { + /* + * XXX do we need to move the TSS off the allocated pages + * before freeing them? (not done here) + */ + kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext, + ctob(IOPAGES + 1)); + pcb->pcb_ext = 0; + } +#endif #ifdef USER_LDT - pcb = &p->p_addr->u_pcb; if (pcb->pcb_ldt != 0) { if (pcb == curpcb) lldt(GSEL(GUSERLDT_SEL, SEL_KPL)); diff --git a/sys/i386/include/cpu.h b/sys/i386/include/cpu.h index d28079a..0aedc2a 100644 --- a/sys/i386/include/cpu.h +++ b/sys/i386/include/cpu.h @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)cpu.h 5.4 (Berkeley) 5/9/91 - * $Id: cpu.h,v 1.28 1997/02/22 09:34:04 peter Exp $ + * $Id: cpu.h,v 1.29 1997/04/07 07:15:58 peter Exp $ */ #ifndef _MACHINE_CPU_H_ @@ -43,6 +43,7 @@ /* * Definitions unique to i386 cpu support. */ +#include <machine/psl.h> #include <machine/frame.h> #include <machine/segments.h> @@ -57,7 +58,9 @@ #define cpu_setstack(p, ap) ((p)->p_md.md_regs[SP] = (ap)) #define cpu_set_init_frame(p, fp) ((p)->p_md.md_regs = (fp)) -#define CLKF_USERMODE(framep) (ISPL((framep)->cf_cs) == SEL_UPL) +#define CLKF_USERMODE(framep) \ + ((ISPL((framep)->cf_cs) == SEL_UPL) || (framep->cf_eflags & PSL_VM)) + #define CLKF_INTR(framep) (intr_nesting_level >= 2) #if 0 /* diff --git a/sys/i386/include/frame.h b/sys/i386/include/frame.h index 43201f2..05092c2 100644 --- a/sys/i386/include/frame.h +++ b/sys/i386/include/frame.h @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)frame.h 5.2 (Berkeley) 1/18/91 - * $Id$ + * $Id: frame.h,v 1.14 1997/02/22 09:34:38 peter Exp $ */ #ifndef _MACHINE_FRAME_H_ @@ -72,6 +72,35 @@ struct trapframe { int tf_ss; }; +/* Superset of trap frame, for traps from virtual-8086 mode */ + +struct trapframe_vm86 { + int tf_es; + int tf_ds; + int tf_edi; + int tf_esi; + int tf_ebp; + int tf_isp; + int tf_ebx; + int tf_edx; + int tf_ecx; + int tf_eax; + int tf_trapno; + /* below portion defined in 386 hardware */ + int tf_err; + int tf_eip; + int tf_cs; + int tf_eflags; + /* below only when crossing rings (e.g. 
user to kernel) */ + int tf_esp; + int tf_ss; + /* below only when switching out of VM86 mode */ + int tf_vm86_es; + int tf_vm86_ds; + int tf_vm86_fs; + int tf_vm86_gs; +}; + /* Interrupt stack frame */ struct intrframe { diff --git a/sys/i386/include/md_var.h b/sys/i386/include/md_var.h index 337c0c4..b5f6625 100644 --- a/sys/i386/include/md_var.h +++ b/sys/i386/include/md_var.h @@ -26,7 +26,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: md_var.h,v 1.14 1997/06/07 04:36:05 bde Exp $ + * $Id: md_var.h,v 1.15 1997/06/15 02:02:55 wollman Exp $ */ #ifndef _MACHINE_MD_VAR_H_ @@ -68,7 +68,6 @@ void doreti_popl_es __P((void)) __asm(__STRING(doreti_popl_es)); void doreti_popl_es_fault __P((void)) __asm(__STRING(doreti_popl_es_fault)); int fill_regs __P((struct proc *p, struct reg *regs)); void fillw __P((int /*u_short*/ pat, void *base, size_t cnt)); -int fusword __P((void *base)); u_long kvtop __P((void *addr)); void setidt __P((int idx, alias_for_inthand_t *func, int typ, int dpl, int selec)); diff --git a/sys/i386/include/param.h b/sys/i386/include/param.h index 1015164..4a7d19e 100644 --- a/sys/i386/include/param.h +++ b/sys/i386/include/param.h @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)param.h 5.8 (Berkeley) 6/28/91 - * $Id: param.h,v 1.31 1997/08/04 19:14:47 fsmp Exp $ + * $Id: param.h,v 1.32 1997/08/07 05:15:52 dyson Exp $ */ #ifndef _MACHINE_PARAM_H_ @@ -72,6 +72,7 @@ #define BLKDEV_IOSIZE 2048 #define MAXPHYS (64 * 1024) /* max raw I/O transfer size */ +#define IOPAGES 2 /* pages of i/o permission bitmap */ #define UPAGES 2 /* pages of u-area */ #define UPAGES_HOLE 2 /* pages of "hole" at top of user space where */ /* the upages used to be. DO NOT CHANGE! */ diff --git a/sys/i386/include/pcb.h b/sys/i386/include/pcb.h index 18a9af1..26acab9 100644 --- a/sys/i386/include/pcb.h +++ b/sys/i386/include/pcb.h @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)pcb.h 5.10 (Berkeley) 5/12/91 - * $Id: pcb.h,v 1.22 1997/06/07 04:36:05 bde Exp $ + * $Id: pcb.h,v 1.23 1997/06/22 16:03:51 peter Exp $ */ #ifndef _I386_PCB_H_ @@ -45,6 +45,7 @@ */ #include <machine/tss.h> #include <machine/npx.h> +#include <machine/pcb_ext.h> struct pcb { int pcb_cr3; @@ -63,11 +64,8 @@ struct pcb { u_long pcb_mpnest; int pcb_fs; int pcb_gs; - u_long __pcb_spare[5]; /* adjust to avoid core dump size changes */ -#if 0 /* some day we may switch between procs that have their own i386tss */ - struct i386tss pcb_tss; - u_char pcb_iomap[NPORT/sizeof(u_char)]; /* i/o port bitmap */ -#endif + struct pcb_ext *pcb_ext; /* optional pcb extension */ + u_long __pcb_spare[1]; /* adjust to avoid core dump size changes */ }; /* diff --git a/sys/i386/include/signal.h b/sys/i386/include/signal.h index 0e860ba..51dc196 100644 --- a/sys/i386/include/signal.h +++ b/sys/i386/include/signal.h @@ -31,7 +31,7 @@ * SUCH DAMAGE. 
* * @(#)signal.h 8.1 (Berkeley) 6/11/93 - * $Id$ + * $Id: signal.h,v 1.7 1997/02/22 09:35:12 peter Exp $ */ #ifndef _MACHINE_SIGNAL_H_ @@ -72,10 +72,15 @@ struct sigcontext { int sc_edx; int sc_ecx; int sc_eax; + int sc_gs; + int sc_fs; # define sc_sp sc_esp # define sc_fp sc_ebp # define sc_pc sc_eip # define sc_ps sc_efl +# define sc_eflags sc_efl + int sc_trapno; + int sc_err; }; #endif /* !_ANSI_SOURCE && !_POSIX_SOURCE */ diff --git a/sys/i386/include/sysarch.h b/sys/i386/include/sysarch.h index 746cef1..80694a4 100644 --- a/sys/i386/include/sysarch.h +++ b/sys/i386/include/sysarch.h @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id$ + * $Id: sysarch.h,v 1.6 1997/02/22 09:35:18 peter Exp $ */ /* @@ -43,6 +43,11 @@ #define I386_GET_LDT 0 #define I386_SET_LDT 1 + /* I386_IOPL */ +#define I386_GET_IOPERM 3 +#define I386_SET_IOPERM 4 + /* xxxxx */ +#define I386_VM86 6 #ifdef KERNEL /* nothing here yet... */ diff --git a/sys/i386/isa/ipl.s b/sys/i386/isa/ipl.s index 797aa8f..45b0de5 100644 --- a/sys/i386/isa/ipl.s +++ b/sys/i386/isa/ipl.s @@ -36,7 +36,7 @@ * * @(#)ipl.s * - * $Id: ipl.s,v 1.3 1997/07/30 22:46:49 smp Exp smp $ + * $Id: ipl.s,v 1.5 1997/07/31 05:42:06 fsmp Exp $ */ @@ -120,6 +120,23 @@ doreti_exit: movl %eax,_cpl decb _intr_nesting_level MEXITCOUNT +#ifdef VM86 + /* + * XXX + * Sometimes when attempting to return to vm86 mode, cpl is not + * being reset to 0, so here we force it to 0 before returning to + * vm86 mode. doreti_stop is a convenient place to set a breakpoint. + * When the cpl problem is solved, this code can disappear. + */ + cmpl $0,_cpl + je 1f + testl $PSL_VM,TF_EFLAGS(%esp) + je 1f +doreti_stop: + movl $0,_cpl + nop +1: +#endif /* VM86 */ #if 0 REL_MPLOCK #else @@ -198,6 +215,7 @@ swi_ast: addl $8,%esp /* discard raddr & cpl to get trap frame */ testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp) je swi_ast_phantom +swi_ast_user: movl $T_ASTFLT,(2+8+0)*4(%esp) movb $0,_intr_nesting_level /* finish becoming a trap handler */ call _trap @@ -207,6 +225,14 @@ swi_ast: ALIGN_TEXT swi_ast_phantom: +#ifdef VM86 + /* + * check for ast from vm86 mode. Placed down here so the jumps do + * not get taken for mainline code. + */ + testl $PSL_VM,TF_EFLAGS(%esp) + jne swi_ast_user +#endif /* VM86 */ /* * These happen when there is an interrupt in a trap handler before * ASTs can be masked or in an lcall handler before they can be diff --git a/sys/i386/isa/npx.c b/sys/i386/isa/npx.c index 4dd1e3c..32fa57a 100644 --- a/sys/i386/isa/npx.c +++ b/sys/i386/isa/npx.c @@ -32,7 +32,7 @@ * SUCH DAMAGE. * * from: @(#)npx.c 7.2 (Berkeley) 5/12/91 - * $Id: npx.c,v 1.3 1997/07/20 23:30:38 smp Exp smp $ + * $Id: npx.c,v 1.49 1997/07/21 07:57:50 fsmp Exp $ */ #include "npx.h" @@ -515,7 +515,7 @@ npxintr(unit) * Pass exception to process. */ frame = (struct intrframe *)&unit; /* XXX */ - if (ISPL(frame->if_cs) == SEL_UPL) { + if ((ISPL(frame->if_cs) == SEL_UPL) || (frame->if_eflags & PSL_VM)) { /* * Interrupt is essentially a trap, so we can afford to call * the SIGFPE handler (if any) as soon as the interrupt diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c index 6e8c0c3..7110933 100644 --- a/sys/kern/subr_trap.c +++ b/sys/kern/subr_trap.c @@ -35,7 +35,7 @@ * SUCH DAMAGE. 
* * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 - * $Id: trap.c,v 1.100 1997/06/22 16:03:37 peter Exp $ + * $Id: trap.c,v 1.101 1997/07/20 08:37:23 bde Exp $ */ /* @@ -85,6 +85,7 @@ extern struct i386tss common_tss; +int (*vm86_emulate) __P((struct vm86frame *)); int (*pmath_emulate) __P((struct trapframe *)); extern void trap __P((struct trapframe frame)); @@ -192,7 +193,7 @@ trap(frame) type = frame.tf_trapno; code = frame.tf_err; - if (ISPL(frame.tf_cs) == SEL_UPL) { + if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) { /* user trap */ sticks = p->p_sticks; @@ -225,9 +226,22 @@ trap(frame) } goto out; + /* + * The following two traps can happen in + * vm86 mode, and, if so, we want to handle + * them specially. + */ case T_PROTFLT: /* general protection fault */ - case T_SEGNPFLT: /* segment not present fault */ case T_STKFLT: /* stack fault */ + if (vm86_emulate && (frame.tf_eflags & PSL_VM)) { + i = (*vm86_emulate)((struct vm86frame *)&frame); + if (i == 0) + goto out; + break; + } + /* FALL THROUGH */ + + case T_SEGNPFLT: /* segment not present fault */ case T_TSSFLT: /* invalid TSS fault */ case T_DOUBLEFLT: /* double fault */ default: @@ -694,6 +708,7 @@ trap_fatal(frame) if (type <= MAX_TRAP_MSG) printf("\n\nFatal trap %d: %s while in %s mode\n", type, trap_msg[type], + frame->tf_eflags & PSL_VM ? "vm86" : ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel"); #ifdef SMP printf("cpuid = %d\n", cpuid); @@ -707,7 +722,7 @@ trap_fatal(frame) } printf("instruction pointer = 0x%x:0x%x\n", frame->tf_cs & 0xffff, frame->tf_eip); - if (ISPL(frame->tf_cs) == SEL_UPL) { + if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) { ss = frame->tf_ss & 0xffff; esp = frame->tf_esp; } else { @@ -946,7 +961,7 @@ bad: break; } - if (frame.tf_eflags & PSL_T) { + if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) { /* Traced syscall. */ frame.tf_eflags &= ~PSL_T; trapsignal(p, SIGTRAP, 0); diff --git a/sys/sys/systm.h b/sys/sys/systm.h index 7cded7f..44dd713 100644 --- a/sys/sys/systm.h +++ b/sys/sys/systm.h @@ -36,7 +36,7 @@ * SUCH DAMAGE. * * @(#)systm.h 8.7 (Berkeley) 3/29/95 - * $Id: systm.h,v 1.52 1997/05/31 09:43:22 peter Exp $ + * $Id: systm.h,v 1.53 1997/07/21 16:43:48 bde Exp $ */ #ifndef _SYS_SYSTM_H_ @@ -120,6 +120,7 @@ int subyte __P((void *base, int byte)); int suibyte __P((void *base, int byte)); int fuword __P((const void *base)); int suword __P((void *base, int word)); +int fusword __P((void *base)); int susword __P((void *base, int word)); struct timeval; |
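
The I386_GET_IOPERM and I386_SET_IOPERM commands added to sysarch.h above are driven through sysarch(2) with a struct whose start/length/enable fields appear in the i386_set_ioperm()/i386_get_ioperm() code at the top of this diff. What follows is a minimal user-level sketch, not part of this commit: the struct layout is inferred from that kernel code, the command value is copied from the sysarch.h hunk, and the sysarch() prototype is declared locally as an assumption of the sketch.

/*
 * Minimal user-level sketch (not part of this commit): enable direct
 * access to i/o ports 0x378-0x37a through the new I386_SET_IOPERM
 * sysarch command.  Enabling ports is normally restricted to the
 * superuser.
 */
#include <err.h>
#include <stdio.h>

#define	I386_SET_IOPERM	4		/* matches the sysarch.h change above */

struct i386_ioperm_args {		/* field names as used by the kernel code above */
	unsigned int start;
	unsigned int length;
	int enable;
};

int sysarch(int number, void *args);	/* declared here so the sketch stands alone */

int
main(void)
{
	struct i386_ioperm_args ua;

	ua.start = 0x378;		/* first port of interest */
	ua.length = 2;			/* kernel loop covers start..start+length inclusive */
	ua.enable = 1;			/* clear bitmap bits => access allowed */

	if (sysarch(I386_SET_IOPERM, &ua) < 0)
		err(1, "I386_SET_IOPERM");

	printf("i/o ports 0x378-0x37a enabled for this process\n");
	return (0);
}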
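
The bitmap arithmetic in i386_set_ioperm() and i386_get_ioperm() follows the hardware convention for the TSS i/o permission bitmap: one bit per port, and a clear bit grants access. Below is a standalone illustration of the port-to-byte/bit mapping; it is not kernel code, and unlike the demo array here, the kernel's real bitmap lives in the IOPAGES pages of the pcb extension and starts out with every bit set.

/*
 * Standalone illustration of the i/o permission bitmap indexing used
 * above: port N maps to byte N>>3, bit N&7, and a CLEAR bit means the
 * port may be accessed from user/vm86 mode.
 */
#include <stdio.h>
#include <string.h>

#define	IOPORTS	0x10000			/* 64K i/o port addresses */

static unsigned char iomap[IOPORTS / 8];

static void
ioperm_set(unsigned int port, int enable)
{
	if (enable)
		iomap[port >> 3] &= ~(1 << (port & 7));	/* clear => allowed */
	else
		iomap[port >> 3] |= (1 << (port & 7));	/* set => faults */
}

static int
ioperm_allowed(unsigned int port)
{
	return (((iomap[port >> 3] >> (port & 7)) & 1) == 0);
}

int
main(void)
{
	memset(iomap, 0xff, sizeof(iomap));	/* start with everything trapped */
	ioperm_set(0x378, 1);
	printf("0x378 allowed: %d, 0x379 allowed: %d\n",
	    ioperm_allowed(0x378), ioperm_allowed(0x379));
	return (0);
}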
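
Several hunks above replace tests of the form ISPL(tf_cs) == SEL_UPL with a check that also accepts PSL_VM in the saved EFLAGS: in virtual-8086 mode the saved CS image is a real-mode segment value, so its low two bits are not a meaningful privilege level and the VM flag is the reliable indicator. A small sketch of the combined test follows; the constants mirror the i386 headers (segments.h, psl.h) and the function itself is illustrative only, not part of this diff.

/*
 * Sketch of the "user or vm86?" test that trap(), trap_fatal(),
 * npxintr() and CLKF_USERMODE() now share.
 */
#include <stdio.h>

#define	SEL_UPL		3		/* user privilege level */
#define	ISPL(s)		((s) & 3)	/* RPL bits of a selector image */
#define	PSL_VM		0x00020000	/* EFLAGS: virtual-8086 mode */

static int
frame_is_usermode(unsigned int tf_cs, unsigned int tf_eflags)
{
	return ((ISPL(tf_cs) == SEL_UPL) || (tf_eflags & PSL_VM) != 0);
}

int
main(void)
{
	/* ring-3 code selector, ordinary flags: user mode */
	printf("%d\n", frame_is_usermode(0x001b, 0x00000202));
	/* real-mode style CS image, VM flag set: vm86, still treated as user */
	printf("%d\n", frame_is_usermode(0x0000, 0x00020202));
	/* kernel code selector, VM clear: kernel mode */
	printf("%d\n", frame_is_usermode(0x0008, 0x00000202));
	return (0);
}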