author      dyson <dyson@FreeBSD.org>  1997-08-09 01:38:03 +0000
committer   dyson <dyson@FreeBSD.org>  1997-08-09 01:38:03 +0000
commit      dede28832bba6a9de7a428ff58df92439bddbc9c (patch)
tree        982301f619f290a9f8e831099958f99730825d1b
parent      9b3d3ce7fac3f89596fec9ee8623b3f5774fbb3c (diff)
Add the code that represents most of the interface between the VM86
pseudo-machine and the rest of the FreeBSD kernel.

Submitted by:	Jonathan Lemon <jlemon@americantv.com>
-rw-r--r--  sys/i386/i386/vm86.c  434
1 files changed, 434 insertions, 0 deletions
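
Note: the new file adds only the kernel side of the interface. Callers reach i386_vm86() with a struct i386_vm86_args whose sub_op selects the operation (VM86_INIT, VM86_GET_VME) and whose sub_args points at the per-operation argument block. The following is a minimal, hypothetical userland sketch of exercising VM86_INIT; the sysarch(2) request name (I386_VM86), the header locations, and the exact layout of struct vm86_init_args are assumptions, not part of this commit.

/*
 * Hypothetical userland sketch of driving the VM86_INIT sub-operation.
 * Field and constant names follow the kernel code in the diff below;
 * the sysarch(2) request number (I386_VM86) and the headers used here
 * are assumptions.
 */
#include <sys/types.h>
#include <string.h>
#include <machine/sysarch.h>	/* assumed to declare I386_VM86 and the arg structs */

int
vm86_setup(void)
{
	struct vm86_init_args va;	/* consumed by the VM86_INIT case below */
	struct i386_vm86_args ua;

	/*
	 * int_map is copied into vm86_intmap (32 bytes, one bit per INT
	 * vector); a set bit makes the in-kernel emulator decline to
	 * reflect that INTn through the real-mode vector table and fault
	 * back out to the caller instead.  Leave all bits clear here.
	 */
	memset(&va, 0, sizeof(va));
	va.debug = 0;

	ua.sub_op = VM86_INIT;
	ua.sub_args = (char *)&va;
	return (sysarch(I386_VM86, (char *)&ua));
}

In the kernel, the VM86_INIT case records whether the CPU's VME extensions are usable (CPUID_VME and CR4_VME) and copies the 32-byte interrupt bitmap; VM86_GET_VME simply reports whether CR4.VME is currently set.
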
diff --git a/sys/i386/i386/vm86.c b/sys/i386/i386/vm86.c
new file mode 100644
index 0000000..58cbd75
--- /dev/null
+++ b/sys/i386/i386/vm86.c
@@ -0,0 +1,434 @@
+/*-
+ * Copyright (c) 1997 Jonathan Lemon
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/unistd.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/shm.h>
+#include <sys/sysproto.h>
+#include <sys/filedesc.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_prot.h>
+#include <vm/pmap.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <vm/vm_extern.h>
+
+#include <sys/user.h>
+
+#include <machine/cpu.h>
+#include <machine/psl.h>
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+
+extern int emulate_vm86 __P((struct vm86frame *));
+extern int i386_vm86 __P((struct proc *p, char *args, int *retval));
+
+extern int i386_extend_pcb __P((struct proc *));
+
+#define CLI 0xfa
+#define STI 0xfb
+#define PUSHF 0x9c
+#define POPF 0x9d
+#define INTn 0xcd
+#define IRET 0xcf
+#define OPERAND_SIZE_PREFIX 0x66
+#define ADDRESS_SIZE_PREFIX 0x67
+#define PUSH_MASK ~(PSL_VM | PSL_RF | PSL_I)
+#define POP_MASK ~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)
+
+static inline caddr_t
+MAKE_ADDR(u_short sel, u_short off)
+{
+ return ((caddr_t)((sel << 4) + off));
+}
+
+static inline void
+GET_VEC(u_long vec, u_short *sel, u_short *off)
+{
+ *sel = vec >> 16;
+ *off = vec & 0xffff;
+}
+
+static inline u_long
+MAKE_VEC(u_short sel, u_short off)
+{
+ return ((sel << 16) | off);
+}
+
+static inline void
+PUSH(u_short x, struct vm86frame *vmf)
+{
+ vmf->vmf_sp -= 2;
+ susword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
+}
+
+static inline void
+PUSHL(u_long x, struct vm86frame *vmf)
+{
+ vmf->vmf_sp -= 4;
+ suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
+}
+
+static inline u_short
+POP(struct vm86frame *vmf)
+{
+ u_short x = fusword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));
+
+ vmf->vmf_sp += 2;
+ return (x);
+}
+
+static inline u_long
+POPL(struct vm86frame *vmf)
+{
+ u_long x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));
+
+ vmf->vmf_sp += 4;
+ return (x);
+}
+
+int
+emulate_vm86(vmf)
+ struct vm86frame *vmf;
+{
+ struct vm86_kernel *vm86;
+ caddr_t addr;
+ u_char i_byte;
+ u_long temp_flags;
+ int inc_ip = 1;
+ int retcode = 0;
+
+ /*
+ * pcb_ext contains the address of the extension area, or zero if
+ * the extension is not present. (This check should not be needed,
+ * as we can't enter vm86 mode until we set up an extension area)
+ */
+ if (curpcb->pcb_ext == 0)
+ return (SIGBUS);
+ vm86 = &curpcb->pcb_ext->ext_vm86;
+
+ if (vmf->vmf_eflags & PSL_T)
+ retcode = SIGTRAP;
+
+ addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
+ i_byte = fubyte(addr);
+ if (i_byte == ADDRESS_SIZE_PREFIX) {
+ i_byte = fubyte(++addr);
+ inc_ip++;
+ }
+
+ if (vm86->vm86_has_vme) {
+ switch (i_byte) {
+ case OPERAND_SIZE_PREFIX:
+ i_byte = fubyte(++addr);
+ inc_ip++;
+ switch (i_byte) {
+ case PUSHF:
+ if (vmf->vmf_eflags & PSL_VIF)
+ PUSHL((vmf->vmf_eflags & PUSH_MASK)
+ | PSL_IOPL | PSL_I, vmf);
+ else
+ PUSHL((vmf->vmf_eflags & PUSH_MASK)
+ | PSL_IOPL, vmf);
+ vmf->vmf_ip += inc_ip;
+ return (0);
+
+ case POPF:
+ temp_flags = POPL(vmf) & POP_MASK;
+ vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
+ | temp_flags | PSL_VM | PSL_I;
+ vmf->vmf_ip += inc_ip;
+ if (temp_flags & PSL_I) {
+ vmf->vmf_eflags |= PSL_VIF;
+ if (vmf->vmf_eflags & PSL_VIP)
+ break;
+ } else {
+ vmf->vmf_eflags &= ~PSL_VIF;
+ }
+ return (0);
+ }
+ break;
+
+ /* VME faults here if VIP is set, but does not set VIF. */
+ case STI:
+ vmf->vmf_eflags |= PSL_VIF;
+ vmf->vmf_ip += inc_ip;
+ if ((vmf->vmf_eflags & PSL_VIP) == 0) {
+ uprintf("fatal sti\n");
+ return (SIGKILL);
+ }
+ break;
+
+ /* VME if no redirection support */
+ case INTn:
+ break;
+
+ /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
+ case POPF:
+ temp_flags = POP(vmf) & POP_MASK;
+ vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
+ | temp_flags | PSL_VM | PSL_I;
+ vmf->vmf_ip += inc_ip;
+ if (temp_flags & PSL_I) {
+ vmf->vmf_eflags |= PSL_VIF;
+ if (vmf->vmf_eflags & PSL_VIP)
+ break;
+ } else {
+ vmf->vmf_eflags &= ~PSL_VIF;
+ }
+ return (retcode);
+
+ /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
+ case IRET:
+ vmf->vmf_ip = POP(vmf);
+ vmf->vmf_cs = POP(vmf);
+ temp_flags = POP(vmf) & POP_MASK;
+ vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
+ | temp_flags | PSL_VM | PSL_I;
+ if (temp_flags & PSL_I) {
+ vmf->vmf_eflags |= PSL_VIF;
+ if (vmf->vmf_eflags & PSL_VIP)
+ break;
+ } else {
+ vmf->vmf_eflags &= ~PSL_VIF;
+ }
+ return (retcode);
+
+ }
+ return (SIGBUS);
+ }
+
+ switch (i_byte) {
+ case OPERAND_SIZE_PREFIX:
+ i_byte = fubyte(++addr);
+ inc_ip++;
+ switch (i_byte) {
+ case PUSHF:
+ if (vm86->vm86_eflags & PSL_VIF)
+ PUSHL((vmf->vmf_flags & PUSH_MASK)
+ | PSL_IOPL | PSL_I, vmf);
+ else
+ PUSHL((vmf->vmf_flags & PUSH_MASK)
+ | PSL_IOPL, vmf);
+ vmf->vmf_ip += inc_ip;
+ return (retcode);
+
+ case POPF:
+ temp_flags = POPL(vmf) & POP_MASK;
+ vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
+ | temp_flags | PSL_VM | PSL_I;
+ vmf->vmf_ip += inc_ip;
+ if (temp_flags & PSL_I) {
+ vm86->vm86_eflags |= PSL_VIF;
+ if (vm86->vm86_eflags & PSL_VIP)
+ break;
+ } else {
+ vm86->vm86_eflags &= ~PSL_VIF;
+ }
+ return (retcode);
+ }
+ return (SIGBUS);
+
+ case CLI:
+ vm86->vm86_eflags &= ~PSL_VIF;
+ vmf->vmf_ip += inc_ip;
+ return (retcode);
+
+ case STI:
+ /* if there is a pending interrupt, go to the emulator */
+ vm86->vm86_eflags |= PSL_VIF;
+ vmf->vmf_ip += inc_ip;
+ if (vm86->vm86_eflags & PSL_VIP)
+ break;
+ return (retcode);
+
+ case PUSHF:
+ if (vm86->vm86_eflags & PSL_VIF)
+ PUSH((vmf->vmf_flags & PUSH_MASK)
+ | PSL_IOPL | PSL_I, vmf);
+ else
+ PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
+ vmf->vmf_ip += inc_ip;
+ return (retcode);
+
+ case INTn:
+ i_byte = fubyte(addr + 1);
+ if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
+ break;
+ if (vm86->vm86_eflags & PSL_VIF)
+ PUSH((vmf->vmf_flags & PUSH_MASK)
+ | PSL_IOPL | PSL_I, vmf);
+ else
+ PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
+ PUSH(vmf->vmf_cs, vmf);
+ PUSH(vmf->vmf_ip + inc_ip + 1, vmf); /* increment IP */
+ GET_VEC(fuword((caddr_t)(i_byte * 4)),
+ &vmf->vmf_cs, &vmf->vmf_ip);
+ vmf->vmf_flags &= ~PSL_T;
+ vm86->vm86_eflags &= ~PSL_VIF;
+ return (retcode);
+
+ case IRET:
+ vmf->vmf_ip = POP(vmf);
+ vmf->vmf_cs = POP(vmf);
+ temp_flags = POP(vmf) & POP_MASK;
+ vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
+ | temp_flags | PSL_VM | PSL_I;
+ if (temp_flags & PSL_I) {
+ vm86->vm86_eflags |= PSL_VIF;
+ if (vm86->vm86_eflags & PSL_VIP)
+ break;
+ } else {
+ vm86->vm86_eflags &= ~PSL_VIF;
+ }
+ return (retcode);
+
+ case POPF:
+ temp_flags = POP(vmf) & POP_MASK;
+ vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
+ | temp_flags | PSL_VM | PSL_I;
+ vmf->vmf_ip += inc_ip;
+ if (temp_flags & PSL_I) {
+ vm86->vm86_eflags |= PSL_VIF;
+ if (vm86->vm86_eflags & PSL_VIP)
+ break;
+ } else {
+ vm86->vm86_eflags &= ~PSL_VIF;
+ }
+ return (retcode);
+ }
+ return (SIGBUS);
+}
+
+int
+i386_vm86(p, args, retval)
+ struct proc *p;
+ char *args;
+ int *retval;
+{
+ int error = 0;
+ struct i386_vm86_args ua;
+ struct vm86_kernel *vm86;
+
+ if (error = copyin(args, &ua, sizeof(struct i386_vm86_args)))
+ return (error);
+
+ if (p->p_addr->u_pcb.pcb_ext == 0)
+ if (error = i386_extend_pcb(p))
+ return (error);
+ vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
+
+ switch (ua.sub_op) {
+ case VM86_INIT: {
+ struct vm86_init_args sa;
+
+ if (error = copyin(ua.sub_args, &sa, sizeof(sa)))
+ return (error);
+ if (cpu_feature & CPUID_VME)
+ vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
+ else
+ vm86->vm86_has_vme = 0;
+ vm86->vm86_inited = 1;
+ vm86->vm86_debug = sa.debug;
+ bcopy(&sa.int_map, vm86->vm86_intmap, 32);
+ }
+ break;
+
+#if 0
+ case VM86_SET_VME: {
+ struct vm86_vme_args sa;
+
+ if ((cpu_feature & CPUID_VME) == 0)
+ return (ENODEV);
+
+ if (error = copyin(ua.sub_args, &sa, sizeof(sa)))
+ return (error);
+ if (sa.state)
+ load_cr4(rcr4() | CR4_VME);
+ else
+ load_cr4(rcr4() & ~CR4_VME);
+ }
+ break;
+#endif
+
+ case VM86_GET_VME: {
+ struct vm86_vme_args sa;
+
+ sa.state = (rcr4() & CR4_VME ? 1 : 0);
+ error = copyout(&sa, ua.sub_args, sizeof(sa));
+ }
+ break;
+
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
+#ifdef VM86_MODULE
+
+#include <sys/exec.h>
+#include <sys/sysent.h>
+#include <sys/lkm.h>
+
+MOD_MISC(vm86);
+
+static int
+vm86_load(struct lkm_table *lkmtp, int cmd)
+{
+ vm86_emulate = emulate_vm86;
+ vm86_sysarch = i386_vm86;
+ uprintf("vm86 emulator installed\n");
+ return 0;
+}
+
+static int
+vm86_unload(struct lkm_table *lkmtp, int cmd)
+{
+ vm86_emulate = 0;
+ vm86_sysarch = 0;
+ uprintf("vm86 emulator removed\n");
+ return 0;
+}
+
+int
+vm86_mod(struct lkm_table *lkmtp, int cmd, int ver)
+{
+ MOD_DISPATCH(vm86, lkmtp, cmd, ver,
+ vm86_load, vm86_unload, lkm_nullcmd);
+}
+
+#endif /* VM86_MODULE */
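
The recurring pattern in emulate_vm86() above is flag virtualization: vm86 code never touches the real interrupt flag or IOPL. What the guest observes through PUSHF/POPF/IRET is a copy of EFLAGS with IOPL forced up and the virtual interrupt flag reported in place of PSL_I, the virtual flag living either in hardware EFLAGS (VME path) or in vm86_eflags in the PCB extension (software path). The following is a condensed restatement of that substitution for illustration only, not part of the commit; SKETCH_PUSH_MASK and SKETCH_POP_MASK mirror the PUSH_MASK and POP_MASK macros above.

/*
 * Illustrative restatement of the PUSHF/POPF flag substitution used in
 * emulate_vm86(); a simplified sketch, not the committed kernel code.
 * Assumes the i386 <machine/psl.h> PSL_* definitions.
 */
#include <sys/types.h>
#include <machine/psl.h>

#define SKETCH_PUSH_MASK	~(PSL_VM | PSL_RF | PSL_I)
#define SKETCH_POP_MASK		~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)

/*
 * What the guest sees on PUSHF: its own flags with IOPL forced to 3 and
 * the virtual interrupt flag reported in place of the real PSL_I.
 */
static u_long
guest_visible_flags(u_long eflags, int virtual_if)
{
	u_long f = (eflags & SKETCH_PUSH_MASK) | PSL_IOPL;

	if (virtual_if)
		f |= PSL_I;
	return (f);
}

/*
 * What the kernel keeps after the guest POPFs: guest-controlled bits are
 * merged in, PSL_VM and the real PSL_I are forced back on, and the
 * guest's PSL_I request only updates the virtual flag.  A pending
 * virtual interrupt (PSL_VIP with the virtual flag now set) is what
 * makes the real code above break out of the switch to deliver it.
 */
static u_long
host_kept_flags(u_long eflags, u_long popped, int *virtual_if)
{
	*virtual_if = (popped & PSL_I) != 0;
	return ((eflags & ~SKETCH_POP_MASK) | (popped & SKETCH_POP_MASK) |
	    PSL_VM | PSL_I);
}

With CR4.VME enabled the CPU performs this substitution itself for the common one-byte forms, so the VME branch of emulate_vm86() only deals with the cases its own comments call out: the operand-size-prefixed PUSHF/POPF, INTn without redirection support, and instructions executed while a virtual interrupt is pending.
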