author     rgrimes <rgrimes@FreeBSD.org>   1993-06-12 14:58:17 +0000
committer  rgrimes <rgrimes@FreeBSD.org>   1993-06-12 14:58:17 +0000
commit     25062ba061871945759b3baa833fe64969383e40 (patch)
tree       2d1c31051ed0dbaad984013c9fe695b1a01e1c39 /sys/amd64
parent     f078b88a160c467761b3f3641f05dfd0aa3f7753 (diff)
Initial import, 0.1 + pk 0.2.4-B1
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/Makefile                 24
-rw-r--r--  sys/amd64/amd64/autoconf.c        213
-rw-r--r--  sys/amd64/amd64/db_disasm.c      1397
-rw-r--r--  sys/amd64/amd64/db_interface.c    255
-rw-r--r--  sys/amd64/amd64/db_trace.c        292
-rw-r--r--  sys/amd64/amd64/fpu.c             564
-rw-r--r--  sys/amd64/amd64/genassym.c        174
-rw-r--r--  sys/amd64/amd64/locore.S         1830
-rw-r--r--  sys/amd64/amd64/locore.s         1830
-rw-r--r--  sys/amd64/amd64/machdep.c        1131
-rw-r--r--  sys/amd64/amd64/mem.c             191
-rw-r--r--  sys/amd64/amd64/pmap.c           1728
-rw-r--r--  sys/amd64/amd64/sys_machdep.c     105
-rw-r--r--  sys/amd64/amd64/trap.c            547
-rw-r--r--  sys/amd64/amd64/tsc.c             271
-rw-r--r--  sys/amd64/amd64/vm_machdep.c      410
-rw-r--r--  sys/amd64/include/cpu.h           108
-rw-r--r--  sys/amd64/include/cpufunc.h        82
-rw-r--r--  sys/amd64/include/db_machdep.h    154
-rw-r--r--  sys/amd64/include/float.h          74
-rw-r--r--  sys/amd64/include/fpu.h           146
-rw-r--r--  sys/amd64/include/frame.h         116
-rw-r--r--  sys/amd64/include/npx.h           146
-rw-r--r--  sys/amd64/include/pc/display.h     43
-rw-r--r--  sys/amd64/include/pcb.h            87
-rw-r--r--  sys/amd64/include/pmap.h          234
-rw-r--r--  sys/amd64/include/proc.h           47
-rw-r--r--  sys/amd64/include/psl.h            60
-rw-r--r--  sys/amd64/include/reg.h            93
-rw-r--r--  sys/amd64/include/segments.h      196
-rw-r--r--  sys/amd64/include/specialreg.h     67
-rw-r--r--  sys/amd64/include/trap.h           96
-rw-r--r--  sys/amd64/include/tss.h            78
-rw-r--r--  sys/amd64/include/vmparam.h       256
-rw-r--r--  sys/amd64/isa/clock.c             271
-rw-r--r--  sys/amd64/isa/icu.h               109
-rw-r--r--  sys/amd64/isa/isa.c               766
-rw-r--r--  sys/amd64/isa/isa.h               188
-rw-r--r--  sys/amd64/isa/npx.c               564
-rw-r--r--  sys/amd64/isa/timerreg.h           89
-rw-r--r--  sys/amd64/isa/vector.S            376
-rw-r--r--  sys/amd64/isa/vector.s            376
42 files changed, 15784 insertions, 0 deletions
diff --git a/sys/amd64/Makefile b/sys/amd64/Makefile
new file mode 100644
index 0000000..0662e28
--- /dev/null
+++ b/sys/amd64/Makefile
@@ -0,0 +1,24 @@
+# @(#)Makefile 7.3 (Berkeley) 6/9/91
+
+# Makefile for i386 tags file
+
+all:
+ @echo "make tags or links only"
+
+TI386= ../i386/tags
+SI386= ../i386/i386/*.[ch] ../i386/include/*.h ../i386/isa/*.[ch]
+AI386= ../i386/i386/*.s
+
+# Directories in which to place i386 tags links
+DI386= eisa isa mca include
+
+tags:
+ -ctags -dtf ${TI386} ${COMM} ${SI386}
+ egrep "^ENTRY(.*)|^ALTENTRY(.*)" ${AI386} | \
+ sed "s;\([^:]*\):\([^(]*\)(\([^, )]*\)\(.*\);\3 \1 /^\2(\3\4$$/;" \
+ >> ${TI386}
+ sort -o ${TI386} ${TI386}
+
+links:
+ -for i in ${DI386}; do \
+ cd $$i && rm -f tags; ln -s ../tags tags; done
diff --git a/sys/amd64/amd64/autoconf.c b/sys/amd64/amd64/autoconf.c
new file mode 100644
index 0000000..7eee991
--- /dev/null
+++ b/sys/amd64/amd64/autoconf.c
@@ -0,0 +1,213 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)autoconf.c 7.1 (Berkeley) 5/9/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00117
+ * -------------------- ----- ----------------------
+ *
+ * 09 Apr 93 ???(From sun-lamp) Fix to report sd when Julians
+ * scsi code is used, allow you to swap
+ * root floppies during a boot
+ */
+static char rcsid[] = "$Header: /b/source/CVS/src/sys.386bsd/i386/i386/autoconf.c,v 1.3 1993/04/10 21:58:52 cgd Exp $";
+
+/*
+ * Setup the system to run on the current machine.
+ *
+ * Configure() is called at boot time and initializes the vba
+ * device tables and the memory controller monitoring. Available
+ * devices are determined (from possibilities mentioned in ioconf.c),
+ * and the drivers are initialized.
+ */
+#include "param.h"
+#include "systm.h"
+#include "buf.h"
+#include "dkstat.h"
+#include "conf.h"
+#include "dmap.h"
+#include "reboot.h"
+
+#include "machine/pte.h"
+
+/*
+ * The following several variables are related to
+ * the configuration process, and are used in initializing
+ * the machine.
+ */
+int dkn; /* number of iostat dk numbers assigned so far */
+extern int cold; /* cold start flag initialized in locore.s */
+
+/*
+ * Determine i/o configuration for a machine.
+ */
+configure()
+{
+
+#include "isa.h"
+#if NISA > 0
+ isa_configure();
+#endif
+
+#if GENERICxxx
+ if ((boothowto & RB_ASKNAME) == 0)
+ setroot();
+ setconf();
+#else
+ setroot();
+#endif
+ /*
+ * Configure swap area and related system
+ * parameter based on device(s) used.
+ */
+ swapconf();
+ cold = 0;
+}
+
+/*
+ * Configure swap space and related parameters.
+ */
+swapconf()
+{
+ register struct swdevt *swp;
+ register int nblks;
+extern int Maxmem;
+
+ for (swp = swdevt; swp->sw_dev > 0; swp++)
+ {
+ unsigned d = major(swp->sw_dev);
+
+ if (d > nblkdev) break;
+ if (bdevsw[d].d_psize) {
+ nblks = (*bdevsw[d].d_psize)(swp->sw_dev);
+ if (nblks > 0 &&
+ (swp->sw_nblks == 0 || swp->sw_nblks > nblks))
+ swp->sw_nblks = nblks;
+ else
+ swp->sw_nblks = 0;
+ }
+ swp->sw_nblks = ctod(dtoc(swp->sw_nblks));
+ }
+ if (dumplo == 0 && bdevsw[major(dumpdev)].d_psize)
+ /*dumplo = (*bdevsw[major(dumpdev)].d_psize)(dumpdev) - physmem;*/
+ dumplo = (*bdevsw[major(dumpdev)].d_psize)(dumpdev) -
+ Maxmem*NBPG/512;
+ if (dumplo < 0)
+ dumplo = 0;
+}
+
+#define DOSWAP /* change swdevt and dumpdev */
+u_long bootdev = 0; /* should be dev_t, but not until 32 bits */
+
+#include "sd.h"
+static char devname[][2] = {
+ 'w','d', /* 0 = wd */
+ 's','w', /* 1 = sw */
+ 'f','d', /* 2 = fd */
+ 'w','t', /* 3 = wt */
+#if NSD < 1
+ 'a','s', /* 4 = as */
+#else
+ 's','d', /* 4 = sd -- new SCSI system */
+#endif
+};
+
+#define PARTITIONMASK 0x7
+#define PARTITIONSHIFT 3
+
+/*
+ * Attempt to find the device from which we were booted.
+ * If we can do so, and not instructed not to do so,
+ * change rootdev to correspond to the load device.
+ */
+setroot()
+{
+ int majdev, mindev, unit, part, adaptor;
+ dev_t temp, orootdev;
+ struct swdevt *swp;
+
+/*printf("howto %x bootdev %x ", boothowto, bootdev);*/
+ if (boothowto & RB_DFLTROOT ||
+ (bootdev & B_MAGICMASK) != (u_long)B_DEVMAGIC)
+ return;
+ majdev = (bootdev >> B_TYPESHIFT) & B_TYPEMASK;
+ if (majdev > sizeof(devname) / sizeof(devname[0]))
+ return;
+ adaptor = (bootdev >> B_ADAPTORSHIFT) & B_ADAPTORMASK;
+ part = (bootdev >> B_PARTITIONSHIFT) & B_PARTITIONMASK;
+ unit = (bootdev >> B_UNITSHIFT) & B_UNITMASK;
+ mindev = (unit << PARTITIONSHIFT) + part;
+ orootdev = rootdev;
+ rootdev = makedev(majdev, mindev);
+ /*
+ * If the original rootdev is the same as the one
+ * just calculated, don't need to adjust the swap configuration.
+ */
+ if (rootdev == orootdev)
+ return;
+ if (devname[majdev][0] == 'f' && devname[majdev][1] == 'd') {
+ printf("");
+ printf("* insert the floppy you want to have mounted as\n");
+ printf("* root, and hit any key to continue booting:\n");
+ cngetc();
+ printf("");
+ }
+ printf("changing root device to %c%c%d%c\n",
+ devname[majdev][0], devname[majdev][1],
+ mindev >> PARTITIONSHIFT, part + 'a');
+#ifdef DOSWAP
+ mindev &= ~PARTITIONMASK;
+ for (swp = swdevt; swp->sw_dev; swp++) {
+ if (majdev == major(swp->sw_dev) &&
+ mindev == (minor(swp->sw_dev) & ~PARTITIONMASK)) {
+
+ temp = swdevt[0].sw_dev;
+ swdevt[0].sw_dev = swp->sw_dev;
+ swp->sw_dev = temp;
+ break;
+ }
+ }
+ if (swp->sw_dev == 0)
+ return;
+ /*
+ * If dumpdev was the same as the old primary swap
+ * device, move it to the new primary swap device.
+ */
+ if (temp == dumpdev)
+ dumpdev = swdevt[0].sw_dev;
+#endif
+}
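
The minor-number packing that setroot() performs can be reproduced in isolation. The sketch below is only an illustration: it reuses the PARTITIONSHIFT/PARTITIONMASK values from the file above, but the unit and partition numbers are made-up examples, not anything taken from a real bootdev word.

#include <stdio.h>

#define PARTITIONMASK	0x7
#define PARTITIONSHIFT	3

int
main(void)
{
	int unit = 1, part = 2;		/* example values only: drive 1, partition 'c' */
	int mindev = (unit << PARTITIONSHIFT) + part;

	/* the same name setroot() prints when it switches the root device */
	printf("minor %d -> wd%d%c\n", mindev,
	    mindev >> PARTITIONSHIFT, part + 'a');
	/* swap pairing in setroot() compares minors with the partition bits masked off */
	printf("minor & ~PARTITIONMASK = %d\n", mindev & ~PARTITIONMASK);
	return (0);
}

Built as an ordinary user program this prints "minor 10 -> wd1c", mirroring the "changing root device to ..." message above, and shows why the DOSWAP loop masks off the partition bits before matching swap devices.
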
diff --git a/sys/amd64/amd64/db_disasm.c b/sys/amd64/amd64/db_disasm.c
new file mode 100644
index 0000000..20430b6
--- /dev/null
+++ b/sys/amd64/amd64/db_disasm.c
@@ -0,0 +1,1397 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: db_disasm.c,v $
+ * Revision 1.1 1992/03/25 21:42:01 pace
+ * Initial revision
+ *
+ * Revision 2.3 91/02/05 17:11:03 mrt
+ * Changed to new Mach copyright
+ * [91/02/01 17:31:03 mrt]
+ *
+ * Revision 2.2 90/08/27 21:55:56 dbg
+ * Fix register operand for move to/from control/test/debug
+ * register instructions. Add i486 instructions.
+ * [90/08/27 dbg]
+ *
+ * Import db_sym.h. Print instruction displacements in
+ * current radix (signed). Change calling sequence of
+ * db_disasm.
+ * [90/08/21 dbg]
+ * Fix includes.
+ * [90/08/08 dbg]
+ * Created.
+ * [90/07/25 dbg]
+ *
+ */
+
+/*
+ * Instruction disassembler.
+ */
+#include "param.h"
+#include "proc.h"
+#include <machine/db_machdep.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+
+/*
+ * Size attributes
+ */
+#define BYTE 0
+#define WORD 1
+#define LONG 2
+#define QUAD 3
+#define SNGL 4
+#define DBLR 5
+#define EXTR 6
+#define SDEP 7
+#define NONE 8
+
+/*
+ * Addressing modes
+ */
+#define E 1 /* general effective address */
+#define Eind 2 /* indirect address (jump, call) */
+#define Ew 3 /* address, word size */
+#define Eb 4 /* address, byte size */
+#define R 5 /* register, in 'reg' field */
+#define Rw 6 /* word register, in 'reg' field */
+#define Ri 7 /* register in instruction */
+#define S 8 /* segment reg, in 'reg' field */
+#define Si 9 /* segment reg, in instruction */
+#define A 10 /* accumulator */
+#define BX 11 /* (bx) */
+#define CL 12 /* cl, for shifts */
+#define DX 13 /* dx, for IO */
+#define SI 14 /* si */
+#define DI 15 /* di */
+#define CR 16 /* control register */
+#define DR 17 /* debug register */
+#define TR 18 /* test register */
+#define I 19 /* immediate, unsigned */
+#define Is 20 /* immediate, signed */
+#define Ib 21 /* byte immediate, unsigned */
+#define Ibs 22 /* byte immediate, signed */
+#define Iw 23 /* word immediate, unsigned */
+#define Il 24 /* long immediate */
+#define O 25 /* direct address */
+#define Db 26 /* byte displacement from EIP */
+#define Dl 27 /* long displacement from EIP */
+#define o1 28 /* constant 1 */
+#define o3 29 /* constant 3 */
+#define OS 30 /* immediate offset/segment */
+#define ST 31 /* FP stack top */
+#define STI 32 /* FP stack */
+#define X 33 /* extended FP op */
+#define XA 34 /* for 'fstcw %ax' */
+
+struct inst {
+ char * i_name; /* name */
+ short i_has_modrm; /* has regmodrm byte */
+ short i_size; /* operand size */
+ int i_mode; /* addressing modes */
+ char * i_extra; /* pointer to extra opcode table */
+};
+
+#define op1(x) (x)
+#define op2(x,y) ((x)|((y)<<8))
+#define op3(x,y,z) ((x)|((y)<<8)|((z)<<16))
+
+struct finst {
+ char * f_name; /* name for memory instruction */
+ int f_size; /* size for memory instruction */
+ int f_rrmode; /* mode for rr instruction */
+ char * f_rrname; /* name for rr instruction
+ (or pointer to table) */
+};
+
+char * db_Grp6[] = {
+ "sldt",
+ "str",
+ "lldt",
+ "ltr",
+ "verr",
+ "verw",
+ "",
+ ""
+};
+
+char * db_Grp7[] = {
+ "sgdt",
+ "sidt",
+ "lgdt",
+ "lidt",
+ "smsw",
+ "",
+ "lmsw",
+ "invlpg"
+};
+
+char * db_Grp8[] = {
+ "",
+ "",
+ "",
+ "",
+ "bt",
+ "bts",
+ "btr",
+ "btc"
+};
+
+struct inst db_inst_0f0x[] = {
+/*00*/ { "", TRUE, NONE, op1(Ew), (char *)db_Grp6 },
+/*01*/ { "", TRUE, NONE, op1(Ew), (char *)db_Grp7 },
+/*02*/ { "lar", TRUE, LONG, op2(E,R), 0 },
+/*03*/ { "lsl", TRUE, LONG, op2(E,R), 0 },
+/*04*/ { "", FALSE, NONE, 0, 0 },
+/*05*/ { "", FALSE, NONE, 0, 0 },
+/*06*/ { "clts", FALSE, NONE, 0, 0 },
+/*07*/ { "", FALSE, NONE, 0, 0 },
+
+/*08*/ { "invd", FALSE, NONE, 0, 0 },
+/*09*/ { "wbinvd",FALSE, NONE, 0, 0 },
+/*0a*/ { "", FALSE, NONE, 0, 0 },
+/*0b*/ { "", FALSE, NONE, 0, 0 },
+/*0c*/ { "", FALSE, NONE, 0, 0 },
+/*0d*/ { "", FALSE, NONE, 0, 0 },
+/*0e*/ { "", FALSE, NONE, 0, 0 },
+/*0f*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst db_inst_0f2x[] = {
+/*20*/ { "mov", TRUE, LONG, op2(CR,E), 0 }, /* use E for reg */
+/*21*/ { "mov", TRUE, LONG, op2(DR,E), 0 }, /* since mod == 11 */
+/*22*/ { "mov", TRUE, LONG, op2(E,CR), 0 },
+/*23*/ { "mov", TRUE, LONG, op2(E,DR), 0 },
+/*24*/ { "mov", TRUE, LONG, op2(TR,E), 0 },
+/*25*/ { "", FALSE, NONE, 0, 0 },
+/*26*/ { "mov", TRUE, LONG, op2(E,TR), 0 },
+/*27*/ { "", FALSE, NONE, 0, 0 },
+
+/*28*/ { "", FALSE, NONE, 0, 0 },
+/*29*/ { "", FALSE, NONE, 0, 0 },
+/*2a*/ { "", FALSE, NONE, 0, 0 },
+/*2b*/ { "", FALSE, NONE, 0, 0 },
+/*2c*/ { "", FALSE, NONE, 0, 0 },
+/*2d*/ { "", FALSE, NONE, 0, 0 },
+/*2e*/ { "", FALSE, NONE, 0, 0 },
+/*2f*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst db_inst_0f8x[] = {
+/*80*/ { "jo", FALSE, NONE, op1(Dl), 0 },
+/*81*/ { "jno", FALSE, NONE, op1(Dl), 0 },
+/*82*/ { "jb", FALSE, NONE, op1(Dl), 0 },
+/*83*/ { "jnb", FALSE, NONE, op1(Dl), 0 },
+/*84*/ { "jz", FALSE, NONE, op1(Dl), 0 },
+/*85*/ { "jnz", FALSE, NONE, op1(Dl), 0 },
+/*86*/ { "jbe", FALSE, NONE, op1(Dl), 0 },
+/*87*/ { "jnbe", FALSE, NONE, op1(Dl), 0 },
+
+/*88*/ { "js", FALSE, NONE, op1(Dl), 0 },
+/*89*/ { "jns", FALSE, NONE, op1(Dl), 0 },
+/*8a*/ { "jp", FALSE, NONE, op1(Dl), 0 },
+/*8b*/ { "jnp", FALSE, NONE, op1(Dl), 0 },
+/*8c*/ { "jl", FALSE, NONE, op1(Dl), 0 },
+/*8d*/ { "jnl", FALSE, NONE, op1(Dl), 0 },
+/*8e*/ { "jle", FALSE, NONE, op1(Dl), 0 },
+/*8f*/ { "jnle", FALSE, NONE, op1(Dl), 0 },
+};
+
+struct inst db_inst_0f9x[] = {
+/*90*/ { "seto", TRUE, NONE, op1(Eb), 0 },
+/*91*/ { "setno", TRUE, NONE, op1(Eb), 0 },
+/*92*/ { "setb", TRUE, NONE, op1(Eb), 0 },
+/*93*/ { "setnb", TRUE, NONE, op1(Eb), 0 },
+/*94*/ { "setz", TRUE, NONE, op1(Eb), 0 },
+/*95*/ { "setnz", TRUE, NONE, op1(Eb), 0 },
+/*96*/ { "setbe", TRUE, NONE, op1(Eb), 0 },
+/*97*/ { "setnbe",TRUE, NONE, op1(Eb), 0 },
+
+/*98*/ { "sets", TRUE, NONE, op1(Eb), 0 },
+/*99*/ { "setns", TRUE, NONE, op1(Eb), 0 },
+/*9a*/ { "setp", TRUE, NONE, op1(Eb), 0 },
+/*9b*/ { "setnp", TRUE, NONE, op1(Eb), 0 },
+/*9c*/ { "setl", TRUE, NONE, op1(Eb), 0 },
+/*9d*/ { "setnl", TRUE, NONE, op1(Eb), 0 },
+/*9e*/ { "setle", TRUE, NONE, op1(Eb), 0 },
+/*9f*/ { "setnle",TRUE, NONE, op1(Eb), 0 },
+};
+
+struct inst db_inst_0fax[] = {
+/*a0*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*a1*/ { "pop", FALSE, NONE, op1(Si), 0 },
+/*a2*/ { "", FALSE, NONE, 0, 0 },
+/*a3*/ { "bt", TRUE, LONG, op2(E,R), 0 },
+/*a4*/ { "shld", TRUE, LONG, op3(Ib,E,R), 0 },
+/*a5*/ { "shld", TRUE, LONG, op3(CL,E,R), 0 },
+/*a6*/ { "", FALSE, NONE, 0, 0 },
+/*a7*/ { "", FALSE, NONE, 0, 0 },
+
+/*a8*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*a9*/ { "pop", FALSE, NONE, op1(Si), 0 },
+/*aa*/ { "", FALSE, NONE, 0, 0 },
+/*ab*/ { "bts", TRUE, LONG, op2(E,R), 0 },
+/*ac*/ { "shrd", TRUE, LONG, op3(Ib,E,R), 0 },
+/*ad*/ { "shrd", TRUE, LONG, op3(CL,E,R), 0 },
+/*ae*/ { "", FALSE, NONE, 0, 0 },
+/*af*/ { "imul", TRUE, LONG, op2(E,R), 0 },
+};
+
+struct inst db_inst_0fbx[] = {
+/*b0*/ { "", FALSE, NONE, 0, 0 },
+/*b1*/ { "", FALSE, NONE, 0, 0 },
+/*b2*/ { "lss", TRUE, LONG, op2(E, R), 0 },
+/*b3*/ { "bts", TRUE, LONG, op2(R, E), 0 },
+/*b4*/ { "lfs", TRUE, LONG, op2(E, R), 0 },
+/*b5*/ { "lgs", TRUE, LONG, op2(E, R), 0 },
+/*b6*/ { "movzb", TRUE, LONG, op2(E, R), 0 },
+/*b7*/ { "movzw", TRUE, LONG, op2(E, R), 0 },
+
+/*b8*/ { "", FALSE, NONE, 0, 0 },
+/*b9*/ { "", FALSE, NONE, 0, 0 },
+/*ba*/ { "", TRUE, LONG, op2(Is, E), (char *)db_Grp8 },
+/*bb*/ { "btc", TRUE, LONG, op2(R, E), 0 },
+/*bc*/ { "bsf", TRUE, LONG, op2(E, R), 0 },
+/*bd*/ { "bsr", TRUE, LONG, op2(E, R), 0 },
+/*be*/ { "movsb", TRUE, LONG, op2(E, R), 0 },
+/*bf*/ { "movsw", TRUE, LONG, op2(E, R), 0 },
+};
+
+struct inst db_inst_0fcx[] = {
+/*c0*/ { "xadd", TRUE, BYTE, op2(R, E), 0 },
+/*c1*/ { "xadd", TRUE, LONG, op2(R, E), 0 },
+/*c2*/ { "", FALSE, NONE, 0, 0 },
+/*c3*/ { "", FALSE, NONE, 0, 0 },
+/*c4*/ { "", FALSE, NONE, 0, 0 },
+/*c5*/ { "", FALSE, NONE, 0, 0 },
+/*c6*/ { "", FALSE, NONE, 0, 0 },
+/*c7*/ { "", FALSE, NONE, 0, 0 },
+/*c8*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*c9*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*ca*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cb*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cc*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cd*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*ce*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cf*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+};
+
+struct inst db_inst_0fdx[] = {
+/*c0*/ { "cmpxchg",TRUE, BYTE, op2(R, E), 0 },
+/*c1*/ { "cmpxchg",TRUE, LONG, op2(R, E), 0 },
+/*c2*/ { "", FALSE, NONE, 0, 0 },
+/*c3*/ { "", FALSE, NONE, 0, 0 },
+/*c4*/ { "", FALSE, NONE, 0, 0 },
+/*c5*/ { "", FALSE, NONE, 0, 0 },
+/*c6*/ { "", FALSE, NONE, 0, 0 },
+/*c7*/ { "", FALSE, NONE, 0, 0 },
+/*c8*/ { "", FALSE, NONE, 0, 0 },
+/*c9*/ { "", FALSE, NONE, 0, 0 },
+/*ca*/ { "", FALSE, NONE, 0, 0 },
+/*cb*/ { "", FALSE, NONE, 0, 0 },
+/*cc*/ { "", FALSE, NONE, 0, 0 },
+/*cd*/ { "", FALSE, NONE, 0, 0 },
+/*ce*/ { "", FALSE, NONE, 0, 0 },
+/*cf*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst *db_inst_0f[] = {
+ db_inst_0f0x,
+ 0,
+ db_inst_0f2x,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ db_inst_0f8x,
+ db_inst_0f9x,
+ db_inst_0fax,
+ db_inst_0fbx,
+ db_inst_0fcx,
+ db_inst_0fdx,
+ 0,
+ 0
+};
+
+char * db_Esc92[] = {
+ "fnop", "", "", "", "", "", "", ""
+};
+char * db_Esc93[] = {
+ "", "", "", "", "", "", "", ""
+};
+char * db_Esc94[] = {
+ "fchs", "fabs", "", "", "ftst", "fxam", "", ""
+};
+char * db_Esc95[] = {
+ "fld1", "fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz",""
+};
+char * db_Esc96[] = {
+ "f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp",
+ "fincstp"
+};
+char * db_Esc97[] = {
+ "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos"
+};
+
+char * db_Esca4[] = {
+ "", "fucompp","", "", "", "", "", ""
+};
+
+char * db_Escb4[] = {
+ "", "", "fnclex","fninit","", "", "", ""
+};
+
+char * db_Esce3[] = {
+ "", "fcompp","", "", "", "", "", ""
+};
+
+char * db_Escf4[] = {
+ "fnstsw","", "", "", "", "", "", ""
+};
+
+struct finst db_Esc8[] = {
+/*0*/ { "fadd", SNGL, op2(STI,ST), 0 },
+/*1*/ { "fmul", SNGL, op2(STI,ST), 0 },
+/*2*/ { "fcom", SNGL, op2(STI,ST), 0 },
+/*3*/ { "fcomp", SNGL, op2(STI,ST), 0 },
+/*4*/ { "fsub", SNGL, op2(STI,ST), 0 },
+/*5*/ { "fsubr", SNGL, op2(STI,ST), 0 },
+/*6*/ { "fdiv", SNGL, op2(STI,ST), 0 },
+/*7*/ { "fdivr", SNGL, op2(STI,ST), 0 },
+};
+
+struct finst db_Esc9[] = {
+/*0*/ { "fld", SNGL, op1(STI), 0 },
+/*1*/ { "", NONE, op1(STI), "fxch" },
+/*2*/ { "fst", SNGL, op1(X), (char *)db_Esc92 },
+/*3*/ { "fstp", SNGL, op1(X), (char *)db_Esc93 },
+/*4*/ { "fldenv", NONE, op1(X), (char *)db_Esc94 },
+/*5*/ { "fldcw", NONE, op1(X), (char *)db_Esc95 },
+/*6*/ { "fnstenv",NONE, op1(X), (char *)db_Esc96 },
+/*7*/ { "fnstcw", NONE, op1(X), (char *)db_Esc97 },
+};
+
+struct finst db_Esca[] = {
+/*0*/ { "fiadd", WORD, 0, 0 },
+/*1*/ { "fimul", WORD, 0, 0 },
+/*2*/ { "ficom", WORD, 0, 0 },
+/*3*/ { "ficomp", WORD, 0, 0 },
+/*4*/ { "fisub", WORD, op1(X), (char *)db_Esca4 },
+/*5*/ { "fisubr", WORD, 0, 0 },
+/*6*/ { "fidiv", WORD, 0, 0 },
+/*7*/ { "fidivr", WORD, 0, 0 }
+};
+
+struct finst db_Escb[] = {
+/*0*/ { "fild", WORD, 0, 0 },
+/*1*/ { "", NONE, 0, 0 },
+/*2*/ { "fist", WORD, 0, 0 },
+/*3*/ { "fistp", WORD, 0, 0 },
+/*4*/ { "", WORD, op1(X), (char *)db_Escb4 },
+/*5*/ { "fld", EXTR, 0, 0 },
+/*6*/ { "", WORD, 0, 0 },
+/*7*/ { "fstp", EXTR, 0, 0 },
+};
+
+struct finst db_Escc[] = {
+/*0*/ { "fadd", DBLR, op2(ST,STI), 0 },
+/*1*/ { "fmul", DBLR, op2(ST,STI), 0 },
+/*2*/ { "fcom", DBLR, op2(ST,STI), 0 },
+/*3*/ { "fcomp", DBLR, op2(ST,STI), 0 },
+/*4*/ { "fsub", DBLR, op2(ST,STI), "fsubr" },
+/*5*/ { "fsubr", DBLR, op2(ST,STI), "fsub" },
+/*6*/ { "fdiv", DBLR, op2(ST,STI), "fdivr" },
+/*7*/ { "fdivr", DBLR, op2(ST,STI), "fdiv" },
+};
+
+struct finst db_Escd[] = {
+/*0*/ { "fld", DBLR, op1(STI), "ffree" },
+/*1*/ { "", NONE, 0, 0 },
+/*2*/ { "fst", DBLR, op1(STI), 0 },
+/*3*/ { "fstp", DBLR, op1(STI), 0 },
+/*4*/ { "frstor", NONE, op1(STI), "fucom" },
+/*5*/ { "", NONE, op1(STI), "fucomp" },
+/*6*/ { "fnsave", NONE, 0, 0 },
+/*7*/ { "fnstsw", NONE, 0, 0 },
+};
+
+struct finst db_Esce[] = {
+/*0*/ { "fiadd", LONG, op2(ST,STI), "faddp" },
+/*1*/ { "fimul", LONG, op2(ST,STI), "fmulp" },
+/*2*/ { "ficom", LONG, 0, 0 },
+/*3*/ { "ficomp", LONG, op1(X), (char *)db_Esce3 },
+/*4*/ { "fisub", LONG, op2(ST,STI), "fsubrp" },
+/*5*/ { "fisubr", LONG, op2(ST,STI), "fsubp" },
+/*6*/ { "fidiv", LONG, op2(ST,STI), "fdivrp" },
+/*7*/ { "fidivr", LONG, op2(ST,STI), "fdivp" },
+};
+
+struct finst db_Escf[] = {
+/*0*/ { "fild", LONG, 0, 0 },
+/*1*/ { "", LONG, 0, 0 },
+/*2*/ { "fist", LONG, 0, 0 },
+/*3*/ { "fistp", LONG, 0, 0 },
+/*4*/ { "fbld", NONE, op1(XA), (char *)db_Escf4 },
+/*5*/ { "fld", QUAD, 0, 0 },
+/*6*/ { "fbstp", NONE, 0, 0 },
+/*7*/ { "fstp", QUAD, 0, 0 },
+};
+
+struct finst *db_Esc_inst[] = {
+ db_Esc8, db_Esc9, db_Esca, db_Escb,
+ db_Escc, db_Escd, db_Esce, db_Escf
+};
+
+char * db_Grp1[] = {
+ "add",
+ "or",
+ "adc",
+ "sbb",
+ "and",
+ "sub",
+ "xor",
+ "cmp"
+};
+
+char * db_Grp2[] = {
+ "rol",
+ "ror",
+ "rcl",
+ "rcr",
+ "shl",
+ "shr",
+ "shl",
+ "sar"
+};
+
+struct inst db_Grp3[] = {
+ { "test", TRUE, NONE, op2(I,E), 0 },
+ { "test", TRUE, NONE, op2(I,E), 0 },
+ { "not", TRUE, NONE, op1(E), 0 },
+ { "neg", TRUE, NONE, op1(E), 0 },
+ { "mul", TRUE, NONE, op2(E,A), 0 },
+ { "imul", TRUE, NONE, op2(E,A), 0 },
+ { "div", TRUE, NONE, op2(E,A), 0 },
+ { "idiv", TRUE, NONE, op2(E,A), 0 },
+};
+
+struct inst db_Grp4[] = {
+ { "inc", TRUE, BYTE, op1(E), 0 },
+ { "dec", TRUE, BYTE, op1(E), 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 }
+};
+
+struct inst db_Grp5[] = {
+ { "inc", TRUE, LONG, op1(E), 0 },
+ { "dec", TRUE, LONG, op1(E), 0 },
+ { "call", TRUE, NONE, op1(Eind),0 },
+ { "lcall", TRUE, NONE, op1(Eind),0 },
+ { "jmp", TRUE, NONE, op1(Eind),0 },
+ { "ljmp", TRUE, NONE, op1(Eind),0 },
+ { "push", TRUE, LONG, op1(E), 0 },
+ { "", TRUE, NONE, 0, 0 }
+};
+
+struct inst db_inst_table[256] = {
+/*00*/ { "add", TRUE, BYTE, op2(R, E), 0 },
+/*01*/ { "add", TRUE, LONG, op2(R, E), 0 },
+/*02*/ { "add", TRUE, BYTE, op2(E, R), 0 },
+/*03*/ { "add", TRUE, LONG, op2(E, R), 0 },
+/*04*/ { "add", FALSE, BYTE, op2(Is, A), 0 },
+/*05*/ { "add", FALSE, LONG, op2(Is, A), 0 },
+/*06*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*07*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*08*/ { "or", TRUE, BYTE, op2(R, E), 0 },
+/*09*/ { "or", TRUE, LONG, op2(R, E), 0 },
+/*0a*/ { "or", TRUE, BYTE, op2(E, R), 0 },
+/*0b*/ { "or", TRUE, LONG, op2(E, R), 0 },
+/*0c*/ { "or", FALSE, BYTE, op2(I, A), 0 },
+/*0d*/ { "or", FALSE, LONG, op2(I, A), 0 },
+/*0e*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*0f*/ { "", FALSE, NONE, 0, 0 },
+
+/*10*/ { "adc", TRUE, BYTE, op2(R, E), 0 },
+/*11*/ { "adc", TRUE, LONG, op2(R, E), 0 },
+/*12*/ { "adc", TRUE, BYTE, op2(E, R), 0 },
+/*13*/ { "adc", TRUE, LONG, op2(E, R), 0 },
+/*14*/ { "adc", FALSE, BYTE, op2(Is, A), 0 },
+/*15*/ { "adc", FALSE, LONG, op2(Is, A), 0 },
+/*16*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*17*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*18*/ { "sbb", TRUE, BYTE, op2(R, E), 0 },
+/*19*/ { "sbb", TRUE, LONG, op2(R, E), 0 },
+/*1a*/ { "sbb", TRUE, BYTE, op2(E, R), 0 },
+/*1b*/ { "sbb", TRUE, LONG, op2(E, R), 0 },
+/*1c*/ { "sbb", FALSE, BYTE, op2(Is, A), 0 },
+/*1d*/ { "sbb", FALSE, LONG, op2(Is, A), 0 },
+/*1e*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*1f*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*20*/ { "and", TRUE, BYTE, op2(R, E), 0 },
+/*21*/ { "and", TRUE, LONG, op2(R, E), 0 },
+/*22*/ { "and", TRUE, BYTE, op2(E, R), 0 },
+/*23*/ { "and", TRUE, LONG, op2(E, R), 0 },
+/*24*/ { "and", FALSE, BYTE, op2(I, A), 0 },
+/*25*/ { "and", FALSE, LONG, op2(I, A), 0 },
+/*26*/ { "", FALSE, NONE, 0, 0 },
+/*27*/ { "aaa", FALSE, NONE, 0, 0 },
+
+/*28*/ { "sub", TRUE, BYTE, op2(R, E), 0 },
+/*29*/ { "sub", TRUE, LONG, op2(R, E), 0 },
+/*2a*/ { "sub", TRUE, BYTE, op2(E, R), 0 },
+/*2b*/ { "sub", TRUE, LONG, op2(E, R), 0 },
+/*2c*/ { "sub", FALSE, BYTE, op2(Is, A), 0 },
+/*2d*/ { "sub", FALSE, LONG, op2(Is, A), 0 },
+/*2e*/ { "", FALSE, NONE, 0, 0 },
+/*2f*/ { "das", FALSE, NONE, 0, 0 },
+
+/*30*/ { "xor", TRUE, BYTE, op2(R, E), 0 },
+/*31*/ { "xor", TRUE, LONG, op2(R, E), 0 },
+/*32*/ { "xor", TRUE, BYTE, op2(E, R), 0 },
+/*33*/ { "xor", TRUE, LONG, op2(E, R), 0 },
+/*34*/ { "xor", FALSE, BYTE, op2(I, A), 0 },
+/*35*/ { "xor", FALSE, LONG, op2(I, A), 0 },
+/*36*/ { "", FALSE, NONE, 0, 0 },
+/*37*/ { "daa", FALSE, NONE, 0, 0 },
+
+/*38*/ { "cmp", TRUE, BYTE, op2(R, E), 0 },
+/*39*/ { "cmp", TRUE, LONG, op2(R, E), 0 },
+/*3a*/ { "cmp", TRUE, BYTE, op2(E, R), 0 },
+/*3b*/ { "cmp", TRUE, LONG, op2(E, R), 0 },
+/*3c*/ { "cmp", FALSE, BYTE, op2(Is, A), 0 },
+/*3d*/ { "cmp", FALSE, LONG, op2(Is, A), 0 },
+/*3e*/ { "", FALSE, NONE, 0, 0 },
+/*3f*/ { "aas", FALSE, NONE, 0, 0 },
+
+/*40*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*41*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*42*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*43*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*44*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*45*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*46*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*47*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+
+/*48*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*49*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4a*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4b*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4c*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4d*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4e*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4f*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+
+/*50*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*51*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*52*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*53*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*54*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*55*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*56*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*57*/ { "push", FALSE, LONG, op1(Ri), 0 },
+
+/*58*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*59*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5a*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5b*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5c*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5d*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5e*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5f*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+
+/*60*/ { "pusha", FALSE, LONG, 0, 0 },
+/*61*/ { "popa", FALSE, LONG, 0, 0 },
+/*62*/ { "bound", TRUE, LONG, op2(E, R), 0 },
+/*63*/ { "arpl", TRUE, NONE, op2(Ew,Rw), 0 },
+
+/*64*/ { "", FALSE, NONE, 0, 0 },
+/*65*/ { "", FALSE, NONE, 0, 0 },
+/*66*/ { "", FALSE, NONE, 0, 0 },
+/*67*/ { "", FALSE, NONE, 0, 0 },
+
+/*68*/ { "push", FALSE, LONG, op1(I), 0 },
+/*69*/ { "imul", TRUE, LONG, op3(I,E,R), 0 },
+/*6a*/ { "push", FALSE, LONG, op1(Ib), 0 },
+/*6b*/ { "imul", TRUE, LONG, op3(Ibs,E,R),0 },
+/*6c*/ { "ins", FALSE, BYTE, op2(DX, DI), 0 },
+/*6d*/ { "ins", FALSE, LONG, op2(DX, DI), 0 },
+/*6e*/ { "outs", FALSE, BYTE, op2(SI, DX), 0 },
+/*6f*/ { "outs", FALSE, LONG, op2(SI, DX), 0 },
+
+/*70*/ { "jo", FALSE, NONE, op1(Db), 0 },
+/*71*/ { "jno", FALSE, NONE, op1(Db), 0 },
+/*72*/ { "jb", FALSE, NONE, op1(Db), 0 },
+/*73*/ { "jnb", FALSE, NONE, op1(Db), 0 },
+/*74*/ { "jz", FALSE, NONE, op1(Db), 0 },
+/*75*/ { "jnz", FALSE, NONE, op1(Db), 0 },
+/*76*/ { "jbe", FALSE, NONE, op1(Db), 0 },
+/*77*/ { "jnbe", FALSE, NONE, op1(Db), 0 },
+
+/*78*/ { "js", FALSE, NONE, op1(Db), 0 },
+/*79*/ { "jns", FALSE, NONE, op1(Db), 0 },
+/*7a*/ { "jp", FALSE, NONE, op1(Db), 0 },
+/*7b*/ { "jnp", FALSE, NONE, op1(Db), 0 },
+/*7c*/ { "jl", FALSE, NONE, op1(Db), 0 },
+/*7d*/ { "jnl", FALSE, NONE, op1(Db), 0 },
+/*7e*/ { "jle", FALSE, NONE, op1(Db), 0 },
+/*7f*/ { "jnle", FALSE, NONE, op1(Db), 0 },
+
+/*80*/ { "", TRUE, BYTE, op2(I, E), (char *)db_Grp1 },
+/*81*/ { "", TRUE, LONG, op2(I, E), (char *)db_Grp1 },
+/*82*/ { "", TRUE, BYTE, op2(Is,E), (char *)db_Grp1 },
+/*83*/ { "", TRUE, LONG, op2(Ibs,E), (char *)db_Grp1 },
+/*84*/ { "test", TRUE, BYTE, op2(R, E), 0 },
+/*85*/ { "test", TRUE, LONG, op2(R, E), 0 },
+/*86*/ { "xchg", TRUE, BYTE, op2(R, E), 0 },
+/*87*/ { "xchg", TRUE, LONG, op2(R, E), 0 },
+
+/*88*/ { "mov", TRUE, BYTE, op2(R, E), 0 },
+/*89*/ { "mov", TRUE, LONG, op2(R, E), 0 },
+/*8a*/ { "mov", TRUE, BYTE, op2(E, R), 0 },
+/*8b*/ { "mov", TRUE, LONG, op2(E, R), 0 },
+/*8c*/ { "mov", TRUE, NONE, op2(S, Ew), 0 },
+/*8d*/ { "lea", TRUE, LONG, op2(E, R), 0 },
+/*8e*/ { "mov", TRUE, NONE, op2(Ew, S), 0 },
+/*8f*/ { "pop", TRUE, LONG, op1(E), 0 },
+
+/*90*/ { "nop", FALSE, NONE, 0, 0 },
+/*91*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*92*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*93*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*94*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*95*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*96*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*97*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+
+/*98*/ { "cbw", FALSE, SDEP, 0, "cwde" }, /* cbw/cwde */
+/*99*/ { "cwd", FALSE, SDEP, 0, "cdq" }, /* cwd/cdq */
+/*9a*/ { "lcall", FALSE, NONE, op1(OS), 0 },
+/*9b*/ { "wait", FALSE, NONE, 0, 0 },
+/*9c*/ { "pushf", FALSE, LONG, 0, 0 },
+/*9d*/ { "popf", FALSE, LONG, 0, 0 },
+/*9e*/ { "sahf", FALSE, NONE, 0, 0 },
+/*9f*/ { "lahf", FALSE, NONE, 0, 0 },
+
+/*a0*/ { "mov", FALSE, BYTE, op2(O, A), 0 },
+/*a1*/ { "mov", FALSE, LONG, op2(O, A), 0 },
+/*a2*/ { "mov", FALSE, BYTE, op2(A, O), 0 },
+/*a3*/ { "mov", FALSE, LONG, op2(A, O), 0 },
+/*a4*/ { "movs", FALSE, BYTE, op2(SI,DI), 0 },
+/*a5*/ { "movs", FALSE, LONG, op2(SI,DI), 0 },
+/*a6*/ { "cmps", FALSE, BYTE, op2(SI,DI), 0 },
+/*a7*/ { "cmps", FALSE, LONG, op2(SI,DI), 0 },
+
+/*a8*/ { "test", FALSE, BYTE, op2(I, A), 0 },
+/*a9*/ { "test", FALSE, LONG, op2(I, A), 0 },
+/*aa*/ { "stos", FALSE, BYTE, op1(DI), 0 },
+/*ab*/ { "stos", FALSE, LONG, op1(DI), 0 },
+/*ac*/ { "lods", FALSE, BYTE, op1(SI), 0 },
+/*ad*/ { "lods", FALSE, LONG, op1(SI), 0 },
+/*ae*/ { "scas", FALSE, BYTE, op1(SI), 0 },
+/*af*/ { "scas", FALSE, LONG, op1(SI), 0 },
+
+/*b0*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b1*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b2*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b3*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b4*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b5*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b6*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b7*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+
+/*b8*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*b9*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*ba*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bb*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bc*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bd*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*be*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bf*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+
+/*c0*/ { "", TRUE, BYTE, op2(Ib, E), (char *)db_Grp2 },
+/*c1*/ { "", TRUE, LONG, op2(Ib, E), (char *)db_Grp2 },
+/*c2*/ { "ret", FALSE, NONE, op1(Iw), 0 },
+/*c3*/ { "ret", FALSE, NONE, 0, 0 },
+/*c4*/ { "les", TRUE, LONG, op2(E, R), 0 },
+/*c5*/ { "lds", TRUE, LONG, op2(E, R), 0 },
+/*c6*/ { "mov", TRUE, BYTE, op2(I, E), 0 },
+/*c7*/ { "mov", TRUE, LONG, op2(I, E), 0 },
+
+/*c8*/ { "enter", FALSE, NONE, op2(Ib, Iw), 0 },
+/*c9*/ { "leave", FALSE, NONE, 0, 0 },
+/*ca*/ { "lret", FALSE, NONE, op1(Iw), 0 },
+/*cb*/ { "lret", FALSE, NONE, 0, 0 },
+/*cc*/ { "int", FALSE, NONE, op1(o3), 0 },
+/*cd*/ { "int", FALSE, NONE, op1(Ib), 0 },
+/*ce*/ { "into", FALSE, NONE, 0, 0 },
+/*cf*/ { "iret", FALSE, NONE, 0, 0 },
+
+/*d0*/ { "", TRUE, BYTE, op2(o1, E), (char *)db_Grp2 },
+/*d1*/ { "", TRUE, LONG, op2(o1, E), (char *)db_Grp2 },
+/*d2*/ { "", TRUE, BYTE, op2(CL, E), (char *)db_Grp2 },
+/*d3*/ { "", TRUE, LONG, op2(CL, E), (char *)db_Grp2 },
+/*d4*/ { "aam", TRUE, NONE, 0, 0 },
+/*d5*/ { "aad", TRUE, NONE, 0, 0 },
+/*d6*/ { "", FALSE, NONE, 0, 0 },
+/*d7*/ { "xlat", FALSE, BYTE, op1(BX), 0 },
+
+/*d8*/ { "", TRUE, NONE, 0, (char *)db_Esc8 },
+/*d9*/ { "", TRUE, NONE, 0, (char *)db_Esc9 },
+/*da*/ { "", TRUE, NONE, 0, (char *)db_Esca },
+/*db*/ { "", TRUE, NONE, 0, (char *)db_Escb },
+/*dc*/ { "", TRUE, NONE, 0, (char *)db_Escc },
+/*dd*/ { "", TRUE, NONE, 0, (char *)db_Escd },
+/*de*/ { "", TRUE, NONE, 0, (char *)db_Esce },
+/*df*/ { "", TRUE, NONE, 0, (char *)db_Escf },
+
+/*e0*/ { "loopne",FALSE, NONE, op1(Db), 0 },
+/*e1*/ { "loope", FALSE, NONE, op1(Db), 0 },
+/*e2*/ { "loop", FALSE, NONE, op1(Db), 0 },
+/*e3*/ { "jcxz", FALSE, SDEP, op1(Db), "jecxz" },
+/*e4*/ { "in", FALSE, BYTE, op2(Ib, A), 0 },
+/*e5*/ { "in", FALSE, LONG, op2(Ib, A) , 0 },
+/*e6*/ { "out", FALSE, BYTE, op2(A, Ib), 0 },
+/*e7*/ { "out", FALSE, LONG, op2(A, Ib) , 0 },
+
+/*e8*/ { "call", FALSE, NONE, op1(Dl), 0 },
+/*e9*/ { "jmp", FALSE, NONE, op1(Dl), 0 },
+/*ea*/ { "ljmp", FALSE, NONE, op1(OS), 0 },
+/*eb*/ { "jmp", FALSE, NONE, op1(Db), 0 },
+/*ec*/ { "in", FALSE, BYTE, op2(DX, A), 0 },
+/*ed*/ { "in", FALSE, LONG, op2(DX, A) , 0 },
+/*ee*/ { "out", FALSE, BYTE, op2(A, DX), 0 },
+/*ef*/ { "out", FALSE, LONG, op2(A, DX) , 0 },
+
+/*f0*/ { "", FALSE, NONE, 0, 0 },
+/*f1*/ { "", FALSE, NONE, 0, 0 },
+/*f2*/ { "", FALSE, NONE, 0, 0 },
+/*f3*/ { "", FALSE, NONE, 0, 0 },
+/*f4*/ { "hlt", FALSE, NONE, 0, 0 },
+/*f5*/ { "cmc", FALSE, NONE, 0, 0 },
+/*f6*/ { "", TRUE, BYTE, 0, (char *)db_Grp3 },
+/*f7*/ { "", TRUE, LONG, 0, (char *)db_Grp3 },
+
+/*f8*/ { "clc", FALSE, NONE, 0, 0 },
+/*f9*/ { "stc", FALSE, NONE, 0, 0 },
+/*fa*/ { "cli", FALSE, NONE, 0, 0 },
+/*fb*/ { "sti", FALSE, NONE, 0, 0 },
+/*fc*/ { "cld", FALSE, NONE, 0, 0 },
+/*fd*/ { "std", FALSE, NONE, 0, 0 },
+/*fe*/ { "", TRUE, NONE, 0, (char *)db_Grp4 },
+/*ff*/ { "", TRUE, NONE, 0, (char *)db_Grp5 },
+};
+
+struct inst db_bad_inst =
+ { "???", FALSE, NONE, 0, 0 }
+;
+
+#define f_mod(byte) ((byte)>>6)
+#define f_reg(byte) (((byte)>>3)&0x7)
+#define f_rm(byte) ((byte)&0x7)
+
+#define sib_ss(byte) ((byte)>>6)
+#define sib_index(byte) (((byte)>>3)&0x7)
+#define sib_base(byte) ((byte)&0x7)
+
+struct i_addr {
+ int is_reg; /* if reg, reg number is in 'disp' */
+ int disp;
+ char * base;
+ char * index;
+ int ss;
+};
+
+char * db_index_reg_16[8] = {
+ "%bx,%si",
+ "%bx,%di",
+ "%bp,%si",
+ "%bp,%di",
+ "%si",
+ "%di",
+ "%bp",
+ "%bx"
+};
+
+char * db_reg[3][8] = {
+ "%al", "%cl", "%dl", "%bl", "%ah", "%ch", "%dh", "%bh",
+ "%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di",
+ "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi"
+};
+
+char * db_seg_reg[8] = {
+ "%es", "%cs", "%ss", "%ds", "%fs", "%gs", "", ""
+};
+
+/*
+ * lengths for size attributes
+ */
+int db_lengths[] = {
+ 1, /* BYTE */
+ 2, /* WORD */
+ 4, /* LONG */
+ 8, /* QUAD */
+ 4, /* SNGL */
+ 8, /* DBLR */
+ 10, /* EXTR */
+};
+
+#define get_value_inc(result, loc, size, is_signed) \
+ result = db_get_value((loc), (size), (is_signed)); \
+ (loc) += (size);
+
+/*
+ * Read address at location and return updated location.
+ */
+db_addr_t
+db_read_address(loc, short_addr, regmodrm, addrp)
+ db_addr_t loc;
+ int short_addr;
+ int regmodrm;
+ struct i_addr *addrp; /* out */
+{
+ int mod, rm, sib, index, ss, disp;
+
+ mod = f_mod(regmodrm);
+ rm = f_rm(regmodrm);
+
+ if (mod == 3) {
+ addrp->is_reg = TRUE;
+ addrp->disp = rm;
+ return (loc);
+ }
+ addrp->is_reg = FALSE;
+ addrp->index = 0;
+
+ if (short_addr) {
+ addrp->index = 0;
+ addrp->ss = 0;
+ switch (mod) {
+ case 0:
+ if (rm == 6) {
+ get_value_inc(disp, loc, 2, TRUE);
+ addrp->disp = disp;
+ addrp->base = 0;
+ }
+ else {
+ addrp->disp = 0;
+ addrp->base = db_index_reg_16[rm];
+ }
+ break;
+ case 1:
+ get_value_inc(disp, loc, 1, TRUE);
+ addrp->disp = disp;
+ addrp->base = db_index_reg_16[rm];
+ break;
+ case 2:
+ get_value_inc(disp, loc, 2, TRUE);
+ addrp->disp = disp;
+ addrp->base = db_index_reg_16[rm];
+ break;
+ }
+ }
+ else {
+ if (mod != 3 && rm == 4) {
+ get_value_inc(sib, loc, 1, FALSE);
+ rm = sib_base(sib);
+ index = sib_index(sib);
+ if (index != 4)
+ addrp->index = db_reg[LONG][index];
+ addrp->ss = sib_ss(sib);
+ }
+
+ switch (mod) {
+ case 0:
+ if (rm == 5) {
+ get_value_inc(addrp->disp, loc, 4, FALSE);
+ addrp->base = 0;
+ }
+ else {
+ addrp->disp = 0;
+ addrp->base = db_reg[LONG][rm];
+ }
+ break;
+
+ case 1:
+ get_value_inc(disp, loc, 1, TRUE);
+ addrp->disp = disp;
+ addrp->base = db_reg[LONG][rm];
+ break;
+
+ case 2:
+ get_value_inc(disp, loc, 4, FALSE);
+ addrp->disp = disp;
+ addrp->base = db_reg[LONG][rm];
+ break;
+ }
+ }
+ return (loc);
+}
+
+void
+db_print_address(seg, size, addrp)
+ char * seg;
+ int size;
+ struct i_addr *addrp;
+{
+ if (addrp->is_reg) {
+ db_printf("%s", db_reg[size][addrp->disp]);
+ return;
+ }
+
+ if (seg) {
+ db_printf("%s:", seg);
+ }
+
+ db_printsym((db_addr_t)addrp->disp, DB_STGY_ANY);
+ if (addrp->base != 0 || addrp->index != 0) {
+ db_printf("(");
+ if (addrp->base)
+ db_printf("%s", addrp->base);
+ if (addrp->index)
+ db_printf(",%s,%d", addrp->index, 1<<addrp->ss);
+ db_printf(")");
+ }
+}
+
+/*
+ * Disassemble floating-point ("escape") instruction
+ * and return updated location.
+ */
+db_addr_t
+db_disasm_esc(loc, inst, short_addr, size, seg)
+ db_addr_t loc;
+ int inst;
+ int short_addr;
+ int size;
+ char * seg;
+{
+ int regmodrm;
+ struct finst *fp;
+ int mod;
+ struct i_addr address;
+ char * name;
+
+ get_value_inc(regmodrm, loc, 1, FALSE);
+ fp = &db_Esc_inst[inst - 0xd8][f_reg(regmodrm)];
+ mod = f_mod(regmodrm);
+ if (mod != 3) {
+ /*
+ * Normal address modes.
+ */
+ loc = db_read_address(loc, short_addr, regmodrm, &address);
+ db_printf(fp->f_name);
+ switch(fp->f_size) {
+ case SNGL:
+ db_printf("s");
+ break;
+ case DBLR:
+ db_printf("l");
+ break;
+ case EXTR:
+ db_printf("t");
+ break;
+ case WORD:
+ db_printf("s");
+ break;
+ case LONG:
+ db_printf("l");
+ break;
+ case QUAD:
+ db_printf("q");
+ break;
+ default:
+ break;
+ }
+ db_printf("\t");
+ db_print_address(seg, BYTE, &address);
+ }
+ else {
+ /*
+ * 'reg-reg' - special formats
+ */
+ switch (fp->f_rrmode) {
+ case op2(ST,STI):
+ name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+ db_printf("%s\t%%st,%%st(%d)",name,f_rm(regmodrm));
+ break;
+ case op2(STI,ST):
+ name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+ db_printf("%s\t%%st(%d),%%st",name, f_rm(regmodrm));
+ break;
+ case op1(STI):
+ name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+ db_printf("%s\t%%st(%d)",name, f_rm(regmodrm));
+ break;
+ case op1(X):
+ db_printf("%s", ((char **)fp->f_rrname)[f_rm(regmodrm)]);
+ break;
+ case op1(XA):
+ db_printf("%s\t%%ax",
+ ((char **)fp->f_rrname)[f_rm(regmodrm)]);
+ break;
+ default:
+ db_printf("<bad instruction>");
+ break;
+ }
+ }
+
+ return (loc);
+}
+
+/*
+ * Disassemble instruction at 'loc'. 'altfmt' specifies an
+ * (optional) alternate format. Return address of start of
+ * next instruction.
+ */
+db_addr_t
+db_disasm(loc, altfmt)
+ db_addr_t loc;
+ boolean_t altfmt;
+{
+ int inst;
+ int size;
+ int short_addr;
+ char * seg;
+ struct inst * ip;
+ char * i_name;
+ int i_size;
+ int i_mode;
+ int regmodrm;
+ boolean_t first;
+ int displ;
+ int prefix;
+ int imm;
+ int imm2;
+ int len;
+ struct i_addr address;
+
+ get_value_inc(inst, loc, 1, FALSE);
+ short_addr = FALSE;
+ size = LONG;
+ seg = 0;
+
+ /*
+ * Get prefixes
+ */
+ prefix = TRUE;
+ do {
+ switch (inst) {
+ case 0x66: /* data16 */
+ size = WORD;
+ break;
+ case 0x67:
+ short_addr = TRUE;
+ break;
+ case 0x26:
+ seg = "%es";
+ break;
+ case 0x36:
+ seg = "%ss";
+ break;
+ case 0x2e:
+ seg = "%cs";
+ break;
+ case 0x3e:
+ seg = "%ds";
+ break;
+ case 0x64:
+ seg = "%fs";
+ break;
+ case 0x65:
+ seg = "%gs";
+ break;
+ case 0xf0:
+ db_printf("lock ");
+ break;
+ case 0xf2:
+ db_printf("repne ");
+ break;
+ case 0xf3:
+ db_printf("repe "); /* XXX repe VS rep */
+ break;
+ default:
+ prefix = FALSE;
+ break;
+ }
+ if (prefix) {
+ get_value_inc(inst, loc, 1, FALSE);
+ }
+ } while (prefix);
+
+ if (inst >= 0xd8 && inst <= 0xdf) {
+ loc = db_disasm_esc(loc, inst, short_addr, size, seg);
+ db_printf("\n");
+ return (loc);
+ }
+
+ if (inst == 0x0f) {
+ get_value_inc(inst, loc, 1, FALSE);
+ ip = db_inst_0f[inst>>4];
+ if (ip == 0) {
+ ip = &db_bad_inst;
+ }
+ else {
+ ip = &ip[inst&0xf];
+ }
+ }
+ else
+ ip = &db_inst_table[inst];
+
+ if (ip->i_has_modrm) {
+ get_value_inc(regmodrm, loc, 1, FALSE);
+ loc = db_read_address(loc, short_addr, regmodrm, &address);
+ }
+
+ i_name = ip->i_name;
+ i_size = ip->i_size;
+ i_mode = ip->i_mode;
+
+ if (ip->i_extra == (char *)db_Grp1 ||
+ ip->i_extra == (char *)db_Grp2 ||
+ ip->i_extra == (char *)db_Grp6 ||
+ ip->i_extra == (char *)db_Grp7 ||
+ ip->i_extra == (char *)db_Grp8) {
+ i_name = ((char **)ip->i_extra)[f_reg(regmodrm)];
+ }
+ else if (ip->i_extra == (char *)db_Grp3) {
+ ip = (struct inst *)ip->i_extra;
+ ip = &ip[f_reg(regmodrm)];
+ i_name = ip->i_name;
+ i_mode = ip->i_mode;
+ }
+ else if (ip->i_extra == (char *)db_Grp4 ||
+ ip->i_extra == (char *)db_Grp5) {
+ ip = (struct inst *)ip->i_extra;
+ ip = &ip[f_reg(regmodrm)];
+ i_name = ip->i_name;
+ i_mode = ip->i_mode;
+ i_size = ip->i_size;
+ }
+
+ if (i_size == SDEP) {
+ if (size == WORD)
+ db_printf(i_name);
+ else
+ db_printf(ip->i_extra);
+ }
+ else {
+ db_printf(i_name);
+ if (i_size != NONE) {
+ if (i_size == BYTE) {
+ db_printf("b");
+ size = BYTE;
+ }
+ else if (i_size == WORD) {
+ db_printf("w");
+ size = WORD;
+ }
+ else if (size == WORD)
+ db_printf("w");
+ else
+ db_printf("l");
+ }
+ }
+ db_printf("\t");
+ for (first = TRUE;
+ i_mode != 0;
+ i_mode >>= 8, first = FALSE)
+ {
+ if (!first)
+ db_printf(",");
+
+ switch (i_mode & 0xFF) {
+
+ case E:
+ db_print_address(seg, size, &address);
+ break;
+
+ case Eind:
+ db_printf("*");
+ db_print_address(seg, size, &address);
+ break;
+
+ case Ew:
+ db_print_address(seg, WORD, &address);
+ break;
+
+ case Eb:
+ db_print_address(seg, BYTE, &address);
+ break;
+
+ case R:
+ db_printf("%s", db_reg[size][f_reg(regmodrm)]);
+ break;
+
+ case Rw:
+ db_printf("%s", db_reg[WORD][f_reg(regmodrm)]);
+ break;
+
+ case Ri:
+ db_printf("%s", db_reg[size][f_rm(inst)]);
+ break;
+
+ case S:
+ db_printf("%s", db_seg_reg[f_reg(regmodrm)]);
+ break;
+
+ case Si:
+ db_printf("%s", db_seg_reg[f_reg(inst)]);
+ break;
+
+ case A:
+ db_printf("%s", db_reg[size][0]); /* acc */
+ break;
+
+ case BX:
+ if (seg)
+ db_printf("%s:", seg);
+ db_printf("(%s)", short_addr ? "%bx" : "%ebx");
+ break;
+
+ case CL:
+ db_printf("%%cl");
+ break;
+
+ case DX:
+ db_printf("%%dx");
+ break;
+
+ case SI:
+ if (seg)
+ db_printf("%s:", seg);
+ db_printf("(%s)", short_addr ? "%si" : "%esi");
+ break;
+
+ case DI:
+ db_printf("%%es:(%s)", short_addr ? "%di" : "%edi");
+ break;
+
+ case CR:
+ db_printf("%%cr%d", f_reg(regmodrm));
+ break;
+
+ case DR:
+ db_printf("%%dr%d", f_reg(regmodrm));
+ break;
+
+ case TR:
+ db_printf("%%tr%d", f_reg(regmodrm));
+ break;
+
+ case I:
+ len = db_lengths[size];
+ get_value_inc(imm, loc, len, FALSE);/* unsigned */
+ db_printf("$%#n", imm);
+ break;
+
+ case Is:
+ len = db_lengths[size];
+ get_value_inc(imm, loc, len, TRUE); /* signed */
+ db_printf("$%#r", imm);
+ break;
+
+ case Ib:
+ get_value_inc(imm, loc, 1, FALSE); /* unsigned */
+ db_printf("$%#n", imm);
+ break;
+
+ case Ibs:
+ get_value_inc(imm, loc, 1, TRUE); /* signed */
+ db_printf("$%#r", imm);
+ break;
+
+ case Iw:
+ get_value_inc(imm, loc, 2, FALSE); /* unsigned */
+ db_printf("$%#n", imm);
+ break;
+
+ case Il:
+ get_value_inc(imm, loc, 4, FALSE);
+ db_printf("$%#n", imm);
+ break;
+
+ case O:
+ if (short_addr) {
+ get_value_inc(displ, loc, 2, TRUE);
+ }
+ else {
+ get_value_inc(displ, loc, 4, TRUE);
+ }
+ if (seg)
+ db_printf("%s:%#r",seg, displ);
+ else
+ db_printsym((db_addr_t)displ, DB_STGY_ANY);
+ break;
+
+ case Db:
+ get_value_inc(displ, loc, 1, TRUE);
+ db_printsym((db_addr_t)(displ + loc), DB_STGY_XTRN);
+ break;
+
+ case Dl:
+ get_value_inc(displ, loc, 4, TRUE);
+ db_printsym((db_addr_t)(displ + loc), DB_STGY_XTRN);
+ break;
+
+ case o1:
+ db_printf("$1");
+ break;
+
+ case o3:
+ db_printf("$3");
+ break;
+
+ case OS:
+ get_value_inc(imm, loc, 4, FALSE); /* offset */
+ get_value_inc(imm2, loc, 2, FALSE); /* segment */
+ db_printf("$%#n,%#n", imm2, imm);
+ break;
+ }
+ }
+
+ if (altfmt == 0) {
+ if (inst == 0xe9 || inst == 0xeb) {
+ /*
+ * GAS pads to longword boundary after unconditional jumps.
+ */
+ loc = (loc + (4-1)) & ~(4-1);
+ }
+ }
+ db_printf("\n");
+ return (loc);
+}
+
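
A quick way to see what db_read_address() extracts from a ModRM byte is to lift the f_mod/f_reg/f_rm macros out of the file above into a tiny host program. This is only an illustrative sketch; the sample byte is arbitrary, and the reg32 table here just mirrors the LONG row of db_reg.

#include <stdio.h>

#define f_mod(byte)	((byte)>>6)
#define f_reg(byte)	(((byte)>>3)&0x7)
#define f_rm(byte)	((byte)&0x7)

static char *reg32[8] = {
	"%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi"
};

int
main(void)
{
	int regmodrm = 0x55;	/* arbitrary example byte: mod=1, reg=2, rm=5 */

	/* mod 1 selects a base register plus an 8-bit displacement */
	printf("mod=%d reg=%s rm(base)=%s\n",
	    f_mod(regmodrm), reg32[f_reg(regmodrm)], reg32[f_rm(regmodrm)]);
	return (0);
}

For 0x55 this reports mod=1, reg=%edx, rm=%ebp, i.e. the disp8(%ebp),%edx operand pair that the disassembler would go on to print via db_print_address().
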
diff --git a/sys/amd64/amd64/db_interface.c b/sys/amd64/amd64/db_interface.c
new file mode 100644
index 0000000..31e7849
--- /dev/null
+++ b/sys/amd64/amd64/db_interface.c
@@ -0,0 +1,255 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: db_interface.c,v $
+ * Revision 1.1 1992/03/25 21:42:03 pace
+ * Initial revision
+ *
+ * Revision 2.4 91/02/05 17:11:13 mrt
+ * Changed to new Mach copyright
+ * [91/02/01 17:31:17 mrt]
+ *
+ * Revision 2.3 90/12/04 14:45:55 jsb
+ * Changes for merged intel/pmap.{c,h}.
+ * [90/12/04 11:14:41 jsb]
+ *
+ * Revision 2.2 90/10/25 14:44:43 rwd
+ * Added watchpoint support.
+ * [90/10/18 rpd]
+ *
+ * Created.
+ * [90/07/25 dbg]
+ *
+ *
+ */
+
+/*
+ * Interface to new debugger.
+ */
+#include "param.h"
+#include "proc.h"
+#include <machine/db_machdep.h>
+
+#include <sys/reboot.h>
+#include <vm/vm_statistics.h>
+#include <vm/pmap.h>
+
+#include <setjmp.h>
+#include <sys/systm.h> /* just for boothowto --eichin */
+int db_active = 0;
+
+/*
+ * Received keyboard interrupt sequence.
+ */
+kdb_kbd_trap(regs)
+ struct i386_saved_state *regs;
+{
+ if (db_active == 0 && (boothowto & RB_KDB)) {
+ printf("\n\nkernel: keyboard interrupt\n");
+ kdb_trap(-1, 0, regs);
+ }
+}
+
+/*
+ * kdb_trap - field a TRACE or BPT trap
+ */
+
+static jmp_buf *db_nofault = 0;
+
+kdb_trap(type, code, regs)
+ int type, code;
+ register struct i386_saved_state *regs;
+{
+#if 0
+ if ((boothowto&RB_KDB) == 0)
+ return(0);
+#endif
+
+ switch (type) {
+ case T_BPTFLT /* T_INT3 */: /* breakpoint */
+ case T_KDBTRAP /* T_WATCHPOINT */: /* watchpoint */
+ case T_PRIVINFLT /* T_DEBUG */: /* single_step */
+
+ case -1: /* keyboard interrupt */
+ break;
+
+ default:
+ kdbprinttrap(type, code);
+
+ if (db_nofault) {
+ jmp_buf *no_fault = db_nofault;
+ db_nofault = 0;
+ longjmp(*no_fault, 1);
+ }
+ }
+
+ /* Should switch to kdb`s own stack here. */
+
+ ddb_regs = *regs;
+
+ if ((regs->tf_cs & 0x3) == 0) {
+ /*
+ * Kernel mode - esp and ss not saved
+ */
+ ddb_regs.tf_esp = (int)&regs->tf_esp; /* kernel stack pointer */
+#if 0
+ ddb_regs.ss = KERNEL_DS;
+#endif
+ asm(" movw %%ss,%%ax; movl %%eax,%0 "
+ : "=g" (ddb_regs.tf_ss)
+ :
+ : "ax");
+ }
+
+ db_active++;
+ cnpollc(TRUE);
+ db_trap(type, code);
+ cnpollc(FALSE);
+ db_active--;
+
+ regs->tf_eip = ddb_regs.tf_eip;
+ regs->tf_eflags = ddb_regs.tf_eflags;
+ regs->tf_eax = ddb_regs.tf_eax;
+ regs->tf_ecx = ddb_regs.tf_ecx;
+ regs->tf_edx = ddb_regs.tf_edx;
+ regs->tf_ebx = ddb_regs.tf_ebx;
+ if (regs->tf_cs & 0x3) {
+ /*
+ * user mode - saved esp and ss valid
+ */
+ regs->tf_esp = ddb_regs.tf_esp; /* user stack pointer */
+ regs->tf_ss = ddb_regs.tf_ss & 0xffff; /* user stack segment */
+ }
+ regs->tf_ebp = ddb_regs.tf_ebp;
+ regs->tf_esi = ddb_regs.tf_esi;
+ regs->tf_edi = ddb_regs.tf_edi;
+ regs->tf_es = ddb_regs.tf_es & 0xffff;
+ regs->tf_cs = ddb_regs.tf_cs & 0xffff;
+ regs->tf_ds = ddb_regs.tf_ds & 0xffff;
+#if 0
+ regs->tf_fs = ddb_regs.tf_fs & 0xffff;
+ regs->tf_gs = ddb_regs.tf_gs & 0xffff;
+#endif
+
+ return (1);
+}
+
+/*
+ * Print trap reason.
+ */
+kdbprinttrap(type, code)
+ int type, code;
+{
+ printf("kernel: ");
+ printf("type %d", type);
+ printf(" trap, code=%x\n", code);
+}
+
+/*
+ * Read bytes from kernel address space for debugger.
+ */
+
+extern jmp_buf db_jmpbuf;
+
+void
+db_read_bytes(addr, size, data)
+ vm_offset_t addr;
+ register int size;
+ register char *data;
+{
+ register char *src;
+
+ db_nofault = &db_jmpbuf;
+
+ src = (char *)addr;
+ while (--size >= 0)
+ *data++ = *src++;
+
+ db_nofault = 0;
+}
+
+struct pte *pmap_pte(pmap_t, vm_offset_t);
+
+/*
+ * Write bytes to kernel address space for debugger.
+ */
+void
+db_write_bytes(addr, size, data)
+ vm_offset_t addr;
+ register int size;
+ register char *data;
+{
+ register char *dst;
+
+ register pt_entry_t *ptep0 = 0;
+ pt_entry_t oldmap0 = { 0 };
+ vm_offset_t addr1;
+ register pt_entry_t *ptep1 = 0;
+ pt_entry_t oldmap1 = { 0 };
+ extern char etext;
+
+ db_nofault = &db_jmpbuf;
+
+ if (addr >= VM_MIN_KERNEL_ADDRESS &&
+ addr <= (vm_offset_t)&etext)
+ {
+ ptep0 = pmap_pte(kernel_pmap, addr);
+ oldmap0 = *ptep0;
+ *(int *)ptep0 |= /* INTEL_PTE_WRITE */ PG_RW;
+
+ addr1 = i386_trunc_page(addr + size - 1);
+ if (i386_trunc_page(addr) != addr1) {
+ /* data crosses a page boundary */
+
+ ptep1 = pmap_pte(kernel_pmap, addr1);
+ oldmap1 = *ptep1;
+ *(int *)ptep1 |= /* INTEL_PTE_WRITE */ PG_RW;
+ }
+ tlbflush();
+ }
+
+ dst = (char *)addr;
+
+ while (--size >= 0)
+ *dst++ = *data++;
+
+ db_nofault = 0;
+
+ if (ptep0) {
+ *ptep0 = oldmap0;
+ if (ptep1) {
+ *ptep1 = oldmap1;
+ }
+ tlbflush();
+ }
+}
+
+Debugger (msg)
+char *msg;
+{
+ asm ("int $3");
+}
diff --git a/sys/amd64/amd64/db_trace.c b/sys/amd64/amd64/db_trace.c
new file mode 100644
index 0000000..cbffbbc
--- /dev/null
+++ b/sys/amd64/amd64/db_trace.c
@@ -0,0 +1,292 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: db_trace.c,v $
+ * Revision 1.1 1992/03/25 21:42:05 pace
+ * Initial revision
+ *
+ * Revision 2.6 91/02/05 17:11:21 mrt
+ * Changed to new Mach copyright
+ * [91/02/01 17:31:32 mrt]
+ *
+ * Revision 2.5 91/01/09 19:55:27 rpd
+ * Fixed stack tracing for threads without kernel stacks.
+ * [91/01/09 rpd]
+ *
+ * Revision 2.4 91/01/08 15:10:22 rpd
+ * Reorganized the pcb.
+ * [90/12/11 rpd]
+ *
+ * Revision 2.3 90/11/05 14:27:07 rpd
+ * If we can not guess the number of args to a function, use 5 vs 0.
+ * [90/11/02 rvb]
+ *
+ * Revision 2.2 90/08/27 21:56:20 dbg
+ * Import db_sym.h.
+ * [90/08/21 dbg]
+ * Fix includes.
+ * [90/08/08 dbg]
+ * Created from rvb's code for new debugger.
+ * [90/07/11 dbg]
+ *
+ */
+#include "param.h"
+#include "proc.h"
+#include <machine/db_machdep.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+
+/*
+ * Machine register set.
+ */
+struct db_variable db_regs[] = {
+ "cs", (int *)&ddb_regs.tf_cs, FCN_NULL,
+ "ds", (int *)&ddb_regs.tf_ds, FCN_NULL,
+ "es", (int *)&ddb_regs.tf_es, FCN_NULL,
+#if 0
+ "fs", (int *)&ddb_regs.tf_fs, FCN_NULL,
+ "gs", (int *)&ddb_regs.tf_gs, FCN_NULL,
+#endif
+ "ss", (int *)&ddb_regs.tf_ss, FCN_NULL,
+ "eax", (int *)&ddb_regs.tf_eax, FCN_NULL,
+ "ecx", (int *)&ddb_regs.tf_ecx, FCN_NULL,
+ "edx", (int *)&ddb_regs.tf_edx, FCN_NULL,
+ "ebx", (int *)&ddb_regs.tf_ebx, FCN_NULL,
+ "esp", (int *)&ddb_regs.tf_esp,FCN_NULL,
+ "ebp", (int *)&ddb_regs.tf_ebp, FCN_NULL,
+ "esi", (int *)&ddb_regs.tf_esi, FCN_NULL,
+ "edi", (int *)&ddb_regs.tf_edi, FCN_NULL,
+ "eip", (int *)&ddb_regs.tf_eip, FCN_NULL,
+ "efl", (int *)&ddb_regs.tf_eflags, FCN_NULL,
+};
+struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
+
+/*
+ * Stack trace.
+ */
+#define INKERNEL(va) (((vm_offset_t)(va)) >= VM_MIN_KERNEL_ADDRESS)
+
+struct i386_frame {
+ struct i386_frame *f_frame;
+ int f_retaddr;
+ int f_arg0;
+};
+
+#define TRAP 1
+#define INTERRUPT 2
+
+db_addr_t db_trap_symbol_value = 0;
+db_addr_t db_kdintr_symbol_value = 0;
+boolean_t db_trace_symbols_found = FALSE;
+
+void
+db_find_trace_symbols()
+{
+ db_expr_t value;
+ if (db_value_of_name("_trap", &value))
+ db_trap_symbol_value = (db_addr_t) value;
+ if (db_value_of_name("_kdintr", &value))
+ db_kdintr_symbol_value = (db_addr_t) value;
+ db_trace_symbols_found = TRUE;
+}
+
+/*
+ * Figure out how many arguments were passed into the frame at "fp".
+ */
+int
+db_numargs(fp)
+ struct i386_frame *fp;
+{
+ int *argp;
+ int inst;
+ int args;
+ extern char etext[];
+
+ argp = (int *)db_get_value((int)&fp->f_retaddr, 4, FALSE);
+ if (argp < (int *)VM_MIN_KERNEL_ADDRESS || argp > (int *)etext)
+ args = 5;
+ else {
+ inst = db_get_value((int)argp, 4, FALSE);
+ if ((inst & 0xff) == 0x59) /* popl %ecx */
+ args = 1;
+ else if ((inst & 0xffff) == 0xc483) /* addl %n, %esp */
+ args = ((inst >> 16) & 0xff) / 4;
+ else
+ args = 5;
+ }
+ return (args);
+}
+
+/*
+ * Figure out the next frame up in the call stack.
+ * For trap(), we print the address of the faulting instruction and
+ * proceed with the calling frame. We return the ip that faulted.
+ * If the trap was caused by jumping through a bogus pointer, then
+ * the next line in the backtrace will list some random function as
+ * being called. It should get the argument list correct, though.
+ * It might be possible to dig out from the next frame up the name
+ * of the function that faulted, but that could get hairy.
+ */
+void
+db_nextframe(fp, ip, argp, is_trap)
+ struct i386_frame **fp; /* in/out */
+ db_addr_t *ip; /* out */
+ int *argp; /* in */
+ int is_trap; /* in */
+{
+ struct i386_saved_state *saved_regs;
+
+ if (is_trap == 0) {
+ *ip = (db_addr_t)
+ db_get_value((int) &(*fp)->f_retaddr, 4, FALSE);
+ *fp = (struct i386_frame *)
+ db_get_value((int) &(*fp)->f_frame, 4, FALSE);
+ } else {
+ /*
+ * We know that trap() has 1 argument and we know that
+ * it is an (int *).
+ */
+ saved_regs = (struct i386_saved_state *)
+ db_get_value((int)argp, 4, FALSE);
+ db_printf("--- trap (number %d) ---\n",
+ saved_regs->tf_trapno & 0xffff);
+ db_printsym(saved_regs->tf_eip, DB_STGY_XTRN);
+ db_printf(":\n");
+ *fp = (struct i386_frame *)saved_regs->tf_ebp;
+ *ip = (db_addr_t)saved_regs->tf_eip;
+ }
+
+}
+
+void
+db_stack_trace_cmd(addr, have_addr, count, modif)
+ db_expr_t addr;
+ boolean_t have_addr;
+ db_expr_t count;
+ char *modif;
+{
+ struct i386_frame *frame, *lastframe;
+ int *argp;
+ db_addr_t callpc;
+ int is_trap;
+ boolean_t kernel_only = TRUE;
+ boolean_t trace_thread = FALSE;
+
+ if (!db_trace_symbols_found)
+ db_find_trace_symbols();
+
+ {
+ register char *cp = modif;
+ register char c;
+
+ while ((c = *cp++) != 0) {
+ if (c == 't')
+ trace_thread = TRUE;
+ if (c == 'u')
+ kernel_only = FALSE;
+ }
+ }
+
+ if (count == -1)
+ count = 65535;
+
+ if (!have_addr) {
+ frame = (struct i386_frame *)ddb_regs.tf_ebp;
+ callpc = (db_addr_t)ddb_regs.tf_eip;
+ }
+ else if (trace_thread) {
+ printf ("db_trace.c: can't trace thread\n");
+ }
+ else {
+ frame = (struct i386_frame *)addr;
+ callpc = (db_addr_t)db_get_value((int)&frame->f_retaddr, 4, FALSE);
+ }
+
+ lastframe = 0;
+ while (count-- && frame != 0) {
+ register int narg;
+ char * name;
+ db_expr_t offset;
+
+ if (INKERNEL((int)frame) && callpc == db_trap_symbol_value) {
+ narg = 1;
+ is_trap = TRAP;
+ }
+ else
+ if (INKERNEL((int)frame) && callpc == db_kdintr_symbol_value) {
+ is_trap = INTERRUPT;
+ narg = 0;
+ }
+ else {
+ is_trap = 0;
+ narg = db_numargs(frame);
+ }
+
+ db_find_sym_and_offset(callpc, &name, &offset);
+ db_printf("%s(", name);
+
+ argp = &frame->f_arg0;
+ while (narg) {
+ db_printf("%x", db_get_value((int)argp, 4, FALSE));
+ argp++;
+ if (--narg != 0)
+ db_printf(",");
+ }
+ db_printf(") at ");
+ db_printsym(callpc, DB_STGY_XTRN);
+ db_printf("\n");
+
+ lastframe = frame;
+ db_nextframe(&frame, &callpc, &frame->f_arg0, is_trap);
+
+ if (frame == 0) {
+ /* end of chain */
+ break;
+ }
+ if (INKERNEL((int)frame)) {
+ /* staying in kernel */
+ if (frame <= lastframe) {
+ db_printf("Bad frame pointer: 0x%x\n", frame);
+ break;
+ }
+ }
+ else if (INKERNEL((int)lastframe)) {
+ /* switch from user to kernel */
+ if (kernel_only)
+ break; /* kernel stack only */
+ }
+ else {
+ /* in user */
+ if (frame <= lastframe) {
+ db_printf("Bad frame pointer: 0x%x\n", frame);
+ break;
+ }
+ }
+ }
+}
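
The loop above is, at bottom, a linked-list walk over saved frame pointers: each i386 frame begins with the caller's saved %ebp, followed by the return address. A minimal, hypothetical C sketch of that walk over fabricated frames (no symbols, no trap or interrupt special cases):

    #include <stdio.h>

    struct frame {
    	struct frame	*f_frame;	/* saved %ebp: link to caller */
    	unsigned long	 f_retaddr;	/* return address into caller */
    };

    static void
    backtrace(struct frame *fp, int count)
    {
    	while (fp != NULL && count-- > 0) {
    		printf("return address %#lx (frame at %p)\n",
    		    fp->f_retaddr, (void *)fp);
    		fp = fp->f_frame;	/* step to the caller's frame */
    	}
    }

    int
    main(void)
    {
    	struct frame outer = { NULL, 0x1000 };
    	struct frame inner = { &outer, 0x2000 };

    	backtrace(&inner, 16);
    	return (0);
    }
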
diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
new file mode 100644
index 0000000..73392fa
--- /dev/null
+++ b/sys/amd64/amd64/fpu.c
@@ -0,0 +1,564 @@
+/*-
+ * Copyright (c) 1990 William Jolitz.
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)npx.c 7.2 (Berkeley) 5/12/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00154
+ * -------------------- ----- ----------------------
+ *
+ * 20 Apr 93 Bruce Evans New npx-0.5 code
+ * 23 May 93 Rodney W. Grimes Return a special value of -1 from
+ * the probe code to keep isa_config from
+ * printing out the I/O address when we
+ * are using trap 16 handling.
+ *
+ */
+static char rcsid[] = "$Header: /usr/bill/working/sys/i386/isa/RCS/npx.c,v 1.2 92/01/21 14:34:27 william Exp $";
+
+#include "npx.h"
+#if NNPX > 0
+
+#include "param.h"
+#include "systm.h"
+#include "conf.h"
+#include "file.h"
+#include "proc.h"
+#include "machine/cpu.h"
+#include "machine/pcb.h"
+#include "machine/trap.h"
+#include "ioctl.h"
+#include "machine/specialreg.h"
+#include "i386/isa/icu.h"
+#include "i386/isa/isa_device.h"
+#include "i386/isa/isa.h"
+
+/*
+ * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
+ */
+
+#ifdef __GNUC__
+
+#define disable_intr() __asm("cli")
+#define enable_intr() __asm("sti")
+#define fldcw(addr) __asm("fldcw %0" : : "m" (*addr))
+#define fnclex() __asm("fnclex")
+#define fninit() __asm("fninit")
+#define fnsave(addr) __asm("fnsave %0" : "=m" (*addr) : "0" (*addr))
+#define fnstcw(addr) __asm("fnstcw %0" : "=m" (*addr) : "0" (*addr))
+#define fnstsw(addr) __asm("fnstsw %0" : "=m" (*addr) : "0" (*addr))
+#define fp_divide_by_0() __asm("fldz; fld1; fdiv %st,%st(1); fwait")
+#define frstor(addr) __asm("frstor %0" : : "m" (*addr))
+#define fwait() __asm("fwait")
+#define read_eflags() ({u_long ef; \
+ __asm("pushf; popl %0" : "=a" (ef)); \
+ ef; })
+#define start_emulating() __asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
+ : : "n" (CR0_TS) : "ax")
+#define stop_emulating() __asm("clts")
+#define write_eflags(ef) __asm("pushl %0; popf" : : "a" ((u_long) ef))
+
+#else /* not __GNUC__ */
+
+void disable_intr __P((void));
+void enable_intr __P((void));
+void fldcw __P((caddr_t addr));
+void fnclex __P((void));
+void fninit __P((void));
+void fnsave __P((caddr_t addr));
+void fnstcw __P((caddr_t addr));
+void fnstsw __P((caddr_t addr));
+void fp_divide_by_0 __P((void));
+void frstor __P((caddr_t addr));
+void fwait __P((void));
+u_long read_eflags __P((void));
+void start_emulating __P((void));
+void stop_emulating __P((void));
+void write_eflags __P((u_long ef));
+
+#endif /* __GNUC__ */
+
+typedef u_char bool_t;
+
+extern struct gate_descriptor idt[];
+
+int npxdna __P((void));
+void npxexit __P((struct proc *p));
+void npxinit __P((u_int control));
+void npxintr __P((struct intrframe frame));
+void npxsave __P((struct save87 *addr));
+static int npxattach __P((struct isa_device *dvp));
+static int npxprobe __P((struct isa_device *dvp));
+static int npxprobe1 __P((struct isa_device *dvp));
+
+struct isa_driver npxdriver = {
+ npxprobe, npxattach, "npx",
+};
+
+u_int npx0mask;
+struct proc *npxproc;
+
+static bool_t npx_ex16;
+static bool_t npx_exists;
+static struct gate_descriptor npx_idt_probeintr;
+static int npx_intrno;
+static volatile u_int npx_intrs_while_probing;
+static bool_t npx_irq13;
+static volatile u_int npx_traps_while_probing;
+
+/*
+ * Special interrupt handlers. Someday intr0-intr15 will be used to count
+ * interrupts. We'll still need a special exception 16 handler. The busy
+ * latch stuff in probeintr() can be moved to npxprobe().
+ */
+void probeintr(void);
+asm
+("
+ .text
+_probeintr:
+ ss
+ incl _npx_intrs_while_probing
+ pushl %eax
+ movb $0x20,%al /* EOI (asm in strings loses cpp features) */
+ outb %al,$0xa0 /* IO_ICU2 */
+ outb %al,$0x20 /* IO_ICU1 */
+ movb $0,%al
+ outb %al,$0xf0 /* clear BUSY# latch */
+ popl %eax
+ iret
+");
+
+void probetrap(void);
+asm
+("
+ .text
+_probetrap:
+ ss
+ incl _npx_traps_while_probing
+ fnclex
+ iret
+");
+
+/*
+ * Probe routine. Initialize cr0 to give correct behaviour for [f]wait
+ * whether the device exists or not (XXX should be elsewhere). Set flags
+ * to tell npxattach() what to do. Modify device struct if npx doesn't
+ * need to use interrupts. Return 1 if device exists.
+ */
+static int
+npxprobe(dvp)
+ struct isa_device *dvp;
+{
+ int result;
+ u_long save_eflags;
+ u_char save_icu1_mask;
+ u_char save_icu2_mask;
+ struct gate_descriptor save_idt_npxintr;
+ struct gate_descriptor save_idt_npxtrap;
+ /*
+ * This routine is now just a wrapper for npxprobe1(), to install
+ * special npx interrupt and trap handlers, to enable npx interrupts
+ * and to disable other interrupts. Someday isa_configure() will
+ * install suitable handlers and run with interrupts enabled so we
+ * won't need to do so much here.
+ */
+ npx_intrno = NRSVIDT + ffs(dvp->id_irq) - 1;
+ save_eflags = read_eflags();
+ disable_intr();
+ save_icu1_mask = inb(IO_ICU1 + 1);
+ save_icu2_mask = inb(IO_ICU2 + 1);
+ save_idt_npxintr = idt[npx_intrno];
+ save_idt_npxtrap = idt[16];
+ outb(IO_ICU1 + 1, ~(IRQ_SLAVE | dvp->id_irq));
+ outb(IO_ICU2 + 1, ~(dvp->id_irq >> 8));
+ setidt(16, probetrap, SDT_SYS386TGT, SEL_KPL);
+ setidt(npx_intrno, probeintr, SDT_SYS386IGT, SEL_KPL);
+ npx_idt_probeintr = idt[npx_intrno];
+ enable_intr();
+ result = npxprobe1(dvp);
+ disable_intr();
+ outb(IO_ICU1 + 1, save_icu1_mask);
+ outb(IO_ICU2 + 1, save_icu2_mask);
+ idt[npx_intrno] = save_idt_npxintr;
+ idt[16] = save_idt_npxtrap;
+ write_eflags(save_eflags);
+ return (result);
+}
+
+static int
+npxprobe1(dvp)
+ struct isa_device *dvp;
+{
+ int control;
+ int status;
+#ifdef lint
+ npxintr();
+#endif
+ /*
+ * Partially reset the coprocessor, if any. Some BIOS's don't reset
+ * it after a warm boot.
+ */
+ outb(0xf1, 0); /* full reset on some systems, NOP on others */
+ outb(0xf0, 0); /* clear BUSY# latch */
+ /*
+ * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
+ * instructions. We must set the CR0_MP bit and use the CR0_TS
+ * bit to control the trap, because setting the CR0_EM bit does
+ * not cause WAIT instructions to trap. It's important to trap
+ * WAIT instructions - otherwise the "wait" variants of no-wait
+ * control instructions would degenerate to the "no-wait" variants
+ * after FP context switches but work correctly otherwise. It's
+ * particularly important to trap WAITs when there is no NPX -
+ * otherwise the "wait" variants would always degenerate.
+ *
+ * Try setting CR0_NE to get correct error reporting on 486DX's.
+ * Setting it should fail or do nothing on lesser processors.
+ */
+ load_cr0(rcr0() | CR0_MP | CR0_NE);
+ /*
+ * But don't trap while we're probing.
+ */
+ stop_emulating();
+ /*
+ * Finish resetting the coprocessor, if any. If there is an error
+ * pending, then we may get a bogus IRQ13, but probeintr() will handle
+ * it OK. Bogus halts have never been observed, but we enabled
+ * IRQ13 and cleared the BUSY# latch early to handle them anyway.
+ */
+ fninit();
+ DELAY(1000); /* wait for any IRQ13 (fwait might hang) */
+#ifdef DIAGNOSTIC
+ if (npx_intrs_while_probing != 0)
+ printf("fninit caused %u bogus npx interrupt(s)\n",
+ npx_intrs_while_probing);
+ if (npx_traps_while_probing != 0)
+ printf("fninit caused %u bogus npx trap(s)\n",
+ npx_traps_while_probing);
+#endif
+ /*
+ * Check for a status of mostly zero.
+ */
+ status = 0x5a5a;
+ fnstsw(&status);
+ if ((status & 0xb8ff) == 0) {
+ /*
+ * Good, now check for a proper control word.
+ */
+ control = 0x5a5a;
+ fnstcw(&control);
+ if ((control & 0x1f3f) == 0x033f) {
+ npx_exists = 1;
+ /*
+ * We have an npx, now divide by 0 to see if exception
+ * 16 works.
+ */
+ control &= ~(1 << 2); /* enable divide by 0 trap */
+ fldcw(&control);
+ npx_traps_while_probing = npx_intrs_while_probing = 0;
+ fp_divide_by_0();
+ if (npx_traps_while_probing != 0) {
+ /*
+ * Good, exception 16 works.
+ */
+ npx_ex16 = 1;
+ dvp->id_irq = 0; /* zap the interrupt */
+ /*
+ * special return value to flag that we do not
+ * actually use any I/O registers
+ */
+ return (-1);
+ }
+ if (npx_intrs_while_probing != 0) {
+ /*
+ * Bad, we are stuck with IRQ13.
+ */
+ npx_irq13 = 1;
+ npx0mask = dvp->id_irq; /* npxattach too late */
+ return (IO_NPXSIZE);
+ }
+ /*
+ * Worse, even IRQ13 is broken. Use emulator.
+ */
+ }
+ }
+ /*
+ * Probe failed, but we want to get to npxattach to initialize the
+ * emulator and say that it has been installed. XXX handle devices
+ * that aren't really devices better.
+ */
+ dvp->id_irq = 0;
+ return (IO_NPXSIZE);
+}
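
The two masks checked above carry the whole decision about whether a coprocessor answered: after fninit a real 80x87 leaves the exception and busy bits of the status word clear and reports the documented reset control word (0x037f). A hypothetical sketch of just that decision with the hardware reads stubbed out; only the masks are taken from the code above.

    #include <stdio.h>

    static int
    looks_like_npx(unsigned int status, unsigned int control)
    {
    	if ((status & 0xb8ff) != 0)		/* status mostly zero? */
    		return (0);
    	if ((control & 0x1f3f) != 0x033f)	/* proper control word? */
    		return (0);
    	return (1);
    }

    int
    main(void)
    {
    	/* 0x037f is the control word a freshly fninit'ed FPU reports. */
    	printf("fpu present:  %d\n", looks_like_npx(0x0000, 0x037f));
    	printf("nothing home: %d\n", looks_like_npx(0x5a5a, 0x5a5a));
    	return (0);
    }
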
+
+/*
+ * Attach routine - announce which it is, and wire into system
+ */
+int
+npxattach(dvp)
+ struct isa_device *dvp;
+{
+ if (npx_ex16)
+ printf(" <Errors reported via Exception 16>");
+ else if (npx_irq13)
+ printf(" <Errors reported via IRQ 13>");
+ else if (npx_exists)
+ printf(" <Error reporting broken, using 387 emulator>");
+ else
+ printf(" <387 Emulator>");
+ npxinit(__INITIAL_NPXCW__);
+ return (1); /* XXX unused */
+}
+
+/*
+ * Initialize floating point unit.
+ */
+void
+npxinit(control)
+ u_int control;
+{
+ struct save87 dummy;
+
+ if (!npx_exists)
+ return;
+ /*
+ * fninit has the same h/w bugs as fnsave. Use the detoxified
+ * fnsave to throw away any junk in the fpu. fnsave initializes
+ * the fpu and sets npxproc = NULL as important side effects.
+ */
+ npxsave(&dummy);
+ stop_emulating();
+ fldcw(&control);
+ if (curpcb != NULL)
+ fnsave(&curpcb->pcb_savefpu);
+ start_emulating();
+}
+
+/*
+ * Free coprocessor (if we have it).
+ */
+void
+npxexit(p)
+ struct proc *p;
+{
+
+ if (p == npxproc) {
+ start_emulating();
+ npxproc = NULL;
+ }
+}
+
+/*
+ * Record the FPU state and reinitialize it all except for the control word.
+ * Then generate a SIGFPE.
+ *
+ * Reinitializing the state allows naive SIGFPE handlers to longjmp without
+ * doing any fixups.
+ *
+ * XXX there is currently no way to pass the full error state to signal
+ * handlers, and if this is a nested interrupt there is no way to pass even
+ * a status code! So there is no way to have a non-naive SIGFPE handler. At
+ * best a handler could do an fninit followed by an fldcw of a static value.
+ * fnclex would be of little use because it would leave junk on the FPU stack.
+ * Returning from the handler would be even less safe than usual because
+ * IRQ13 exception handling makes exceptions even less precise than usual.
+ */
+void
+npxintr(frame)
+ struct intrframe frame;
+{
+ int code;
+
+ if (npxproc == NULL || !npx_exists) {
+ /* XXX no %p in stand/printf.c. Cast to quiet gcc -Wall. */
+ printf("npxintr: npxproc = %lx, curproc = %lx, npx_exists = %d\n",
+ (u_long) npxproc, (u_long) curproc, npx_exists);
+ panic("npxintr from nowhere");
+ }
+ if (npxproc != curproc) {
+ printf("npxintr: npxproc = %lx, curproc = %lx, npx_exists = %d\n",
+ (u_long) npxproc, (u_long) curproc, npx_exists);
+ panic("npxintr from non-current process");
+ }
+ /*
+ * Save state. This does an implied fninit. It had better not halt
+ * the cpu or we'll hang.
+ */
+ outb(0xf0, 0);
+ fnsave(&curpcb->pcb_savefpu);
+ fwait();
+ /*
+ * Restore control word (was clobbered by fnsave).
+ */
+ fldcw(&curpcb->pcb_savefpu.sv_env.en_cw);
+ fwait();
+ /*
+ * Remember the exception status word and tag word. The current
+ * (almost fninit'ed) fpu state is in the fpu and the exception
+ * state just saved will soon be junk. However, the implied fninit
+ * doesn't change the error pointers or register contents, and we
+ * preserved the control word and will copy the status and tag
+ * words, so the complete exception state can be recovered.
+ */
+ curpcb->pcb_savefpu.sv_ex_sw = curpcb->pcb_savefpu.sv_env.en_sw;
+ curpcb->pcb_savefpu.sv_ex_tw = curpcb->pcb_savefpu.sv_env.en_tw;
+
+ /*
+ * Pass exception to process.
+ */
+ if (ISPL(frame.if_cs) == SEL_UPL) {
+ /*
+ * Interrupt is essentially a trap, so we can afford to call
+ * the SIGFPE handler (if any) as soon as the interrupt
+ * returns.
+ *
+ * XXX little or nothing is gained from this, and plenty is
+ * lost - the interrupt frame has to contain the trap frame
+ * (this is otherwise only necessary for the rescheduling trap
+ * in doreti, and the frame for that could easily be set up
+ * just before it is used).
+ */
+ curproc->p_regs = (int *)&frame.if_es;
+ curpcb->pcb_flags |= FM_TRAP; /* used by sendsig */
+#ifdef notyet
+ /*
+ * Encode the appropriate code for detailed information on
+ * this exception.
+ */
+ code = XXX_ENCODE(curpcb->pcb_savefpu.sv_ex_sw);
+#else
+ code = 0; /* XXX */
+#endif
+ trapsignal(curproc, SIGFPE, code);
+ curpcb->pcb_flags &= ~FM_TRAP;
+ } else {
+ /*
+ * Nested interrupt. These losers occur when:
+ * o an IRQ13 is bogusly generated at a bogus time, e.g.:
+ * o immediately after an fnsave or frstor of an
+ * error state.
+ * o a couple of 386 instructions after
+ * "fstpl _memvar" causes a stack overflow.
+ * These are especially nasty when combined with a
+ * trace trap.
+ * o an IRQ13 occurs at the same time as another higher-
+ * priority interrupt.
+ *
+ * Treat them like a true async interrupt.
+ */
+ psignal(npxproc, SIGFPE);
+ }
+}
+
+/*
+ * Implement device not available (DNA) exception
+ *
+ * It would be better to switch FP context here (only). This would require
+ * saving the state in the proc table instead of in the pcb.
+ */
+int
+npxdna()
+{
+ if (!npx_exists)
+ return (0);
+ if (npxproc != NULL) {
+ printf("npxdna: npxproc = %lx, curproc = %lx\n",
+ (u_long) npxproc, (u_long) curproc);
+ panic("npxdna");
+ }
+ stop_emulating();
+ /*
+ * Record new context early in case frstor causes an IRQ13.
+ */
+ npxproc = curproc;
+ /*
+ * The following frstor may cause an IRQ13 when the state being
+ * restored has a pending error. The error will appear to have been
+ * triggered by the current (npx) user instruction even when that
+ * instruction is a no-wait instruction that should not trigger an
+ * error (e.g., fnclex). On at least one 486 system all of the
+ * no-wait instructions are broken the same as frstor, so our
+ * treatment does not amplify the breakage. On at least one
+ * 386/Cyrix 387 system, fnclex works correctly while frstor and
+ * fnsave are broken, so our treatment breaks fnclex if it is the
+ * first FPU instruction after a context switch.
+ */
+ frstor(&curpcb->pcb_savefpu);
+
+ return (1);
+}
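
The comment at the head of npxdna() remarks that it would be better to switch FP context here (only). Below is a rough user-space model of that fully lazy variant, with every name and type invented for the sketch: the register file stays loaded until some other process takes the DNA trap, at which point the old owner's state is saved and the new owner's restored.

    #include <stdio.h>

    struct fpstate { double regs[8]; };	/* stand-in for struct save87 */

    struct fproc {
    	const char	*name;
    	struct fpstate	 fp;		/* pcb_savefpu analogue */
    };

    static struct fproc	*fpu_owner;	/* npxproc analogue */
    static struct fpstate	 fpu_hw;	/* the physical register file */

    static void
    fpu_dna(struct fproc *cur)		/* device-not-available trap */
    {
    	if (fpu_owner == cur)
    		return;			/* context already loaded */
    	if (fpu_owner != NULL)
    		fpu_owner->fp = fpu_hw;	/* "fnsave" for the old owner */
    	fpu_hw = cur->fp;		/* "frstor" for the new owner */
    	fpu_owner = cur;
    }

    int
    main(void)
    {
    	struct fproc a = { "a", { { 1.0 } } };
    	struct fproc b = { "b", { { 2.0 } } };

    	fpu_dna(&a);
    	fpu_dna(&b);
    	printf("owner %s, reg0 %g\n", fpu_owner->name, fpu_hw.regs[0]);
    	return (0);
    }
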
+
+/*
+ * Wrapper for fnsave instruction to handle h/w bugs. If there is an error
+ * pending, then fnsave generates a bogus IRQ13 on some systems. Force
+ * any IRQ13 to be handled immediately, and then ignore it. This routine is
+ * often called at splhigh so it must not use many system services. In
+ * particular, it's much easier to install a special handler than to
+ * guarantee that it's safe to use npxintr() and its supporting code.
+ */
+void
+npxsave(addr)
+ struct save87 *addr;
+{
+ u_char icu1_mask;
+ u_char icu2_mask;
+ u_char old_icu1_mask;
+ u_char old_icu2_mask;
+ struct gate_descriptor save_idt_npxintr;
+
+ disable_intr();
+ old_icu1_mask = inb(IO_ICU1 + 1);
+ old_icu2_mask = inb(IO_ICU2 + 1);
+ save_idt_npxintr = idt[npx_intrno];
+ outb(IO_ICU1 + 1, old_icu1_mask & ~(IRQ_SLAVE | npx0mask));
+ outb(IO_ICU2 + 1, old_icu2_mask & ~(npx0mask >> 8));
+ idt[npx_intrno] = npx_idt_probeintr;
+ enable_intr();
+ stop_emulating();
+ fnsave(addr);
+ fwait();
+ start_emulating();
+ npxproc = NULL;
+ disable_intr();
+ icu1_mask = inb(IO_ICU1 + 1); /* masks may have changed */
+ icu2_mask = inb(IO_ICU2 + 1);
+ outb(IO_ICU1 + 1,
+ (icu1_mask & ~npx0mask) | (old_icu1_mask & npx0mask));
+ outb(IO_ICU2 + 1,
+ (icu2_mask & ~(npx0mask >> 8))
+ | (old_icu2_mask & (npx0mask >> 8)));
+ idt[npx_intrno] = save_idt_npxintr;
+ enable_intr(); /* back to usual state */
+}
+
+#endif /* NNPX > 0 */
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
new file mode 100644
index 0000000..18ec37b
--- /dev/null
+++ b/sys/amd64/amd64/genassym.c
@@ -0,0 +1,174 @@
+/*-
+ * Copyright (c) 1982, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)genassym.c 5.11 (Berkeley) 5/10/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00154
+ * -------------------- ----- ----------------------
+ *
+ * 24 Apr 93 Bruce Evans/Dave Rivers Npx-0.5 support
+ *
+ */
+static char rcsid[] = "$Header: /usr/bill/working/sys/i386/i386/RCS/genassym.c,v 1.2 92/01/21 14:22:02 william Exp $";
+
+#ifndef lint
+static char sccsid[] = "@(#)genassym.c 5.11 (Berkeley) 5/10/91";
+#endif /* not lint */
+
+#include "sys/param.h"
+#include "sys/buf.h"
+#include "sys/vmmeter.h"
+#include "sys/proc.h"
+#include "sys/user.h"
+#include "sys/mbuf.h"
+#include "sys/msgbuf.h"
+#include "sys/resourcevar.h"
+#include "machine/cpu.h"
+#include "machine/trap.h"
+#include "machine/psl.h"
+#include "machine/reg.h"
+#include "sys/syscall.h"
+#include "vm/vm_param.h"
+#include "vm/vm_map.h"
+#include "machine/pmap.h"
+
+main()
+{
+ struct proc *p = (struct proc *)0;
+ struct vmmeter *vm = (struct vmmeter *)0;
+ struct user *up = (struct user *)0;
+ struct rusage *rup = (struct rusage *)0;
+ struct uprof *uprof = (struct uprof *)0;
+ struct vmspace *vms = (struct vmspace *)0;
+ vm_map_t map = (vm_map_t)0;
+ pmap_t pmap = (pmap_t)0;
+ struct pcb *pcb = (struct pcb *)0;
+ register unsigned i;
+
+ printf("#define\tI386_CR3PAT %d\n", I386_CR3PAT);
+ printf("#define\tUDOT_SZ %d\n", sizeof(struct user));
+ printf("#define\tP_LINK %d\n", &p->p_link);
+ printf("#define\tP_RLINK %d\n", &p->p_rlink);
+ printf("#define\tP_VMSPACE %d\n", &p->p_vmspace);
+ printf("#define\tVM_PMAP %d\n", &vms->vm_pmap);
+ printf("#define\tP_ADDR %d\n", &p->p_addr);
+ printf("#define\tP_PRI %d\n", &p->p_pri);
+ printf("#define\tP_STAT %d\n", &p->p_stat);
+ printf("#define\tP_WCHAN %d\n", &p->p_wchan);
+ printf("#define\tP_FLAG %d\n", &p->p_flag);
+ printf("#define\tP_PID %d\n", &p->p_pid);
+ printf("#define\tSSLEEP %d\n", SSLEEP);
+ printf("#define\tSRUN %d\n", SRUN);
+ printf("#define\tV_SWTCH %d\n", &vm->v_swtch);
+ printf("#define\tV_TRAP %d\n", &vm->v_trap);
+ printf("#define\tV_SYSCALL %d\n", &vm->v_syscall);
+ printf("#define\tV_INTR %d\n", &vm->v_intr);
+ printf("#define\tV_SOFT %d\n", &vm->v_soft);
+ printf("#define\tV_PDMA %d\n", &vm->v_pdma);
+ printf("#define\tV_FAULTS %d\n", &vm->v_faults);
+ printf("#define\tV_PGREC %d\n", &vm->v_pgrec);
+ printf("#define\tV_FASTPGREC %d\n", &vm->v_fastpgrec);
+ printf("#define\tUPAGES %d\n", UPAGES);
+ printf("#define\tHIGHPAGES %d\n", HIGHPAGES);
+ printf("#define\tCLSIZE %d\n", CLSIZE);
+ printf("#define\tNBPG %d\n", NBPG);
+ printf("#define\tNPTEPG %d\n", NPTEPG);
+ printf("#define\tPGSHIFT %d\n", PGSHIFT);
+ printf("#define\tSYSPTSIZE %d\n", SYSPTSIZE);
+ printf("#define\tUSRPTSIZE %d\n", USRPTSIZE);
+ printf("#define\tUSRIOSIZE %d\n", USRIOSIZE);
+#ifdef SYSVSHM
+ printf("#define\tSHMMAXPGS %d\n", SHMMAXPGS);
+#endif
+ printf("#define\tUSRSTACK %d\n", USRSTACK);
+ printf("#define\tMSGBUFPTECNT %d\n", btoc(sizeof (struct msgbuf)));
+ printf("#define\tNMBCLUSTERS %d\n", NMBCLUSTERS);
+ printf("#define\tMCLBYTES %d\n", MCLBYTES);
+ printf("#define\tPCB_LINK %d\n", &pcb->pcb_tss.tss_link);
+ printf("#define\tPCB_ESP0 %d\n", &pcb->pcb_tss.tss_esp0);
+ printf("#define\tPCB_SS0 %d\n", &pcb->pcb_tss.tss_ss0);
+ printf("#define\tPCB_ESP1 %d\n", &pcb->pcb_tss.tss_esp1);
+ printf("#define\tPCB_SS1 %d\n", &pcb->pcb_tss.tss_ss1);
+ printf("#define\tPCB_ESP2 %d\n", &pcb->pcb_tss.tss_esp2);
+ printf("#define\tPCB_SS2 %d\n", &pcb->pcb_tss.tss_ss2);
+ printf("#define\tPCB_CR3 %d\n", &pcb->pcb_tss.tss_cr3);
+ printf("#define\tPCB_EIP %d\n", &pcb->pcb_tss.tss_eip);
+ printf("#define\tPCB_EFLAGS %d\n", &pcb->pcb_tss.tss_eflags);
+ printf("#define\tPCB_EAX %d\n", &pcb->pcb_tss.tss_eax);
+ printf("#define\tPCB_ECX %d\n", &pcb->pcb_tss.tss_ecx);
+ printf("#define\tPCB_EDX %d\n", &pcb->pcb_tss.tss_edx);
+ printf("#define\tPCB_EBX %d\n", &pcb->pcb_tss.tss_ebx);
+ printf("#define\tPCB_ESP %d\n", &pcb->pcb_tss.tss_esp);
+ printf("#define\tPCB_EBP %d\n", &pcb->pcb_tss.tss_ebp);
+ printf("#define\tPCB_ESI %d\n", &pcb->pcb_tss.tss_esi);
+ printf("#define\tPCB_EDI %d\n", &pcb->pcb_tss.tss_edi);
+ printf("#define\tPCB_ES %d\n", &pcb->pcb_tss.tss_es);
+ printf("#define\tPCB_CS %d\n", &pcb->pcb_tss.tss_cs);
+ printf("#define\tPCB_SS %d\n", &pcb->pcb_tss.tss_ss);
+ printf("#define\tPCB_DS %d\n", &pcb->pcb_tss.tss_ds);
+ printf("#define\tPCB_FS %d\n", &pcb->pcb_tss.tss_fs);
+ printf("#define\tPCB_GS %d\n", &pcb->pcb_tss.tss_gs);
+ printf("#define\tPCB_LDT %d\n", &pcb->pcb_tss.tss_ldt);
+ printf("#define\tPCB_IOOPT %d\n", &pcb->pcb_tss.tss_ioopt);
+ printf("#define\tNKMEMCLUSTERS %d\n", NKMEMCLUSTERS);
+ printf("#define\tU_PROF %d\n", &up->u_stats.p_prof);
+ printf("#define\tU_PROFSCALE %d\n", &up->u_stats.p_prof.pr_scale);
+ printf("#define\tPR_BASE %d\n", &uprof->pr_base);
+ printf("#define\tPR_SIZE %d\n", &uprof->pr_size);
+ printf("#define\tPR_OFF %d\n", &uprof->pr_off);
+ printf("#define\tPR_SCALE %d\n", &uprof->pr_scale);
+ printf("#define\tRU_MINFLT %d\n", &rup->ru_minflt);
+ printf("#define\tPCB_FLAGS %d\n", &pcb->pcb_flags);
+ printf("#define\tPCB_SAVEFPU %d\n", &pcb->pcb_savefpu);
+#ifdef notused
+ printf("#define\tFP_WASUSED %d\n", FP_WASUSED);
+ printf("#define\tFP_NEEDSSAVE %d\n", FP_NEEDSSAVE);
+ printf("#define\tFP_NEEDSRESTORE %d\n", FP_NEEDSRESTORE);
+#endif
+ printf("#define\tFP_USESEMC %d\n", FP_USESEMC);
+ printf("#define\tPCB_SAVEEMC %d\n", &pcb->pcb_saveemc);
+ printf("#define\tPCB_CMAP2 %d\n", &pcb->pcb_cmap2);
+ printf("#define\tPCB_SIGC %d\n", pcb->pcb_sigc);
+ printf("#define\tPCB_IML %d\n", &pcb->pcb_iml);
+ printf("#define\tPCB_ONFAULT %d\n", &pcb->pcb_onfault);
+
+ printf("#define\tB_READ %d\n", B_READ);
+ printf("#define\tENOENT %d\n", ENOENT);
+ printf("#define\tEFAULT %d\n", EFAULT);
+ printf("#define\tENAMETOOLONG %d\n", ENAMETOOLONG);
+ exit(0);
+}
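
genassym.c turns structure offsets into assembler constants by taking member addresses through a null (struct proc *)0 pointer and printing them for locore.s to include. A hypothetical modern rendering of the same trick with the standard offsetof macro; the structure below is invented purely for illustration and is not the real pcb layout.

    #include <stdio.h>
    #include <stddef.h>

    struct pcb_like {
    	int	pcb_flags;
    	long	pcb_cr3;
    	char	pcb_savefpu[108];
    };

    int
    main(void)
    {
    	printf("#define\tPCB_FLAGS %zu\n", offsetof(struct pcb_like, pcb_flags));
    	printf("#define\tPCB_CR3 %zu\n", offsetof(struct pcb_like, pcb_cr3));
    	printf("#define\tPCB_SAVEFPU %zu\n", offsetof(struct pcb_like, pcb_savefpu));
    	return (0);
    }
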
diff --git a/sys/amd64/amd64/locore.S b/sys/amd64/amd64/locore.S
new file mode 100644
index 0000000..d558dba
--- /dev/null
+++ b/sys/amd64/amd64/locore.S
@@ -0,0 +1,1830 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)locore.s 7.3 (Berkeley) 5/13/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 5 00158
+ * -------------------- ----- ----------------------
+ *
+ * 06 Aug 92 Pace Willisson Allow VGA memory to be mapped
+ * 28 Nov 92 Frank MacLachlan Aligned addresses and data
+ * on 32bit boundaries.
+ * 25 Mar 93 Kevin Lahey Add syscall counter for vmstat
+ * 20 Apr 93 Bruce Evans New npx-0.5 code
+ * 25 Apr 93 Bruce Evans Support new interrupt code (intr-0.1)
+ */
+
+
+/*
+ * locore.s: 4BSD machine support for the Intel 386
+ * Preliminary version
+ * Written by William F. Jolitz, 386BSD Project
+ */
+
+#include "assym.s"
+#include "machine/psl.h"
+#include "machine/pte.h"
+
+#include "errno.h"
+
+#include "machine/trap.h"
+
+#include "machine/specialreg.h"
+#include "i386/isa/debug.h"
+
+#define KDSEL 0x10
+#define SEL_RPL_MASK 0x0003
+#define TRAPF_CS_OFF (13 * 4)
+
+/*
+ * Note: This version is greatly munged to avoid various assembler errors
+ * that may be fixed in newer versions of gas.  Perhaps newer versions
+ * will have a more pleasant appearance.
+ */
+
+ .set IDXSHIFT,10
+ .set SYSTEM,0xFE000000 # virtual address of system start
+	/* note: gas copies the sign bit (i.e. arithmetic >>), so we can't do SYSTEM>>22! */
+ .set SYSPDROFF,0x3F8 # Page dir index of System Base
+
+#define ALIGN_DATA .align 2
+#define ALIGN_TEXT .align 2,0x90 /* 4-byte boundaries, NOP-filled */
+#define SUPERALIGN_TEXT .align 4,0x90 /* 16-byte boundaries better for 486 */
+
+/* NB: NOP now preserves registers so NOPs can be inserted anywhere */
+/* XXX: NOP and FASTER_NOP are misleadingly named */
+#ifdef BROKEN_HARDWARE_AND_OR_SOFTWARE /* XXX - rarely necessary */
+#define FASTER_NOP pushl %eax ; inb $0x84,%al ; popl %eax
+#define NOP pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
+#else
+#define FASTER_NOP
+#define NOP
+#endif
+
+/*
+ * PTmap is recursive pagemap at top of virtual address space.
+ * Within PTmap, the page directory can be found (third indirection).
+ */
+ .set PDRPDROFF,0x3F7 # Page dir index of Page dir
+ .globl _PTmap, _PTD, _PTDpde, _Sysmap
+ .set _PTmap,0xFDC00000
+ .set _PTD,0xFDFF7000
+ .set _Sysmap,0xFDFF8000
+ .set _PTDpde,0xFDFF7000+4*PDRPDROFF
+
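
The constants above encode a recursive page-directory trick: slot 0x3F7 of the page directory points back at the directory itself, so every page table shows up as ordinary memory in the 4 MB window at _PTmap and the directory itself shows up at _PTD. A small sketch, assuming 4 KB pages and 4-byte PTEs as in this file, of the address arithmetic that falls out of it:

    #include <stdio.h>

    #define PGSHIFT		12				/* 4 KB pages */
    #define PDRPDROFF	0x3F7UL				/* self-referencing slot */
    #define PTMAP_BASE	(PDRPDROFF << 22)		/* == _PTmap, 0xFDC00000 */
    #define PTD_BASE	(PTMAP_BASE + (PDRPDROFF << PGSHIFT))	/* == _PTD */

    /* Virtual address of the PTE that maps va, as seen through PTmap. */
    static unsigned long
    vtopte(unsigned long va)
    {
    	return (PTMAP_BASE + (va >> PGSHIFT) * 4);
    }

    int
    main(void)
    {
    	printf("PTmap %#lx, PTD %#lx\n", PTMAP_BASE, PTD_BASE);
    	printf("pte for kernel base 0xFE000000 sits at %#lx\n",
    	    vtopte(0xFE000000UL));			/* == _Sysmap */
    	return (0);
    }
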
+/*
+ * APTmap, APTD is the alternate recursive pagemap.
+ * It's used when modifying another process's page tables.
+ */
+ .set APDRPDROFF,0x3FE # Page dir index of Page dir
+ .globl _APTmap, _APTD, _APTDpde
+ .set _APTmap,0xFF800000
+ .set _APTD,0xFFBFE000
+ .set _APTDpde,0xFDFF7000+4*APDRPDROFF
+
+/*
+ * Access to each processes kernel stack is via a region of
+ * per-process address space (at the beginning), immediatly above
+ * the user process stack.
+ */
+ .set _kstack, USRSTACK
+ .globl _kstack
+ .set PPDROFF,0x3F6
+ .set PPTEOFF,0x400-UPAGES # 0x3FE
+
+#define ENTRY(name) \
+ .globl _/**/name; ALIGN_TEXT; _/**/name:
+#define ALTENTRY(name) ENTRY(name)
+
+/*
+ * Initialization
+ */
+ .data
+ .globl _cpu,_cold,_boothowto,_bootdev,_cyloffset,_atdevbase,_atdevphys
+_cpu: .long 0 # are we 386, 386sx, or 486
+_cold: .long 1 # cold till we are not
+_atdevbase: .long 0 # location of start of iomem in virtual
+_atdevphys: .long 0 # location of device mapping ptes (phys)
+
+ .globl _IdlePTD, _KPTphys
+_IdlePTD: .long 0
+_KPTphys: .long 0
+
+ .space 512
+tmpstk:
+ .text
+ .globl start
+start: movw $0x1234,%ax
+ movw %ax,0x472 # warm boot
+ jmp 1f
+ .space 0x500 # skip over warm boot shit
+
+ /*
+ * pass parameters on stack (howto, bootdev, unit, cyloffset)
+ * note: (%esp) is return address of boot
+ * ( if we want to hold onto /boot, it's physical %esp up to _end)
+ */
+
+ 1: movl 4(%esp),%eax
+ movl %eax,_boothowto-SYSTEM
+ movl 8(%esp),%eax
+ movl %eax,_bootdev-SYSTEM
+ movl 12(%esp),%eax
+ movl %eax, _cyloffset-SYSTEM
+
+ /*
+ * Finished with old stack; load new %esp now instead of later so
+ * we can trace this code without having to worry about the trace
+ * trap clobbering the memory test or the zeroing of the bss+bootstrap
+ * page tables.
+ *
+ * XXX - wdboot clears the bss after testing that this is safe.
+ * This is too wasteful - memory below 640K is scarce. The boot
+ * program should check:
+ * text+data <= &stack_variable - more_space_for_stack
+ * text+data+bss+pad+space_for_page_tables <= end_of_memory
+ * Oops, the gdt is in the carcass of the boot program so clearing
+ * the rest of memory is still not possible.
+ */
+ movl $ tmpstk-SYSTEM,%esp # bootstrap stack end location
+
+#ifdef garbage
+ /* count up memory */
+
+ xorl %eax,%eax # start with base memory at 0x0
+ #movl $ 0xA0000/NBPG,%ecx # look every 4K up to 640K
+ movl $ 0xA0,%ecx # look every 4K up to 640K
+1: movl (%eax),%ebx # save location to check
+ movl $0xa55a5aa5,(%eax) # write test pattern
+ /* flush stupid cache here! (with bcopy (0,0,512*1024) ) */
+ cmpl $0xa55a5aa5,(%eax) # does not check yet for rollover
+ jne 2f
+ movl %ebx,(%eax) # restore memory
+ addl $ NBPG,%eax
+ loop 1b
+2: shrl $12,%eax
+ movl %eax,_Maxmem-SYSTEM
+
+	movl	$0x100000,%eax		# next, tally remaining memory
+ #movl $((0xFFF000-0x100000)/NBPG),%ecx
+ movl $(0xFFF-0x100),%ecx
+1: movl (%eax),%ebx # save location to check
+ movl $0xa55a5aa5,(%eax) # write test pattern
+ cmpl $0xa55a5aa5,(%eax) # does not check yet for rollover
+ jne 2f
+ movl %ebx,(%eax) # restore memory
+ addl $ NBPG,%eax
+ loop 1b
+2: shrl $12,%eax
+ movl %eax,_Maxmem-SYSTEM
+#endif
+
+/* find end of kernel image */
+ movl $_end-SYSTEM,%ecx
+ addl $ NBPG-1,%ecx
+ andl $~(NBPG-1),%ecx
+ movl %ecx,%esi
+
+/* clear bss and memory for bootstrap pagetables. */
+ movl $_edata-SYSTEM,%edi
+ subl %edi,%ecx
+ addl $(UPAGES+5)*NBPG,%ecx
+/*
+ * Virtual address space of kernel:
+ *
+ * text | data | bss | page dir | proc0 kernel stack | usr stk map | Sysmap
+ * 0 1 2 3 4
+ */
+ xorl %eax,%eax # pattern
+ cld
+ rep
+ stosb
+
+ movl %esi,_IdlePTD-SYSTEM /*physical address of Idle Address space */
+
+#define fillkpt \
+1: movl %eax,(%ebx) ; \
+ addl $ NBPG,%eax ; /* increment physical address */ \
+ addl $4,%ebx ; /* next pte */ \
+ loop 1b ;
+
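
fillkpt above is the entire page-table-building primitive: %eax holds a physical address already or'ed with the protection bits, %ebx points at the next PTE, %ecx counts entries, and the loop writes consecutive PTEs mapping consecutive pages. A hypothetical C rendering of the same loop; the PG_V/PG_KW values are assumptions standing in for machine/pte.h.

    #include <stdio.h>

    #define NBPG	4096		/* page size */
    #define PG_V	0x001		/* valid (assumed value) */
    #define PG_KW	0x002		/* kernel read/write (assumed value) */

    /* Write "count" PTEs mapping consecutive physical pages starting at pa. */
    static void
    fillkpt(unsigned long *pte, unsigned long pa, unsigned long prot, int count)
    {
    	while (count-- > 0) {
    		*pte++ = pa | prot;	/* next pte */
    		pa += NBPG;		/* increment physical address */
    	}
    }

    int
    main(void)
    {
    	unsigned long kpt[4];

    	fillkpt(kpt, 0x100000, PG_V | PG_KW, 4);
    	for (int i = 0; i < 4; i++)
    		printf("pte[%d] = %#lx\n", i, kpt[i]);
    	return (0);
    }
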
+/*
+ * Map Kernel
+ * N.B. don't bother with making kernel text RO, as 386
+ * ignores R/W AND U/S bits on kernel access (only v works) !
+ *
+ * First step - build page tables
+ */
+ movl %esi,%ecx # this much memory,
+ shrl $ PGSHIFT,%ecx # for this many pte s
+ addl $ UPAGES+4,%ecx # including our early context
+ movl $0xa0,%ecx # XXX - cover debugger pages
+ movl $PG_V|PG_KW,%eax # having these bits set,
+ lea (4*NBPG)(%esi),%ebx # physical address of KPT in proc 0,
+ movl %ebx,_KPTphys-SYSTEM # in the kernel page table,
+ fillkpt
+
+/* map I/O memory map */
+
+ movl $0x100-0xa0,%ecx # for this many pte s,
+ movl $(0xa0000|PG_V|PG_UW),%eax # having these bits set,(perhaps URW?) XXX 06 Aug 92
+ movl %ebx,_atdevphys-SYSTEM # remember phys addr of ptes
+ fillkpt
+
+ /* map proc 0's kernel stack into user page table page */
+
+ movl $ UPAGES,%ecx # for this many pte s,
+ lea (1*NBPG)(%esi),%eax # physical address in proc 0
+ lea (SYSTEM)(%eax),%edx
+ movl %edx,_proc0paddr-SYSTEM # remember VA for 0th process init
+ orl $PG_V|PG_KW,%eax # having these bits set,
+ lea (3*NBPG)(%esi),%ebx # physical address of stack pt in proc 0
+ addl $(PPTEOFF*4),%ebx
+ fillkpt
+
+/*
+ * Construct a page table directory
+ * (of page directory elements - pde's)
+ */
+ /* install a pde for temporary double map of bottom of VA */
+ lea (4*NBPG)(%esi),%eax # physical address of kernel page table
+ orl $ PG_V|PG_UW,%eax # pde entry is valid XXX 06 Aug 92
+ movl %eax,(%esi) # which is where temp maps!
+
+ /* kernel pde's */
+ movl $ 3,%ecx # for this many pde s,
+ lea (SYSPDROFF*4)(%esi), %ebx # offset of pde for kernel
+ fillkpt
+
+ /* install a pde recursively mapping page directory as a page table! */
+ movl %esi,%eax # phys address of ptd in proc 0
+ orl $ PG_V|PG_UW,%eax # pde entry is valid XXX 06 Aug 92
+ movl %eax, PDRPDROFF*4(%esi) # which is where PTmap maps!
+
+ /* install a pde to map kernel stack for proc 0 */
+ lea (3*NBPG)(%esi),%eax # physical address of pt in proc 0
+ orl $PG_V|PG_KW,%eax # pde entry is valid
+ movl %eax,PPDROFF*4(%esi) # which is where kernel stack maps!
+
+ /* copy and convert stuff from old gdt and idt for debugger */
+
+ cmpl $0x0375c339,0x96104 # XXX - debugger signature
+ jne 1f
+ movb $1,_bdb_exists-SYSTEM
+1:
+ pushal
+ subl $2*6,%esp
+
+ sgdt (%esp)
+ movl 2(%esp),%esi # base address of current gdt
+ movl $_gdt-SYSTEM,%edi
+ movl %edi,2(%esp)
+ movl $8*18/4,%ecx
+ rep # copy gdt
+ movsl
+ movl $_gdt-SYSTEM,-8+2(%edi) # adjust gdt self-ptr
+ movb $0x92,-8+5(%edi)
+
+ sidt 6(%esp)
+ movl 6+2(%esp),%esi # base address of current idt
+ movl 8+4(%esi),%eax # convert dbg descriptor to ...
+ movw 8(%esi),%ax
+ movl %eax,bdb_dbg_ljmp+1-SYSTEM # ... immediate offset ...
+ movl 8+2(%esi),%eax
+ movw %ax,bdb_dbg_ljmp+5-SYSTEM # ... and selector for ljmp
+ movl 24+4(%esi),%eax # same for bpt descriptor
+ movw 24(%esi),%ax
+ movl %eax,bdb_bpt_ljmp+1-SYSTEM
+ movl 24+2(%esi),%eax
+ movw %ax,bdb_bpt_ljmp+5-SYSTEM
+
+ movl $_idt-SYSTEM,%edi
+ movl %edi,6+2(%esp)
+ movl $8*4/4,%ecx
+ rep # copy idt
+ movsl
+
+ lgdt (%esp)
+ lidt 6(%esp)
+
+ addl $2*6,%esp
+ popal
+
+ /* load base of page directory, and enable mapping */
+ movl %esi,%eax # phys address of ptd in proc 0
+ orl $ I386_CR3PAT,%eax
+ movl %eax,%cr3 # load ptd addr into mmu
+ movl %cr0,%eax # get control word
+#ifdef USE_486_WRITE_PROTECT
+	orl	$CR0_PE|CR0_PG|CR0_WP,%eax	# and let's page!
+#else
+	orl	$CR0_PE|CR0_PG,%eax	# and let's page!
+#endif
+ movl %eax,%cr0 # NOW!
+
+ pushl $begin # jump to high mem!
+ ret
+
+begin: /* now running relocated at SYSTEM where the system is linked to run */
+
+ .globl _Crtat
+ movl _Crtat,%eax
+ subl $0xfe0a0000,%eax
+ movl _atdevphys,%edx # get pte PA
+ subl _KPTphys,%edx # remove base of ptes, now have phys offset
+ shll $ PGSHIFT-2,%edx # corresponding to virt offset
+ addl $ SYSTEM,%edx # add virtual base
+ movl %edx, _atdevbase
+ addl %eax,%edx
+ movl %edx,_Crtat
+
+ /* set up bootstrap stack */
+ movl $ _kstack+UPAGES*NBPG-4*12,%esp # bootstrap stack end location
+ xorl %eax,%eax # mark end of frames
+ movl %eax,%ebp
+ movl _proc0paddr, %eax
+ movl %esi, PCB_CR3(%eax)
+
+ lea 7*NBPG(%esi),%esi # skip past stack.
+ pushl %esi
+
+ /* relocate debugger gdt entries */
+
+ movl $_gdt+8*9,%eax # adjust slots 9-17
+ movl $9,%ecx
+reloc_gdt:
+ movb $0xfe,7(%eax) # top byte of base addresses, was 0,
+ addl $8,%eax # now SYSTEM>>24
+ loop reloc_gdt
+
+ cmpl $0,_bdb_exists
+ je 1f
+ int $3
+1:
+
+ call _init386 # wire 386 chip for unix operation
+
+ movl $0,_PTD
+ call _main
+ popl %esi
+
+ .globl __ucodesel,__udatasel
+ movl __ucodesel,%eax
+ movl __udatasel,%ecx
+ # build outer stack frame
+ pushl %ecx # user ss
+ pushl $ USRSTACK # user esp
+ pushl %eax # user cs
+ pushl $0 # user ip
+ movl %cx,%ds
+ movl %cx,%es
+ movl %ax,%fs # double map cs to fs
+ movl %cx,%gs # and ds to gs
+ lret # goto user!
+
+ pushl $lretmsg1 /* "should never get here!" */
+ call _panic
+lretmsg1:
+ .asciz "lret: toinit\n"
+
+
+ .set exec,59
+ .set exit,1
+
+#define LCALL(x,y) .byte 0x9a ; .long y; .word x
+/*
+ * Icode is copied out to process 1 to exec /etc/init.
+ * If the exec fails, process 1 exits.
+ */
+ENTRY(icode)
+ # pushl $argv-_icode # gas fucks up again
+ movl $argv,%eax
+ subl $_icode,%eax
+ pushl %eax
+
+ # pushl $init-_icode
+ movl $init,%eax
+ subl $_icode,%eax
+ pushl %eax
+ pushl %eax # dummy out rta
+
+ movl %esp,%ebp
+ movl $exec,%eax
+ LCALL(0x7,0x0)
+ pushl %eax
+ movl $exit,%eax
+ pushl %eax # dummy out rta
+ LCALL(0x7,0x0)
+
+init:
+ .asciz "/sbin/init"
+ ALIGN_DATA
+argv:
+ .long init+6-_icode # argv[0] = "init" ("/sbin/init" + 6)
+ .long eicode-_icode # argv[1] follows icode after copyout
+ .long 0
+eicode:
+
+ .globl _szicode
+_szicode:
+ .long _szicode-_icode
+
+ENTRY(sigcode)
+ call 12(%esp)
+ lea 28(%esp),%eax # scp (the call may have clobbered the
+ # copy at 8(%esp))
+ # XXX - use genassym
+ pushl %eax
+ pushl %eax # junk to fake return address
+ movl $103,%eax # sigreturn()
+ LCALL(0x7,0) # enter kernel with args on stack
+ hlt # never gets here
+
+ .globl _szsigcode
+_szsigcode:
+ .long _szsigcode-_sigcode
+
+ /*
+ * Support routines for GCC
+ */
+ENTRY(__udivsi3)
+ movl 4(%esp),%eax
+ xorl %edx,%edx
+ divl 8(%esp)
+ ret
+
+ENTRY(__divsi3)
+ movl 4(%esp),%eax
+ cltd
+ idivl 8(%esp)
+ ret
+
+ /*
+ * I/O bus instructions via C
+ */
+ENTRY(inb)
+ movl 4(%esp),%edx
+ subl %eax,%eax # clr eax
+ NOP
+ inb %dx,%al
+ ret
+
+
+ENTRY(inw)
+ movl 4(%esp),%edx
+ subl %eax,%eax # clr eax
+ NOP
+ inw %dx,%ax
+ ret
+
+
+ENTRY(rtcin)
+ movl 4(%esp),%eax
+ outb %al,$0x70
+ subl %eax,%eax # clr eax
+ inb $0x71,%al
+ ret
+
+ENTRY(outb)
+ movl 4(%esp),%edx
+ NOP
+ movl 8(%esp),%eax
+ outb %al,%dx
+ NOP
+ ret
+
+ENTRY(outw)
+ movl 4(%esp),%edx
+ NOP
+ movl 8(%esp),%eax
+ outw %ax,%dx
+ NOP
+ ret
+
+ /*
+ * void bzero(void *base, u_int cnt)
+ */
+
+ENTRY(bzero)
+ pushl %edi
+ movl 8(%esp),%edi
+ movl 12(%esp),%ecx
+ xorl %eax,%eax
+ shrl $2,%ecx
+ cld
+ rep
+ stosl
+ movl 12(%esp),%ecx
+ andl $3,%ecx
+ rep
+ stosb
+ popl %edi
+ ret
+
+ /*
+ * fillw (pat,base,cnt)
+ */
+
+ENTRY(fillw)
+ pushl %edi
+ movl 8(%esp),%eax
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ stosw
+ popl %edi
+ ret
+
+ENTRY(bcopyb)
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards. */
+ addl %ecx,%esi
+ std
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ENTRY(bcopyw)
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ shrl $1,%ecx /* copy by 16-bit words */
+ rep
+ movsw
+ adc %ecx,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ std
+ andl $1,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ movl 20(%esp),%ecx /* copy remainder by 16-bit words */
+ shrl $1,%ecx
+ decl %esi
+ decl %edi
+ rep
+ movsw
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ENTRY(bcopyx)
+ movl 16(%esp),%eax
+ cmpl $2,%eax
+ je _bcopyw
+ cmpl $4,%eax
+ jne _bcopyb
+ /*
+ * Fall through to bcopy. ENTRY() provides harmless fill bytes.
+ */
+
+ /*
+ * (ov)bcopy (src,dst,cnt)
+ * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
+ * Changed by bde to not bother returning %eax = 0.
+ */
+
+ENTRY(ovbcopy)
+ENTRY(bcopy)
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ shrl $2,%ecx /* copy by 32-bit words */
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ std
+ andl $3,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ movl 20(%esp),%ecx /* copy remainder by 32-bit words */
+ shrl $2,%ecx
+ subl $3,%esi
+ subl $3,%edi
+ rep
+ movsl
+ popl %edi
+ popl %esi
+ cld
+ ret
+
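bcopy/ovbcopy above handle overlap the way a C memmove does: copy forwards when the destination lies below the source, otherwise copy backwards from the end. A byte-at-a-time, hypothetical C equivalent of that choice (the real routine also moves 32-bit words for speed):

    #include <stdio.h>

    /* Same argument order as bcopy: source first, then destination. */
    static void
    ovbcopy_c(const void *src, void *dst, size_t len)
    {
    	const unsigned char *s = src;
    	unsigned char *d = dst;

    	if (d < s) {
    		while (len-- > 0)	/* no overlap hazard: copy forwards */
    			*d++ = *s++;
    	} else {
    		s += len;		/* copy backwards from the end */
    		d += len;
    		while (len-- > 0)
    			*--d = *--s;
    	}
    }

    int
    main(void)
    {
    	char buf[] = "abcdef";

    	ovbcopy_c(buf, buf + 2, 4);	/* overlapping shift to the right */
    	printf("%s\n", buf);		/* prints "ababcd" */
    	return (0);
    }
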
+#ifdef notdef
+ENTRY(copyout)
+ movl _curpcb, %eax
+ movl $cpyflt, PCB_ONFAULT(%eax) # in case we page/protection violate
+ pushl %esi
+ pushl %edi
+ pushl %ebx
+ movl 16(%esp), %esi
+ movl 20(%esp), %edi
+ movl 24(%esp), %ebx
+
+ /* first, check to see if "write fault" */
+1: movl %edi, %eax
+#ifdef notyet
+ shrl $IDXSHIFT, %eax /* fetch pte associated with address */
+ andb $0xfc, %al
+ movl _PTmap(%eax), %eax
+
+ andb $7, %al /* if we are the one case that won't trap... */
+ cmpb $5, %al
+ jne 2f
+ /* ... then simulate the trap! */
+ pushl %edi
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+
+ cmpl $0, %eax /* if not ok, return */
+ jne cpyflt
+ /* otherwise, continue with reference */
+2:
+ movl %edi, %eax /* calculate remainder this pass */
+ andl $0xfffff000, %eax
+ movl $NBPG, %ecx
+ subl %eax, %ecx
+ cmpl %ecx, %ebx
+ jle 3f
+ movl %ebx, %ecx
+3: subl %ecx, %ebx
+ movl %ecx, %edx
+#else
+ movl %ebx, %ecx
+ movl %ebx, %edx
+#endif
+
+ shrl $2,%ecx /* movem */
+ cld
+ rep
+ movsl
+ movl %edx, %ecx /* don't depend on ecx here! */
+ andl $3, %ecx
+ rep
+ movsb
+
+#ifdef notyet
+ cmpl $0, %ebx
+ jl 1b
+#endif
+
+ popl %ebx
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ENTRY(copyin)
+ movl _curpcb,%eax
+ movl $cpyflt,PCB_ONFAULT(%eax) # in case we page/protection violate
+ pushl %esi
+ pushl %edi
+ pushl %ebx # XXX - not used, but affects stack offsets
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ shrl $2,%ecx
+ cld
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx
+ rep
+ movsb
+ popl %ebx
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+cpyflt:
+ popl %ebx
+ popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $ EFAULT,%eax
+ ret
+#else
+ENTRY(copyout)
+ movl _curpcb,%eax
+ movl $cpyflt,PCB_ONFAULT(%eax) # in case we page/protection violate
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ shrl $2,%ecx
+ cld
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ENTRY(copyin)
+ movl _curpcb,%eax
+ movl $cpyflt,PCB_ONFAULT(%eax) # in case we page/protection violate
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ shrl $2,%ecx
+ cld
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+cpyflt: popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $ EFAULT,%eax
+ ret
+
+#endif
+
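copyin() and copyout() lean on pcb_onfault: before touching user memory they record a recovery label (cpyflt), and the page-fault handler resumes there, so a bad user pointer becomes EFAULT instead of a panic. The following is only a rough user-space analogue of that idea, using SIGSEGV plus sigsetjmp where the kernel uses its trap path; the details differ from the real mechanism.

    #include <stdio.h>
    #include <errno.h>
    #include <setjmp.h>
    #include <signal.h>

    static sigjmp_buf onfault;

    static void
    segv(int sig)
    {
    	(void)sig;
    	siglongjmp(onfault, 1);		/* the "cpyflt" exit path */
    }

    static int
    copyin_like(const char *uaddr, char *kaddr, size_t len)
    {
    	if (sigsetjmp(onfault, 1) != 0)
    		return (EFAULT);	/* faulted: report, don't crash */
    	for (size_t i = 0; i < len; i++)
    		kaddr[i] = uaddr[i];	/* faults here if uaddr is bogus */
    	return (0);
    }

    int
    main(void)
    {
    	char dst[16];

    	signal(SIGSEGV, segv);
    	printf("good copy: %d\n", copyin_like("hello", dst, 6));
    	printf("bad copy:  %d (EFAULT)\n",
    	    copyin_like((const char *)16, dst, 6));
    	return (0);
    }
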
+ # insb(port,addr,cnt)
+ENTRY(insb)
+ pushl %edi
+ movw 8(%esp),%dx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ insb
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ # insw(port,addr,cnt)
+ENTRY(insw)
+ pushl %edi
+ movw 8(%esp),%dx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ .byte 0x66,0xf2,0x6d # rep insw
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ # outsw(port,addr,cnt)
+ENTRY(outsw)
+ pushl %esi
+ movw 8(%esp),%dx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ .byte 0x66,0xf2,0x6f # rep outsw
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+ # outsb(port,addr,cnt)
+ENTRY(outsb)
+ pushl %esi
+ movw 8(%esp),%dx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ outsb
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+ /*
+ * void lgdt(struct region_descriptor *rdp);
+ */
+ENTRY(lgdt)
+ /* reload the descriptor table */
+ movl 4(%esp),%eax
+ lgdt (%eax)
+ /* flush the prefetch q */
+ jmp 1f
+ nop
+1:
+ /* reload "stale" selectors */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ movl %ax,%ss
+
+ /* reload code selector by turning return into intersegmental return */
+ movl (%esp),%eax
+ pushl %eax
+ # movl $KCSEL,4(%esp)
+ movl $8,4(%esp)
+ lret
+
+ /*
+ * void lidt(struct region_descriptor *rdp);
+ */
+ENTRY(lidt)
+ movl 4(%esp),%eax
+ lidt (%eax)
+ ret
+
+ /*
+ * void lldt(u_short sel)
+ */
+ENTRY(lldt)
+ lldt 4(%esp)
+ ret
+
+ /*
+ * void ltr(u_short sel)
+ */
+ENTRY(ltr)
+ ltr 4(%esp)
+ ret
+
+ /*
+ * void lcr3(caddr_t cr3)
+ */
+ ALIGN_TEXT
+ENTRY(load_cr3)
+ALTENTRY(lcr3)
+ movl 4(%esp),%eax
+ orl $ I386_CR3PAT,%eax
+ movl %eax,%cr3
+ ret
+
+ # tlbflush()
+ENTRY(tlbflush)
+ movl %cr3,%eax
+ orl $ I386_CR3PAT,%eax
+ movl %eax,%cr3
+ ret
+
+ # lcr0(cr0)
+ENTRY(lcr0)
+ALTENTRY(load_cr0)
+ movl 4(%esp),%eax
+ movl %eax,%cr0
+ ret
+
+ # rcr0()
+ENTRY(rcr0)
+ movl %cr0,%eax
+ ret
+
+ # rcr2()
+ENTRY(rcr2)
+ movl %cr2,%eax
+ ret
+
+ # rcr3()
+ENTRY(_cr3)
+ALTENTRY(rcr3)
+ movl %cr3,%eax
+ ret
+
+ # ssdtosd(*ssdp,*sdp)
+ENTRY(ssdtosd)
+ pushl %ebx
+ movl 8(%esp),%ecx
+ movl 8(%ecx),%ebx
+ shll $16,%ebx
+ movl (%ecx),%edx
+ roll $16,%edx
+ movb %dh,%bl
+ movb %dl,%bh
+ rorl $8,%ebx
+ movl 4(%ecx),%eax
+ movw %ax,%dx
+ andl $0xf0000,%eax
+ orl %eax,%ebx
+ movl 12(%esp),%ecx
+ movl %edx,(%ecx)
+ movl %ebx,4(%ecx)
+ popl %ebx
+ ret
+
+/*
+ * {fu,su},{byte,word}
+ */
+ALTENTRY(fuiword)
+ENTRY(fuword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ .byte 0x65 # use gs
+ movl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ENTRY(fusword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ movl 4(%esp),%edx
+ .byte 0x65 # use gs
+ movzwl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(fuibyte)
+ENTRY(fubyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ movl 4(%esp),%edx
+ .byte 0x65 # use gs
+ movzbl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ ALIGN_TEXT
+fusufault:
+ movl _curpcb,%ecx
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ decl %eax
+ ret
+
+ALTENTRY(suiword)
+ENTRY(suword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+
+#ifdef notdef
+ shrl $IDXSHIFT, %edx /* fetch pte associated with address */
+ andb $0xfc, %dl
+ movl _PTmap(%edx), %edx
+
+ andb $7, %dl /* if we are the one case that won't trap... */
+ cmpb $5 , %edx
+ jne 1f
+ /* ... then simulate the trap! */
+ pushl %edi
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+ cmpl $0, %eax /* if not ok, return */
+ jne fusufault
+ movl 8(%esp),%eax /* otherwise, continue with reference */
+1:
+ movl 4(%esp),%edx
+#endif
+ .byte 0x65 # use gs
+ movl %eax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ ret
+
+ENTRY(susword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+#ifdef notdef
+shrl $IDXSHIFT, %edx /* calculate pte address */
+andb $0xfc, %dl
+movl _PTmap(%edx), %edx
+andb $7, %edx /* if we are the one case that won't trap... */
+cmpb $5 , %edx
+jne 1f
+/* ..., then simulate the trap! */
+ pushl %edi
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+movl _curpcb, %ecx # restore trashed registers
+cmpl $0, %eax /* if not ok, return */
+jne fusufault
+movl 8(%esp),%eax
+1: movl 4(%esp),%edx
+#endif
+ .byte 0x65 # use gs
+ movw %ax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ ret
+
+ALTENTRY(suibyte)
+ENTRY(subyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+#ifdef notdef
+shrl $IDXSHIFT, %edx /* calculate pte address */
+andb $0xfc, %dl
+movl _PTmap(%edx), %edx
+andb $7, %edx /* if we are the one case that won't trap... */
+cmpb $5 , %edx
+jne 1f
+/* ..., then simulate the trap! */
+ pushl %edi
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+movl _curpcb, %ecx # restore trashed registers
+cmpl $0, %eax /* if not ok, return */
+jne fusufault
+movl 8(%esp),%eax
+1: movl 4(%esp),%edx
+#endif
+ .byte 0x65 # use gs
+	movb	%al,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ ret
+
+ENTRY(setjmp)
+ movl 4(%esp),%eax
+ movl %ebx, (%eax) # save ebx
+ movl %esp, 4(%eax) # save esp
+ movl %ebp, 8(%eax) # save ebp
+ movl %esi,12(%eax) # save esi
+ movl %edi,16(%eax) # save edi
+ movl (%esp),%edx # get rta
+ movl %edx,20(%eax) # save eip
+ xorl %eax,%eax # return (0);
+ ret
+
+ENTRY(longjmp)
+ movl 4(%esp),%eax
+ movl (%eax),%ebx # restore ebx
+ movl 4(%eax),%esp # restore esp
+ movl 8(%eax),%ebp # restore ebp
+ movl 12(%eax),%esi # restore esi
+ movl 16(%eax),%edi # restore edi
+ movl 20(%eax),%edx # get rta
+ movl %edx,(%esp) # put in return frame
+ xorl %eax,%eax # return (1);
+ incl %eax
+ ret
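setjmp()/longjmp() above save just the callee-saved registers, the stack pointer and the return address, which is all that is needed to resume at the setjmp call site. For comparison, the usual C-level usage pattern of the same control transfer, using the standard library versions rather than the kernel ones:

    #include <stdio.h>
    #include <setjmp.h>

    static jmp_buf env;

    static void
    fail(void)
    {
    	longjmp(env, 1);		/* unwind straight back to setjmp() */
    }

    int
    main(void)
    {
    	if (setjmp(env) == 0) {
    		fail();
    		printf("not reached\n");
    	} else
    		printf("recovered via longjmp\n");
    	return (0);
    }
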
+/*
+ * The following primitives manipulate the run queues.
+ * _whichqs tells which of the 32 queues _qs
+ * have processes in them. Setrq puts processes into queues, Remrq
+ * removes them from queues. The running process is on no queue,
+ * other processes are on a queue related to p->p_pri divided by 4,
+ * which shrinks the 0-127 range of priorities into the 32 available
+ * queues.
+ */
+
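As a companion to the comment above, here is a hypothetical user-space model of the same scheme: whichqs is a bitmask of non-empty queues, a 0-127 priority selects one of 32 doubly linked queues via p_pri / 4, and the dispatcher's "find a full q" step (the bsfl in swtch below) becomes find-first-set on the mask. All names and types are invented for the sketch.

    #include <stdio.h>
    #include <strings.h>			/* ffs() */

    struct qproc {
    	struct qproc	*p_link, *p_rlink;	/* doubly linked queue */
    	int		 p_pri;			/* 0-127 */
    };

    static struct qproc	qs[32];		/* queue headers */
    static unsigned int	whichqs;	/* bit i set => qs[i] non-empty */

    static void
    runq_init(void)
    {
    	for (int i = 0; i < 32; i++)
    		qs[i].p_link = qs[i].p_rlink = &qs[i];
    }

    static void
    setrq(struct qproc *p)		/* link on the tail of its queue */
    {
    	struct qproc *q = &qs[p->p_pri >> 2];

    	whichqs |= 1u << (p->p_pri >> 2);
    	p->p_link = q;
    	p->p_rlink = q->p_rlink;
    	q->p_rlink->p_link = p;
    	q->p_rlink = p;
    }

    static struct qproc *
    next_to_run(void)			/* the "bsfl; unlink" step */
    {
    	int bit = ffs((int)whichqs);

    	if (bit == 0)
    		return (NULL);		/* nothing runnable: idle */
    	struct qproc *q = &qs[bit - 1];
    	struct qproc *p = q->p_link;	/* front of the queue */

    	q->p_link = p->p_link;
    	p->p_link->p_rlink = q;
    	if (q->p_link == q)
    		whichqs &= ~(1u << (bit - 1));	/* queue went empty */
    	return (p);
    }

    int
    main(void)
    {
    	struct qproc a = { .p_pri = 40 }, b = { .p_pri = 8 };

    	runq_init();
    	setrq(&a);
    	setrq(&b);
    	struct qproc *first = next_to_run(), *second = next_to_run();
    	printf("pri %d runs before pri %d\n", first->p_pri, second->p_pri);
    	return (0);
    }
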
+ .globl _whichqs,_qs,_cnt,_panic
+ .comm _noproc,4
+ .comm _runrun,4
+
+/*
+ * Setrq(p)
+ *
+ * Call should be made at spl6(), and p->p_stat should be SRUN
+ */
+ENTRY(setrq)
+ movl 4(%esp),%eax
+ cmpl $0,P_RLINK(%eax) # should not be on q already
+ je set1
+ pushl $set2
+ call _panic
+set1:
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btsl %edx,_whichqs # set q full bit
+ shll $3,%edx
+ addl $_qs,%edx # locate q hdr
+ movl %edx,P_LINK(%eax) # link process on tail of q
+ movl P_RLINK(%edx),%ecx
+ movl %ecx,P_RLINK(%eax)
+ movl %eax,P_RLINK(%edx)
+ movl %eax,P_LINK(%ecx)
+ ret
+
+set2: .asciz "setrq"
+
+/*
+ * Remrq(p)
+ *
+ * Call should be made at spl6().
+ */
+ENTRY(remrq)
+ movl 4(%esp),%eax
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btrl %edx,_whichqs # clear full bit, panic if clear already
+ jb rem1
+ pushl $rem3
+ call _panic
+rem1:
+ pushl %edx
+ movl P_LINK(%eax),%ecx # unlink process
+ movl P_RLINK(%eax),%edx
+ movl %edx,P_RLINK(%ecx)
+ movl P_RLINK(%eax),%ecx
+ movl P_LINK(%eax),%edx
+ movl %edx,P_LINK(%ecx)
+ popl %edx
+ movl $_qs,%ecx
+ shll $3,%edx
+ addl %edx,%ecx
+ cmpl P_LINK(%ecx),%ecx # q still has something?
+ je rem2
+ shrl $3,%edx # yes, set bit as still full
+ btsl %edx,_whichqs
+rem2:
+ movl $0,P_RLINK(%eax) # zap reverse link to indicate off list
+ ret
+
+rem3: .asciz "remrq"
+sw0: .asciz "swtch"
+
+/*
+ * When no processes are on the runq, Swtch branches to idle
+ * to wait for something to come ready.
+ */
+ .globl Idle
+ ALIGN_TEXT
+Idle:
+sti_for_idle:
+ sti
+ SHOW_STI
+ ALIGN_TEXT
+idle:
+ call _spl0
+ cmpl $0,_whichqs
+ jne sw1
+ hlt # wait for interrupt
+ jmp idle
+
+ SUPERALIGN_TEXT /* so profiling doesn't lump Idle with swtch().. */
+badsw:
+ pushl $sw0
+ call _panic
+ /*NOTREACHED*/
+
+/*
+ * Swtch()
+ */
+ENTRY(swtch)
+
+ incl _cnt+V_SWTCH
+
+ /* switch to new process. first, save context as needed */
+
+ movl _curproc,%ecx
+
+ /* if no process to save, don't bother */
+ testl %ecx,%ecx
+ je sw1
+
+ movl P_ADDR(%ecx),%ecx
+
+ movl (%esp),%eax # Hardware registers
+ movl %eax, PCB_EIP(%ecx)
+ movl %ebx, PCB_EBX(%ecx)
+ movl %esp, PCB_ESP(%ecx)
+ movl %ebp, PCB_EBP(%ecx)
+ movl %esi, PCB_ESI(%ecx)
+ movl %edi, PCB_EDI(%ecx)
+
+#ifdef NPX
+ /* have we used fp, and need a save? */
+ mov _curproc,%eax
+ cmp %eax,_npxproc
+ jne 1f
+ pushl %ecx /* h/w bugs make saving complicated */
+ leal PCB_SAVEFPU(%ecx),%eax
+ pushl %eax
+ call _npxsave /* do it in a big C function */
+ popl %eax
+ popl %ecx
+1:
+#endif
+
+ movl _CMAP2,%eax # save temporary map PTE
+ movl %eax,PCB_CMAP2(%ecx) # in our context
+ movl $0,_curproc # out of process
+
+ # movw _cpl, %ax
+ # movw %ax, PCB_IML(%ecx) # save ipl
+
+ /* save is done, now choose a new process or idle */
+sw1:
+ cli
+ SHOW_CLI
+ movl _whichqs,%edi
+2:
+ # XXX - bsf is sloow
+ bsfl %edi,%eax # find a full q
+ je sti_for_idle # if none, idle
+ # XX update whichqs?
+swfnd:
+ btrl %eax,%edi # clear q full status
+ jnb 2b # if it was clear, look for another
+ movl %eax,%ebx # save which one we are using
+
+ shll $3,%eax
+ addl $_qs,%eax # select q
+ movl %eax,%esi
+
+#ifdef DIAGNOSTIC
+ cmpl P_LINK(%eax),%eax # linked to self? (e.g. not on list)
+ je badsw # not possible
+#endif
+
+ movl P_LINK(%eax),%ecx # unlink from front of process q
+ movl P_LINK(%ecx),%edx
+ movl %edx,P_LINK(%eax)
+ movl P_RLINK(%ecx),%eax
+ movl %eax,P_RLINK(%edx)
+
+ cmpl P_LINK(%ecx),%esi # q empty
+ je 3f
+ btsl %ebx,%edi # nope, set to indicate full
+3:
+ movl %edi,_whichqs # update q status
+
+ movl $0,%eax
+ movl %eax,_want_resched
+
+#ifdef DIAGNOSTIC
+ cmpl %eax,P_WCHAN(%ecx)
+ jne badsw
+ cmpb $ SRUN,P_STAT(%ecx)
+ jne badsw
+#endif
+
+ movl %eax,P_RLINK(%ecx) /* isolate process to run */
+ movl P_ADDR(%ecx),%edx
+ movl PCB_CR3(%edx),%ebx
+
+ /* switch address space */
+ movl %ebx,%cr3
+
+ /* restore context */
+ movl PCB_EBX(%edx), %ebx
+ movl PCB_ESP(%edx), %esp
+ movl PCB_EBP(%edx), %ebp
+ movl PCB_ESI(%edx), %esi
+ movl PCB_EDI(%edx), %edi
+ movl PCB_EIP(%edx), %eax
+ movl %eax, (%esp)
+
+ movl PCB_CMAP2(%edx),%eax # get temporary map
+ movl %eax,_CMAP2 # reload temporary map PTE
+
+ movl %ecx,_curproc # into next process
+ movl %edx,_curpcb
+
+ pushl %edx # save p to return
+/*
+ * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
+ * I think restoring the cpl is unnecessary, but we must turn off the cli
+ * now that spl*() don't do it as a side effect.
+ */
+ pushl PCB_IML(%edx)
+ sti
+ SHOW_STI
+#if 0
+ call _splx
+#endif
+ addl $4,%esp
+/*
+ * XXX - 0.0 gets here via swtch_to_inactive(). I think 0.1 gets here in the
+ * same way. Better return a value.
+ */
+ popl %eax # return (p);
+ ret
+
+ENTRY(mvesp)
+ movl %esp,%eax
+ ret
+/*
+ * struct proc *swtch_to_inactive(p) ; struct proc *p;
+ *
+ * At exit of a process, move off the address space of the
+ * process and onto a "safe" one. Then, on a temporary stack
+ * return and run code that disposes of the old state.
+ * Since this code requires a parameter from the "old" stack,
+ * pass it back as a return value.
+ */
+ENTRY(swtch_to_inactive)
+ popl %edx # old pc
+ popl %eax # arg, our return value
+ movl _IdlePTD,%ecx
+ movl %ecx,%cr3 # good bye address space
+ #write buffer?
+ movl $tmpstk-4,%esp # temporary stack, compensated for call
+ jmp %edx # return, execute remainder of cleanup
+
+/*
+ * savectx(pcb, altreturn)
+ * Update pcb, saving current processor state and arranging
+ * for alternate return ala longjmp in swtch if altreturn is true.
+ */
+ENTRY(savectx)
+ movl 4(%esp), %ecx
+ movw _cpl, %ax
+ movw %ax, PCB_IML(%ecx)
+ movl (%esp), %eax
+ movl %eax, PCB_EIP(%ecx)
+ movl %ebx, PCB_EBX(%ecx)
+ movl %esp, PCB_ESP(%ecx)
+ movl %ebp, PCB_EBP(%ecx)
+ movl %esi, PCB_ESI(%ecx)
+ movl %edi, PCB_EDI(%ecx)
+
+#ifdef NPX
+ /*
+ * If npxproc == NULL, then the npx h/w state is irrelevant and the
+ * state had better already be in the pcb. This is true for forks
+ * but not for dumps (the old book-keeping with FP flags in the pcb
+ * always lost for dumps because the dump pcb has 0 flags).
+ *
+ * If npxproc != NULL, then we have to save the npx h/w state to
+ * npxproc's pcb and copy it to the requested pcb, or save to the
+ * requested pcb and reload. Copying is easier because we would
+ * have to handle h/w bugs for reloading. We used to lose the
+ * parent's npx state for forks by forgetting to reload.
+ */
+ mov _npxproc,%eax
+ testl %eax,%eax
+ je 1f
+
+ pushl %ecx
+ movl P_ADDR(%eax),%eax
+ leal PCB_SAVEFPU(%eax),%eax
+ pushl %eax
+ pushl %eax
+ call _npxsave
+ popl %eax
+ popl %eax
+ popl %ecx
+
+ pushl %ecx
+ pushl $108+8*2 /* XXX h/w state size + padding */
+ leal PCB_SAVEFPU(%ecx),%ecx
+ pushl %ecx
+ pushl %eax
+ call _bcopy
+ addl $12,%esp
+ popl %ecx
+1:
+#endif
+
+ movl _CMAP2, %edx # save temporary map PTE
+ movl %edx, PCB_CMAP2(%ecx) # in our context
+
+ cmpl $0, 8(%esp)
+ je 1f
+ movl %esp, %edx # relocate current sp relative to pcb
+ subl $_kstack, %edx # (sp is relative to kstack):
+ addl %edx, %ecx # pcb += sp - kstack;
+ movl %eax, (%ecx) # write return pc at (relocated) sp@
+ # this mess deals with replicating register state gcc hides
+ movl 12(%esp),%eax
+ movl %eax,12(%ecx)
+ movl 16(%esp),%eax
+ movl %eax,16(%ecx)
+ movl 20(%esp),%eax
+ movl %eax,20(%ecx)
+ movl 24(%esp),%eax
+ movl %eax,24(%ecx)
+1:
+ xorl %eax, %eax # return 0
+ ret
+
+/*
+ * addupc(int pc, struct uprof *up, int ticks):
+ * update profiling information for the user process.
+ */
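A C reading of the address computation done below may help; the field names track the PR_* offsets used by the assembly, but the struct itself is a stand-in, not the real struct uprof:

    struct uprof_sketch {                       /* illustrative layout only */
            char            *pr_base;           /* profiling buffer */
            unsigned int    pr_size;            /* buffer size */
            unsigned int    pr_off;             /* lowest profiled pc */
            unsigned int    pr_scale;           /* fixed-point scale factor */
    };

    static void addupc_sketch(unsigned int pc, struct uprof_sketch *up, int ticks)
    {
            unsigned int off;

            if (pc < up->pr_off)                /* subl PR_OFF / jl: below range */
                    return;
            pc -= up->pr_off;
            off = (((pc >> 1) * up->pr_scale) >> 15) & ~1u;  /* scale into buffer */
            if (off > up->pr_size)              /* ja L1: beyond the buffer */
                    return;
            *(int *)(up->pr_base + off) += ticks;  /* the real code arms
                                                    * pcb_onfault around this */
    }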
+
+ENTRY(addupc)
+ pushl %ebp
+ movl %esp,%ebp
+ movl 12(%ebp),%edx /* up */
+ movl 8(%ebp),%eax /* pc */
+
+ subl PR_OFF(%edx),%eax /* pc -= up->pr_off */
+ jl L1 /* if (pc < 0) return */
+
+ shrl $1,%eax /* praddr = pc >> 1 */
+ imull PR_SCALE(%edx),%eax /* praddr *= up->pr_scale */
+ shrl $15,%eax /* praddr = praddr >> 15 */
+ andl $-2,%eax /* praddr &= ~1 */
+
+ cmpl PR_SIZE(%edx),%eax /* if (praddr > up->pr_size) return */
+ ja L1
+
+/* addl %eax,%eax /* praddr -> word offset */
+ addl PR_BASE(%edx),%eax /* praddr += up-> pr_base */
+ movl 16(%ebp),%ecx /* ticks */
+
+ movl _curpcb,%edx
+ movl $proffault,PCB_ONFAULT(%edx)
+ addl %ecx,(%eax) /* storage location += ticks */
+ movl $0,PCB_ONFAULT(%edx)
+L1:
+ leave
+ ret
+
+ ALIGN_TEXT
+proffault:
+ /* if we get a fault, then kill profiling all together */
+ movl $0,PCB_ONFAULT(%edx) /* squish the fault handler */
+ movl 12(%ebp),%ecx
+ movl $0,PR_SCALE(%ecx) /* up->pr_scale = 0 */
+ leave
+ ret
+
+ # To be done:
+ ENTRY(astoff)
+ ret
+
+ .data
+ ALIGN_DATA
+ .globl _cyloffset, _curpcb
+_cyloffset: .long 0
+ .globl _proc0paddr
+_proc0paddr: .long 0
+LF: .asciz "swtch %x"
+ ALIGN_DATA
+
+#if 0
+#define PANIC(msg) xorl %eax,%eax; movl %eax,_waittime; pushl 1f; \
+ call _panic; MSG(msg)
+#define PRINTF(n,msg) pushal ; nop ; pushl 1f; call _printf; MSG(msg) ; \
+ popl %eax ; popal
+#define MSG(msg) .data; 1: .asciz msg; ALIGN_DATA; .text
+#endif /* 0 */
+
+/*
+ * Trap and fault vector routines
+ *
+ * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
+ * control. The sti's give the standard losing behaviour for ddb and kgdb.
+ */
+#define IDTVEC(name) ALIGN_TEXT; .globl _X/**/name; _X/**/name:
+#define TRAP(a) pushl $(a) ; jmp alltraps
+#ifdef KGDB
+#define BPTTRAP(a) sti; pushl $(a) ; jmp bpttraps
+#else
+#define BPTTRAP(a) sti; TRAP(a)
+#endif
+
+ .text
+IDTVEC(div)
+ pushl $0; TRAP(T_DIVIDE)
+IDTVEC(dbg)
+#ifdef BDBTRAP
+ BDBTRAP(dbg)
+#endif
+ pushl $0; BPTTRAP(T_TRCTRAP)
+IDTVEC(nmi)
+ pushl $0; TRAP(T_NMI)
+IDTVEC(bpt)
+#ifdef BDBTRAP
+ BDBTRAP(bpt)
+#endif
+ pushl $0; BPTTRAP(T_BPTFLT)
+IDTVEC(ofl)
+ pushl $0; TRAP(T_OFLOW)
+IDTVEC(bnd)
+ pushl $0; TRAP(T_BOUND)
+IDTVEC(ill)
+ pushl $0; TRAP(T_PRIVINFLT)
+IDTVEC(dna)
+ pushl $0; TRAP(T_DNA)
+IDTVEC(dble)
+ TRAP(T_DOUBLEFLT)
+ /*PANIC("Double Fault");*/
+IDTVEC(fpusegm)
+ pushl $0; TRAP(T_FPOPFLT)
+IDTVEC(tss)
+ TRAP(T_TSSFLT)
+ /*PANIC("TSS not valid");*/
+IDTVEC(missing)
+ TRAP(T_SEGNPFLT)
+IDTVEC(stk)
+ TRAP(T_STKFLT)
+IDTVEC(prot)
+ TRAP(T_PROTFLT)
+IDTVEC(page)
+ TRAP(T_PAGEFLT)
+IDTVEC(rsvd)
+ pushl $0; TRAP(T_RESERVED)
+IDTVEC(fpu)
+#ifdef NPX
+ /*
+ * Handle like an interrupt so that we can call npxintr to clear the
+ * error. It would be better to handle npx interrupts as traps but
+ * this is difficult for nested interrupts.
+ */
+ pushl $0 /* dummy error code */
+ pushl $T_ASTFLT
+ pushal
+ nop /* silly, the bug is for popal and it only
+ * bites when the next instruction has a
+ * complicated address mode */
+ pushl %ds
+ pushl %es /* now the stack frame is a trap frame */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ pushl _cpl
+ pushl $0 /* dummy unit to finish building intr frame */
+ incl _cnt+V_TRAP
+ call _npxintr
+ jmp doreti
+#else
+ pushl $0; TRAP(T_ARITHTRAP)
+#endif
+ /* 17 - 31 reserved for future exp */
+IDTVEC(rsvd0)
+ pushl $0; TRAP(17)
+IDTVEC(rsvd1)
+ pushl $0; TRAP(18)
+IDTVEC(rsvd2)
+ pushl $0; TRAP(19)
+IDTVEC(rsvd3)
+ pushl $0; TRAP(20)
+IDTVEC(rsvd4)
+ pushl $0; TRAP(21)
+IDTVEC(rsvd5)
+ pushl $0; TRAP(22)
+IDTVEC(rsvd6)
+ pushl $0; TRAP(23)
+IDTVEC(rsvd7)
+ pushl $0; TRAP(24)
+IDTVEC(rsvd8)
+ pushl $0; TRAP(25)
+IDTVEC(rsvd9)
+ pushl $0; TRAP(26)
+IDTVEC(rsvd10)
+ pushl $0; TRAP(27)
+IDTVEC(rsvd11)
+ pushl $0; TRAP(28)
+IDTVEC(rsvd12)
+ pushl $0; TRAP(29)
+IDTVEC(rsvd13)
+ pushl $0; TRAP(30)
+IDTVEC(rsvd14)
+ pushl $0; TRAP(31)
+
+ SUPERALIGN_TEXT
+alltraps:
+ pushal
+ nop
+ pushl %ds
+ pushl %es
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+calltrap:
+ incl _cnt+V_TRAP
+ call _trap
+ /*
+ * Return through doreti to handle ASTs. Have to change trap frame
+ * to interrupt frame.
+ */
+ movl $T_ASTFLT,4+4+32(%esp) /* new trap type (err code not used) */
+ pushl _cpl
+ pushl $0 /* dummy unit */
+ jmp doreti
+
+#ifdef KGDB
+/*
+ * This code checks for a kgdb trap, then falls through
+ * to the regular trap code.
+ */
+ ALIGN_TEXT
+bpttraps:
+ pushal
+ nop
+ pushl %es
+ pushl %ds
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
+ # non-kernel mode?
+ jne calltrap # yes
+ call _kgdb_trap_glue
+ jmp calltrap
+#endif
+
+/*
+ * Call gate entry for syscall
+ */
+
+ SUPERALIGN_TEXT
+IDTVEC(syscall)
+ pushfl # only for stupid carry bit and more stupid wait3 cc kludge
+ # XXX - also for direction flag (bzero, etc. clear it)
+ pushal # only need eax,ecx,edx - trap resaves others
+ nop
+ movl $KDSEL,%eax # switch to kernel segments
+ movl %ax,%ds
+ movl %ax,%es
+ incl _cnt+V_SYSCALL # kml 3/25/93
+ call _syscall
+ /*
+ * Return through doreti to handle ASTs. Have to change syscall frame
+ * to interrupt frame.
+ *
+ * XXX - we should have set up the frame earlier to avoid the
+ * following popal/pushal (not much can be done to avoid shuffling
+ * the flags). Consistent frames would simplify things all over.
+ */
+ movl 32+0(%esp),%eax /* old flags, shuffle to above cs:eip */
+ movl 32+4(%esp),%ebx /* `int' frame should have been ef, eip, cs */
+ movl 32+8(%esp),%ecx
+ movl %ebx,32+0(%esp)
+ movl %ecx,32+4(%esp)
+ movl %eax,32+8(%esp)
+ popal
+ nop
+ pushl $0 /* dummy error code */
+ pushl $T_ASTFLT
+ pushal
+ nop
+ movl __udatasel,%eax /* switch back to user segments */
+ push %eax /* XXX - better to preserve originals? */
+ push %eax
+ pushl _cpl
+ pushl $0
+ jmp doreti
+
+ENTRY(htonl)
+ENTRY(ntohl)
+ movl 4(%esp),%eax
+#ifdef i486
+ /* XXX */
+ /* Since Gas 1.38 does not grok bswap this has been coded as the
+ * equivalent bytes. This can be changed back to bswap when we
+ * upgrade to a newer version of Gas */
+ /* bswap %eax */
+ .byte 0x0f
+ .byte 0xc8
+#else
+ xchgb %al,%ah
+ roll $16,%eax
+ xchgb %al,%ah
+#endif
+ ret
+
+ENTRY(htons)
+ENTRY(ntohs)
+ movzwl 4(%esp),%eax
+ xchgb %al,%ah
+ ret
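For reference, the non-486 htonl/ntohl path above is a full 32-bit byte reversal; written out in C (sketch only):

    #include <stdint.h>

    static uint32_t ntohl_sketch(uint32_t x)
    {
            x = (x & 0xffff0000u) | ((x & 0x00ffu) << 8) | ((x & 0xff00u) >> 8);
                                                    /* xchgb %al,%ah */
            x = (x << 16) | (x >> 16);              /* roll $16,%eax */
            x = (x & 0xffff0000u) | ((x & 0x00ffu) << 8) | ((x & 0xff00u) >> 8);
                                                    /* xchgb %al,%ah */
            return x;                               /* bytes now fully reversed */
    }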
+
+#ifdef SHOW_A_LOT
+
+/*
+ * 'show_bits' was too big when defined as a macro. The line length for some
+ * enclosing macro was too big for gas. Perhaps the code would have blown
+ * the cache anyway.
+ */
+
+ ALIGN_TEXT
+show_bits:
+ pushl %eax
+ SHOW_BIT(0)
+ SHOW_BIT(1)
+ SHOW_BIT(2)
+ SHOW_BIT(3)
+ SHOW_BIT(4)
+ SHOW_BIT(5)
+ SHOW_BIT(6)
+ SHOW_BIT(7)
+ SHOW_BIT(8)
+ SHOW_BIT(9)
+ SHOW_BIT(10)
+ SHOW_BIT(11)
+ SHOW_BIT(12)
+ SHOW_BIT(13)
+ SHOW_BIT(14)
+ SHOW_BIT(15)
+ popl %eax
+ ret
+
+ .data
+bit_colors:
+ .byte GREEN,RED,0,0
+ .text
+
+#endif /* SHOW_A_LOT */
+
+#include "i386/isa/vector.s"
+#include "i386/isa/icu.s"
diff --git a/sys/amd64/amd64/locore.s b/sys/amd64/amd64/locore.s
new file mode 100644
index 0000000..d558dba
--- /dev/null
+++ b/sys/amd64/amd64/locore.s
@@ -0,0 +1,1830 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)locore.s 7.3 (Berkeley) 5/13/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 5 00158
+ * -------------------- ----- ----------------------
+ *
+ * 06 Aug 92 Pace Willisson Allow VGA memory to be mapped
+ * 28 Nov 92 Frank MacLachlan Aligned addresses and data
+ * on 32bit boundaries.
+ * 25 Mar 93 Kevin Lahey Add syscall counter for vmstat
+ * 20 Apr 93 Bruce Evans New npx-0.5 code
+ * 25 Apr 93 Bruce Evans Support new interrupt code (intr-0.1)
+ */
+
+
+/*
+ * locore.s: 4BSD machine support for the Intel 386
+ * Preliminary version
+ * Written by William F. Jolitz, 386BSD Project
+ */
+
+#include "assym.s"
+#include "machine/psl.h"
+#include "machine/pte.h"
+
+#include "errno.h"
+
+#include "machine/trap.h"
+
+#include "machine/specialreg.h"
+#include "i386/isa/debug.h"
+
+#define KDSEL 0x10
+#define SEL_RPL_MASK 0x0003
+#define TRAPF_CS_OFF (13 * 4)
+
+/*
+ * Note: This version has been greatly munged to avoid various assembler
+ * errors that may be fixed in newer versions of gas. Perhaps newer
+ * versions will allow a more pleasant appearance.
+ */
+
+ .set IDXSHIFT,10
+ .set SYSTEM,0xFE000000 # virtual address of system start
+ /* note: gas copies the sign bit (i.e. arithmetic >>), so we can't do SYSTEM>>22! */
+ .set SYSPDROFF,0x3F8 # Page dir index of System Base
+
+#define ALIGN_DATA .align 2
+#define ALIGN_TEXT .align 2,0x90 /* 4-byte boundaries, NOP-filled */
+#define SUPERALIGN_TEXT .align 4,0x90 /* 16-byte boundaries better for 486 */
+
+/* NB: NOP now preserves registers so NOPs can be inserted anywhere */
+/* XXX: NOP and FASTER_NOP are misleadingly named */
+#ifdef BROKEN_HARDWARE_AND_OR_SOFTWARE /* XXX - rarely necessary */
+#define FASTER_NOP pushl %eax ; inb $0x84,%al ; popl %eax
+#define NOP pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
+#else
+#define FASTER_NOP
+#define NOP
+#endif
+
+/*
+ * PTmap is recursive pagemap at top of virtual address space.
+ * Within PTmap, the page directory can be found (third indirection).
+ */
+ .set PDRPDROFF,0x3F7 # Page dir index of Page dir
+ .globl _PTmap, _PTD, _PTDpde, _Sysmap
+ .set _PTmap,0xFDC00000
+ .set _PTD,0xFDFF7000
+ .set _Sysmap,0xFDFF8000
+ .set _PTDpde,0xFDFF7000+4*PDRPDROFF
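The recursive slot is easier to see with the arithmetic written out. A minimal sketch, assuming the 10/10/12 i386 address split and the constants set above (the helper name is made up):

    #include <stdint.h>

    #define PTMAP_BASE      0xFDC00000u         /* PDRPDROFF (0x3F7) << 22 */

    /* Because PDE 0x3F7 points back at the page directory itself, the PTE
     * that maps any virtual address va of the current address space shows
     * up in the 4MB window at PTmap, at index va >> 12.  One level further
     * up, the page directory itself appears at _PTD (0xFDFF7000). */
    static uint32_t *vtopte_sketch(uint32_t va)
    {
            return (uint32_t *)(PTMAP_BASE + (va >> 12) * 4);
    }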
+
+/*
+ * APTmap, APTD is the alternate recursive pagemap.
+ * It's used when modifying another process's page tables.
+ */
+ .set APDRPDROFF,0x3FE # Page dir index of Page dir
+ .globl _APTmap, _APTD, _APTDpde
+ .set _APTmap,0xFF800000
+ .set _APTD,0xFFBFE000
+ .set _APTDpde,0xFDFF7000+4*APDRPDROFF
+
+/*
+ * Access to each process's kernel stack is via a region of
+ * per-process address space (at the beginning), immediately above
+ * the user process stack.
+ */
+ .set _kstack, USRSTACK
+ .globl _kstack
+ .set PPDROFF,0x3F6
+ .set PPTEOFF,0x400-UPAGES # 0x3FE
+
+#define ENTRY(name) \
+ .globl _/**/name; ALIGN_TEXT; _/**/name:
+#define ALTENTRY(name) ENTRY(name)
+
+/*
+ * Initialization
+ */
+ .data
+ .globl _cpu,_cold,_boothowto,_bootdev,_cyloffset,_atdevbase,_atdevphys
+_cpu: .long 0 # are we 386, 386sx, or 486
+_cold: .long 1 # cold till we are not
+_atdevbase: .long 0 # location of start of iomem in virtual
+_atdevphys: .long 0 # location of device mapping ptes (phys)
+
+ .globl _IdlePTD, _KPTphys
+_IdlePTD: .long 0
+_KPTphys: .long 0
+
+ .space 512
+tmpstk:
+ .text
+ .globl start
+start: movw $0x1234,%ax
+ movw %ax,0x472 # warm boot
+ jmp 1f
+ .space 0x500 # skip over warm boot shit
+
+ /*
+ * pass parameters on stack (howto, bootdev, unit, cyloffset)
+ * note: (%esp) is return address of boot
+ * ( if we want to hold onto /boot, it's physical %esp up to _end)
+ */
+
+ 1: movl 4(%esp),%eax
+ movl %eax,_boothowto-SYSTEM
+ movl 8(%esp),%eax
+ movl %eax,_bootdev-SYSTEM
+ movl 12(%esp),%eax
+ movl %eax, _cyloffset-SYSTEM
+
+ /*
+ * Finished with old stack; load new %esp now instead of later so
+ * we can trace this code without having to worry about the trace
+ * trap clobbering the memory test or the zeroing of the bss+bootstrap
+ * page tables.
+ *
+ * XXX - wdboot clears the bss after testing that this is safe.
+ * This is too wasteful - memory below 640K is scarce. The boot
+ * program should check:
+ * text+data <= &stack_variable - more_space_for_stack
+ * text+data+bss+pad+space_for_page_tables <= end_of_memory
+ * Oops, the gdt is in the carcass of the boot program so clearing
+ * the rest of memory is still not possible.
+ */
+ movl $ tmpstk-SYSTEM,%esp # bootstrap stack end location
+
+#ifdef garbage
+ /* count up memory */
+
+ xorl %eax,%eax # start with base memory at 0x0
+ #movl $ 0xA0000/NBPG,%ecx # look every 4K up to 640K
+ movl $ 0xA0,%ecx # look every 4K up to 640K
+1: movl (%eax),%ebx # save location to check
+ movl $0xa55a5aa5,(%eax) # write test pattern
+ /* flush stupid cache here! (with bcopy (0,0,512*1024) ) */
+ cmpl $0xa55a5aa5,(%eax) # does not check yet for rollover
+ jne 2f
+ movl %ebx,(%eax) # restore memory
+ addl $ NBPG,%eax
+ loop 1b
+2: shrl $12,%eax
+ movl %eax,_Maxmem-SYSTEM
+
+ movl $0x100000,%eax # next, tally remaining memory
+ #movl $((0xFFF000-0x100000)/NBPG),%ecx
+ movl $(0xFFF-0x100),%ecx
+1: movl (%eax),%ebx # save location to check
+ movl $0xa55a5aa5,(%eax) # write test pattern
+ cmpl $0xa55a5aa5,(%eax) # does not check yet for rollover
+ jne 2f
+ movl %ebx,(%eax) # restore memory
+ addl $ NBPG,%eax
+ loop 1b
+2: shrl $12,%eax
+ movl %eax,_Maxmem-SYSTEM
+#endif
+
+/* find end of kernel image */
+ movl $_end-SYSTEM,%ecx
+ addl $ NBPG-1,%ecx
+ andl $~(NBPG-1),%ecx
+ movl %ecx,%esi
+
+/* clear bss and memory for bootstrap pagetables. */
+ movl $_edata-SYSTEM,%edi
+ subl %edi,%ecx
+ addl $(UPAGES+5)*NBPG,%ecx
+/*
+ * Virtual address space of kernel:
+ *
+ * text | data | bss | page dir | proc0 kernel stack | usr stk map | Sysmap
+ * 0 1 2 3 4
+ */
+ xorl %eax,%eax # pattern
+ cld
+ rep
+ stosb
+
+ movl %esi,_IdlePTD-SYSTEM /*physical address of Idle Address space */
+
+#define fillkpt \
+1: movl %eax,(%ebx) ; \
+ addl $ NBPG,%eax ; /* increment physical address */ \
+ addl $4,%ebx ; /* next pte */ \
+ loop 1b ;
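In C terms the fillkpt macro is just the loop below (names are mine; NBPG is 4096 on the i386):

    /* %ebx = pt (first pte slot), %eax = physflags (phys page | PG_* bits),
     * %ecx = n: write n consecutive ptes mapping consecutive physical pages. */
    static void fillkpt_sketch(unsigned int *pt, unsigned int physflags, unsigned int n)
    {
            while (n--) {
                    *pt++ = physflags;      /* movl %eax,(%ebx); addl $4,%ebx */
                    physflags += 4096;      /* addl $NBPG,%eax */
            }
    }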
+
+/*
+ * Map Kernel
+ * N.B. don't bother with making kernel text RO, as 386
+ * ignores R/W AND U/S bits on kernel access (only v works) !
+ *
+ * First step - build page tables
+ */
+ movl %esi,%ecx # this much memory,
+ shrl $ PGSHIFT,%ecx # for this many pte s
+ addl $ UPAGES+4,%ecx # including our early context
+ movl $0xa0,%ecx # XXX - cover debugger pages
+ movl $PG_V|PG_KW,%eax # having these bits set,
+ lea (4*NBPG)(%esi),%ebx # physical address of KPT in proc 0,
+ movl %ebx,_KPTphys-SYSTEM # in the kernel page table,
+ fillkpt
+
+/* map I/O memory map */
+
+ movl $0x100-0xa0,%ecx # for this many pte s,
+ movl $(0xa0000|PG_V|PG_UW),%eax # having these bits set,(perhaps URW?) XXX 06 Aug 92
+ movl %ebx,_atdevphys-SYSTEM # remember phys addr of ptes
+ fillkpt
+
+ /* map proc 0's kernel stack into user page table page */
+
+ movl $ UPAGES,%ecx # for this many pte s,
+ lea (1*NBPG)(%esi),%eax # physical address in proc 0
+ lea (SYSTEM)(%eax),%edx
+ movl %edx,_proc0paddr-SYSTEM # remember VA for 0th process init
+ orl $PG_V|PG_KW,%eax # having these bits set,
+ lea (3*NBPG)(%esi),%ebx # physical address of stack pt in proc 0
+ addl $(PPTEOFF*4),%ebx
+ fillkpt
+
+/*
+ * Construct a page table directory
+ * (of page directory elements - pde's)
+ */
+ /* install a pde for temporary double map of bottom of VA */
+ lea (4*NBPG)(%esi),%eax # physical address of kernel page table
+ orl $ PG_V|PG_UW,%eax # pde entry is valid XXX 06 Aug 92
+ movl %eax,(%esi) # which is where temp maps!
+
+ /* kernel pde's */
+ movl $ 3,%ecx # for this many pde s,
+ lea (SYSPDROFF*4)(%esi), %ebx # offset of pde for kernel
+ fillkpt
+
+ /* install a pde recursively mapping page directory as a page table! */
+ movl %esi,%eax # phys address of ptd in proc 0
+ orl $ PG_V|PG_UW,%eax # pde entry is valid XXX 06 Aug 92
+ movl %eax, PDRPDROFF*4(%esi) # which is where PTmap maps!
+
+ /* install a pde to map kernel stack for proc 0 */
+ lea (3*NBPG)(%esi),%eax # physical address of pt in proc 0
+ orl $PG_V|PG_KW,%eax # pde entry is valid
+ movl %eax,PPDROFF*4(%esi) # which is where kernel stack maps!
+
+ /* copy and convert stuff from old gdt and idt for debugger */
+
+ cmpl $0x0375c339,0x96104 # XXX - debugger signature
+ jne 1f
+ movb $1,_bdb_exists-SYSTEM
+1:
+ pushal
+ subl $2*6,%esp
+
+ sgdt (%esp)
+ movl 2(%esp),%esi # base address of current gdt
+ movl $_gdt-SYSTEM,%edi
+ movl %edi,2(%esp)
+ movl $8*18/4,%ecx
+ rep # copy gdt
+ movsl
+ movl $_gdt-SYSTEM,-8+2(%edi) # adjust gdt self-ptr
+ movb $0x92,-8+5(%edi)
+
+ sidt 6(%esp)
+ movl 6+2(%esp),%esi # base address of current idt
+ movl 8+4(%esi),%eax # convert dbg descriptor to ...
+ movw 8(%esi),%ax
+ movl %eax,bdb_dbg_ljmp+1-SYSTEM # ... immediate offset ...
+ movl 8+2(%esi),%eax
+ movw %ax,bdb_dbg_ljmp+5-SYSTEM # ... and selector for ljmp
+ movl 24+4(%esi),%eax # same for bpt descriptor
+ movw 24(%esi),%ax
+ movl %eax,bdb_bpt_ljmp+1-SYSTEM
+ movl 24+2(%esi),%eax
+ movw %ax,bdb_bpt_ljmp+5-SYSTEM
+
+ movl $_idt-SYSTEM,%edi
+ movl %edi,6+2(%esp)
+ movl $8*4/4,%ecx
+ rep # copy idt
+ movsl
+
+ lgdt (%esp)
+ lidt 6(%esp)
+
+ addl $2*6,%esp
+ popal
+
+ /* load base of page directory, and enable mapping */
+ movl %esi,%eax # phys address of ptd in proc 0
+ orl $ I386_CR3PAT,%eax
+ movl %eax,%cr3 # load ptd addr into mmu
+ movl %cr0,%eax # get control word
+#ifdef USE_486_WRITE_PROTECT
+ orl $CR0_PE|CR0_PG|CR0_WP,%eax # and let's page!
+#else
+ orl $CR0_PE|CR0_PG,%eax # and let's page!
+#endif
+ movl %eax,%cr0 # NOW!
+
+ pushl $begin # jump to high mem!
+ ret
+
+begin: /* now running relocated at SYSTEM where the system is linked to run */
+
+ .globl _Crtat
+ movl _Crtat,%eax
+ subl $0xfe0a0000,%eax
+ movl _atdevphys,%edx # get pte PA
+ subl _KPTphys,%edx # remove base of ptes, now have phys offset
+ shll $ PGSHIFT-2,%edx # corresponding to virt offset
+ addl $ SYSTEM,%edx # add virtual base
+ movl %edx, _atdevbase
+ addl %eax,%edx
+ movl %edx,_Crtat
+
+ /* set up bootstrap stack */
+ movl $ _kstack+UPAGES*NBPG-4*12,%esp # bootstrap stack end location
+ xorl %eax,%eax # mark end of frames
+ movl %eax,%ebp
+ movl _proc0paddr, %eax
+ movl %esi, PCB_CR3(%eax)
+
+ lea 7*NBPG(%esi),%esi # skip past stack.
+ pushl %esi
+
+ /* relocate debugger gdt entries */
+
+ movl $_gdt+8*9,%eax # adjust slots 9-17
+ movl $9,%ecx
+reloc_gdt:
+ movb $0xfe,7(%eax) # top byte of base addresses, was 0,
+ addl $8,%eax # now SYSTEM>>24
+ loop reloc_gdt
+
+ cmpl $0,_bdb_exists
+ je 1f
+ int $3
+1:
+
+ call _init386 # wire 386 chip for unix operation
+
+ movl $0,_PTD
+ call _main
+ popl %esi
+
+ .globl __ucodesel,__udatasel
+ movl __ucodesel,%eax
+ movl __udatasel,%ecx
+ # build outer stack frame
+ pushl %ecx # user ss
+ pushl $ USRSTACK # user esp
+ pushl %eax # user cs
+ pushl $0 # user ip
+ movl %cx,%ds
+ movl %cx,%es
+ movl %ax,%fs # double map cs to fs
+ movl %cx,%gs # and ds to gs
+ lret # goto user!
+
+ pushl $lretmsg1 /* "should never get here!" */
+ call _panic
+lretmsg1:
+ .asciz "lret: toinit\n"
+
+
+ .set exec,59
+ .set exit,1
+
+#define LCALL(x,y) .byte 0x9a ; .long y; .word x
+/*
+ * Icode is copied out to process 1 to exec /etc/init.
+ * If the exec fails, process 1 exits.
+ */
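What the position-independent code below boils down to, once copied out into process 1, is roughly the following C. The argv handling is simplified here, and the real code makes the system calls directly through the call gate rather than via libc:

    #include <stddef.h>
    #include <unistd.h>

    static void icode_sketch(void)
    {
            char *argv[] = { "init", NULL, NULL };  /* argv[1] is really fixed up
                                                     * relative to the copied-out
                                                     * icode, see argv: below */
            execve("/sbin/init", argv, NULL);       /* .set exec,59 */
            _exit(1);                               /* the real code passes exec's
                                                     * error return to exit */
    }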
+ENTRY(icode)
+ # pushl $argv-_icode # gas fucks up again
+ movl $argv,%eax
+ subl $_icode,%eax
+ pushl %eax
+
+ # pushl $init-_icode
+ movl $init,%eax
+ subl $_icode,%eax
+ pushl %eax
+ pushl %eax # dummy out rta
+
+ movl %esp,%ebp
+ movl $exec,%eax
+ LCALL(0x7,0x0)
+ pushl %eax
+ movl $exit,%eax
+ pushl %eax # dummy out rta
+ LCALL(0x7,0x0)
+
+init:
+ .asciz "/sbin/init"
+ ALIGN_DATA
+argv:
+ .long init+6-_icode # argv[0] = "init" ("/sbin/init" + 6)
+ .long eicode-_icode # argv[1] follows icode after copyout
+ .long 0
+eicode:
+
+ .globl _szicode
+_szicode:
+ .long _szicode-_icode
+
+ENTRY(sigcode)
+ call 12(%esp)
+ lea 28(%esp),%eax # scp (the call may have clobbered the
+ # copy at 8(%esp))
+ # XXX - use genassym
+ pushl %eax
+ pushl %eax # junk to fake return address
+ movl $103,%eax # sigreturn()
+ LCALL(0x7,0) # enter kernel with args on stack
+ hlt # never gets here
+
+ .globl _szsigcode
+_szsigcode:
+ .long _szsigcode-_sigcode
+
+ /*
+ * Support routines for GCC
+ */
+ENTRY(__udivsi3)
+ movl 4(%esp),%eax
+ xorl %edx,%edx
+ divl 8(%esp)
+ ret
+
+ENTRY(__divsi3)
+ movl 4(%esp),%eax
+ cltd
+ idivl 8(%esp)
+ ret
+
+ /*
+ * I/O bus instructions via C
+ */
+ENTRY(inb)
+ movl 4(%esp),%edx
+ subl %eax,%eax # clr eax
+ NOP
+ inb %dx,%al
+ ret
+
+
+ENTRY(inw)
+ movl 4(%esp),%edx
+ subl %eax,%eax # clr eax
+ NOP
+ inw %dx,%ax
+ ret
+
+
+ENTRY(rtcin)
+ movl 4(%esp),%eax
+ outb %al,$0x70
+ subl %eax,%eax # clr eax
+ inb $0x71,%al
+ ret
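rtcin() reads a CMOS/RTC register: write the register index to the RTC address port 0x70, then read the value back from the data port 0x71. In terms of this file's own inb/outb calling convention (port first, then value), it is roughly:

    extern void outb(int port, int value);  /* wrappers defined in this file */
    extern int inb(int port);

    static int rtcin_sketch(int reg)
    {
            outb(0x70, reg);        /* select RTC/CMOS register */
            return inb(0x71);       /* read its value */
    }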
+
+ENTRY(outb)
+ movl 4(%esp),%edx
+ NOP
+ movl 8(%esp),%eax
+ outb %al,%dx
+ NOP
+ ret
+
+ENTRY(outw)
+ movl 4(%esp),%edx
+ NOP
+ movl 8(%esp),%eax
+ outw %ax,%dx
+ NOP
+ ret
+
+ /*
+ * void bzero(void *base, u_int cnt)
+ */
+
+ENTRY(bzero)
+ pushl %edi
+ movl 8(%esp),%edi
+ movl 12(%esp),%ecx
+ xorl %eax,%eax
+ shrl $2,%ecx
+ cld
+ rep
+ stosl
+ movl 12(%esp),%ecx
+ andl $3,%ecx
+ rep
+ stosb
+ popl %edi
+ ret
+
+ /*
+ * fillw (pat,base,cnt)
+ */
+
+ENTRY(fillw)
+ pushl %edi
+ movl 8(%esp),%eax
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ stosw
+ popl %edi
+ ret
+
+ENTRY(bcopyb)
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards. */
+ addl %ecx,%esi
+ std
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ENTRY(bcopyw)
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ shrl $1,%ecx /* copy by 16-bit words */
+ rep
+ movsw
+ adc %ecx,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ std
+ andl $1,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ movl 20(%esp),%ecx /* copy remainder by 16-bit words */
+ shrl $1,%ecx
+ decl %esi
+ decl %edi
+ rep
+ movsw
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ENTRY(bcopyx)
+ movl 16(%esp),%eax
+ cmpl $2,%eax
+ je _bcopyw
+ cmpl $4,%eax
+ jne _bcopyb
+ /*
+ * Fall through to bcopy. ENTRY() provides harmless fill bytes.
+ */
+
+ /*
+ * (ov)bcopy (src,dst,cnt)
+ * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
+ * Changed by bde to not bother returning %eax = 0.
+ */
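The direction choice is the interesting part: if the destination does not start below the source, the copy runs backwards so an overlapping source is not overwritten before it is read. A byte-at-a-time C sketch of that decision (the real routine moves 32-bit words and mops up the remainder):

    #include <stddef.h>

    static void bcopy_sketch(const char *src, char *dst, size_t cnt)
    {
            if (dst < src) {                /* cmpl %esi,%edi; jnb 1f */
                    while (cnt--)
                            *dst++ = *src++;        /* cld; rep movs */
            } else {
                    src += cnt;             /* start from the far end */
                    dst += cnt;
                    while (cnt--)
                            *--dst = *--src;        /* std; rep movs */
            }
    }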
+
+ENTRY(ovbcopy)
+ENTRY(bcopy)
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ shrl $2,%ecx /* copy by 32-bit words */
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ std
+ andl $3,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ movl 20(%esp),%ecx /* copy remainder by 32-bit words */
+ shrl $2,%ecx
+ subl $3,%esi
+ subl $3,%edi
+ rep
+ movsl
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+#ifdef notdef
+ENTRY(copyout)
+ movl _curpcb, %eax
+ movl $cpyflt, PCB_ONFAULT(%eax) # in case we page/protection violate
+ pushl %esi
+ pushl %edi
+ pushl %ebx
+ movl 16(%esp), %esi
+ movl 20(%esp), %edi
+ movl 24(%esp), %ebx
+
+ /* first, check to see if "write fault" */
+1: movl %edi, %eax
+#ifdef notyet
+ shrl $IDXSHIFT, %eax /* fetch pte associated with address */
+ andb $0xfc, %al
+ movl _PTmap(%eax), %eax
+
+ andb $7, %al /* if we are the one case that won't trap... */
+ cmpb $5, %al
+ jne 2f
+ /* ... then simulate the trap! */
+ pushl %edi
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+
+ cmpl $0, %eax /* if not ok, return */
+ jne cpyflt
+ /* otherwise, continue with reference */
+2:
+ movl %edi, %eax /* calculate remainder this pass */
+ andl $0xfffff000, %eax
+ movl $NBPG, %ecx
+ subl %eax, %ecx
+ cmpl %ecx, %ebx
+ jle 3f
+ movl %ebx, %ecx
+3: subl %ecx, %ebx
+ movl %ecx, %edx
+#else
+ movl %ebx, %ecx
+ movl %ebx, %edx
+#endif
+
+ shrl $2,%ecx /* movem */
+ cld
+ rep
+ movsl
+ movl %edx, %ecx /* don't depend on ecx here! */
+ andl $3, %ecx
+ rep
+ movsb
+
+#ifdef notyet
+ cmpl $0, %ebx
+ jl 1b
+#endif
+
+ popl %ebx
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ENTRY(copyin)
+ movl _curpcb,%eax
+ movl $cpyflt,PCB_ONFAULT(%eax) # in case we page/protection violate
+ pushl %esi
+ pushl %edi
+ pushl %ebx # XXX - not used, but affects stack offsets
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ shrl $2,%ecx
+ cld
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx
+ rep
+ movsb
+ popl %ebx
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+cpyflt:
+ popl %ebx
+ popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $ EFAULT,%eax
+ ret
+#else
+ENTRY(copyout)
+ movl _curpcb,%eax
+ movl $cpyflt,PCB_ONFAULT(%eax) # in case we page/protection violate
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ shrl $2,%ecx
+ cld
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ENTRY(copyin)
+ movl _curpcb,%eax
+ movl $cpyflt,PCB_ONFAULT(%eax) # in case we page/protection violate
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ shrl $2,%ecx
+ cld
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+cpyflt: popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $ EFAULT,%eax
+ ret
+
+#endif
+
+ # insb(port,addr,cnt)
+ENTRY(insb)
+ pushl %edi
+ movw 8(%esp),%dx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ insb
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ # insw(port,addr,cnt)
+ENTRY(insw)
+ pushl %edi
+ movw 8(%esp),%dx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ .byte 0x66,0xf2,0x6d # rep insw
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ # outsw(port,addr,cnt)
+ENTRY(outsw)
+ pushl %esi
+ movw 8(%esp),%dx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ .byte 0x66,0xf2,0x6f # rep outsw
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+ # outsb(port,addr,cnt)
+ENTRY(outsb)
+ pushl %esi
+ movw 8(%esp),%dx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ outsb
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+ /*
+ * void lgdt(struct region_descriptor *rdp);
+ */
+ENTRY(lgdt)
+ /* reload the descriptor table */
+ movl 4(%esp),%eax
+ lgdt (%eax)
+ /* flush the prefetch q */
+ jmp 1f
+ nop
+1:
+ /* reload "stale" selectors */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ movl %ax,%ss
+
+ /* reload code selector by turning return into intersegmental return */
+ movl (%esp),%eax
+ pushl %eax
+ # movl $KCSEL,4(%esp)
+ movl $8,4(%esp)
+ lret
+
+ /*
+ * void lidt(struct region_descriptor *rdp);
+ */
+ENTRY(lidt)
+ movl 4(%esp),%eax
+ lidt (%eax)
+ ret
+
+ /*
+ * void lldt(u_short sel)
+ */
+ENTRY(lldt)
+ lldt 4(%esp)
+ ret
+
+ /*
+ * void ltr(u_short sel)
+ */
+ENTRY(ltr)
+ ltr 4(%esp)
+ ret
+
+ /*
+ * void lcr3(caddr_t cr3)
+ */
+ ALIGN_TEXT
+ENTRY(load_cr3)
+ALTENTRY(lcr3)
+ movl 4(%esp),%eax
+ orl $ I386_CR3PAT,%eax
+ movl %eax,%cr3
+ ret
+
+ # tlbflush()
+ENTRY(tlbflush)
+ movl %cr3,%eax
+ orl $ I386_CR3PAT,%eax
+ movl %eax,%cr3
+ ret
+
+ # lcr0(cr0)
+ENTRY(lcr0)
+ALTENTRY(load_cr0)
+ movl 4(%esp),%eax
+ movl %eax,%cr0
+ ret
+
+ # rcr0()
+ENTRY(rcr0)
+ movl %cr0,%eax
+ ret
+
+ # rcr2()
+ENTRY(rcr2)
+ movl %cr2,%eax
+ ret
+
+ # rcr3()
+ENTRY(_cr3)
+ALTENTRY(rcr3)
+ movl %cr3,%eax
+ ret
+
+ # ssdtosd(*ssdp,*sdp)
+ENTRY(ssdtosd)
+ pushl %ebx
+ movl 8(%esp),%ecx
+ movl 8(%ecx),%ebx
+ shll $16,%ebx
+ movl (%ecx),%edx
+ roll $16,%edx
+ movb %dh,%bl
+ movb %dl,%bh
+ rorl $8,%ebx
+ movl 4(%ecx),%eax
+ movw %ax,%dx
+ andl $0xf0000,%eax
+ orl %eax,%ebx
+ movl 12(%esp),%ecx
+ movl %edx,(%ecx)
+ movl %ebx,4(%ecx)
+ popl %ebx
+ ret
+
+/*
+ * {fu,su},{byte,word}
+ */
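All of these routines share one pattern: point pcb_onfault at fusufault so that a page or protection fault during the user access resumes there (returning -1), perform the access through the %gs segment that maps user space, then clear the handler. A rough C rendering, with types and names as stand-ins:

    #include <stddef.h>

    struct pcb_sketch { void *pcb_onfault; };
    extern struct pcb_sketch *curpcb;
    extern void fusufault_stub(void);   /* stands in for the fusufault label */

    static long fuword_sketch(const long *uaddr)
    {
            long v;

            curpcb->pcb_onfault = (void *)fusufault_stub;
            v = *uaddr;                 /* done via %gs in the assembly */
            curpcb->pcb_onfault = NULL;
            return v;                   /* a fault makes this return -1 instead */
    }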
+ALTENTRY(fuiword)
+ENTRY(fuword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ .byte 0x65 # use gs
+ movl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ENTRY(fusword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ movl 4(%esp),%edx
+ .byte 0x65 # use gs
+ movzwl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(fuibyte)
+ENTRY(fubyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ movl 4(%esp),%edx
+ .byte 0x65 # use gs
+ movzbl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ ALIGN_TEXT
+fusufault:
+ movl _curpcb,%ecx
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ decl %eax
+ ret
+
+ALTENTRY(suiword)
+ENTRY(suword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+
+#ifdef notdef
+ shrl $IDXSHIFT, %edx /* fetch pte associated with address */
+ andb $0xfc, %dl
+ movl _PTmap(%edx), %edx
+
+ andb $7, %dl /* if we are the one case that won't trap... */
+ cmpb $5 , %edx
+ jne 1f
+ /* ... then simulate the trap! */
+ pushl %edi
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+ cmpl $0, %eax /* if not ok, return */
+ jne fusufault
+ movl 8(%esp),%eax /* otherwise, continue with reference */
+1:
+ movl 4(%esp),%edx
+#endif
+ .byte 0x65 # use gs
+ movl %eax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ ret
+
+ENTRY(susword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+#ifdef notdef
+shrl $IDXSHIFT, %edx /* calculate pte address */
+andb $0xfc, %dl
+movl _PTmap(%edx), %edx
+andb $7, %edx /* if we are the one case that won't trap... */
+cmpb $5 , %edx
+jne 1f
+/* ..., then simulate the trap! */
+ pushl %edi
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+movl _curpcb, %ecx # restore trashed registers
+cmpl $0, %eax /* if not ok, return */
+jne fusufault
+movl 8(%esp),%eax
+1: movl 4(%esp),%edx
+#endif
+ .byte 0x65 # use gs
+ movw %ax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ ret
+
+ALTENTRY(suibyte)
+ENTRY(subyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+#ifdef notdef
+shrl $IDXSHIFT, %edx /* calculate pte address */
+andb $0xfc, %dl
+movl _PTmap(%edx), %edx
+andb $7, %edx /* if we are the one case that won't trap... */
+cmpb $5 , %edx
+jne 1f
+/* ..., then simulate the trap! */
+ pushl %edi
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+movl _curpcb, %ecx # restore trashed registers
+cmpl $0, %eax /* if not ok, return */
+jne fusufault
+movl 8(%esp),%eax
+1: movl 4(%esp),%edx
+#endif
+ .byte 0x65 # use gs
+ movb %eax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
+ ret
+
+ENTRY(setjmp)
+ movl 4(%esp),%eax
+ movl %ebx, (%eax) # save ebx
+ movl %esp, 4(%eax) # save esp
+ movl %ebp, 8(%eax) # save ebp
+ movl %esi,12(%eax) # save esi
+ movl %edi,16(%eax) # save edi
+ movl (%esp),%edx # get rta
+ movl %edx,20(%eax) # save eip
+ xorl %eax,%eax # return (0);
+ ret
+
+ENTRY(longjmp)
+ movl 4(%esp),%eax
+ movl (%eax),%ebx # restore ebx
+ movl 4(%eax),%esp # restore esp
+ movl 8(%eax),%ebp # restore ebp
+ movl 12(%eax),%esi # restore esi
+ movl 16(%eax),%edi # restore edi
+ movl 20(%eax),%edx # get rta
+ movl %edx,(%esp) # put in return frame
+ xorl %eax,%eax # return (1);
+ incl %eax
+ ret
+/*
+ * The following primitives manipulate the run queues.
+ * _whichqs tells which of the 32 queues _qs
+ * have processes in them. Setrq puts processes into queues, Remrq
+ * removes them from queues. The running process is on no queue,
+ * other processes are on a queue indexed by p->p_pri divided by 4,
+ * which shrinks the 0-127 range of priorities into the 32 available
+ * queues.
+ */
+
+ .globl _whichqs,_qs,_cnt,_panic
+ .comm _noproc,4
+ .comm _runrun,4
+
+/*
+ * Setrq(p)
+ *
+ * Call should be made at spl6(), and p->p_stat should be SRUN
+ */
+ENTRY(setrq)
+ movl 4(%esp),%eax
+ cmpl $0,P_RLINK(%eax) # should not be on q already
+ je set1
+ pushl $set2
+ call _panic
+set1:
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btsl %edx,_whichqs # set q full bit
+ shll $3,%edx
+ addl $_qs,%edx # locate q hdr
+ movl %edx,P_LINK(%eax) # link process on tail of q
+ movl P_RLINK(%edx),%ecx
+ movl %ecx,P_RLINK(%eax)
+ movl %eax,P_RLINK(%edx)
+ movl %eax,P_LINK(%ecx)
+ ret
+
+set2: .asciz "setrq"
+
+/*
+ * Remrq(p)
+ *
+ * Call should be made at spl6().
+ */
+ENTRY(remrq)
+ movl 4(%esp),%eax
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btrl %edx,_whichqs # clear full bit, panic if clear already
+ jb rem1
+ pushl $rem3
+ call _panic
+rem1:
+ pushl %edx
+ movl P_LINK(%eax),%ecx # unlink process
+ movl P_RLINK(%eax),%edx
+ movl %edx,P_RLINK(%ecx)
+ movl P_RLINK(%eax),%ecx
+ movl P_LINK(%eax),%edx
+ movl %edx,P_LINK(%ecx)
+ popl %edx
+ movl $_qs,%ecx
+ shll $3,%edx
+ addl %edx,%ecx
+ cmpl P_LINK(%ecx),%ecx # q still has something?
+ je rem2
+ shrl $3,%edx # yes, set bit as still full
+ btsl %edx,_whichqs
+rem2:
+ movl $0,P_RLINK(%eax) # zap reverse link to indicate off list
+ ret
+
+rem3: .asciz "remrq"
+sw0: .asciz "swtch"
+
+/*
+ * When no processes are on the runq, Swtch branches to idle
+ * to wait for something to come ready.
+ */
+ .globl Idle
+ ALIGN_TEXT
+Idle:
+sti_for_idle:
+ sti
+ SHOW_STI
+ ALIGN_TEXT
+idle:
+ call _spl0
+ cmpl $0,_whichqs
+ jne sw1
+ hlt # wait for interrupt
+ jmp idle
+
+ SUPERALIGN_TEXT /* so profiling doesn't lump Idle with swtch().. */
+badsw:
+ pushl $sw0
+ call _panic
+ /*NOTREACHED*/
+
+/*
+ * Swtch()
+ */
+ENTRY(swtch)
+
+ incl _cnt+V_SWTCH
+
+ /* switch to new process. first, save context as needed */
+
+ movl _curproc,%ecx
+
+ /* if no process to save, don't bother */
+ testl %ecx,%ecx
+ je sw1
+
+ movl P_ADDR(%ecx),%ecx
+
+ movl (%esp),%eax # Hardware registers
+ movl %eax, PCB_EIP(%ecx)
+ movl %ebx, PCB_EBX(%ecx)
+ movl %esp, PCB_ESP(%ecx)
+ movl %ebp, PCB_EBP(%ecx)
+ movl %esi, PCB_ESI(%ecx)
+ movl %edi, PCB_EDI(%ecx)
+
+#ifdef NPX
+ /* have we used fp, and need a save? */
+ mov _curproc,%eax
+ cmp %eax,_npxproc
+ jne 1f
+ pushl %ecx /* h/w bugs make saving complicated */
+ leal PCB_SAVEFPU(%ecx),%eax
+ pushl %eax
+ call _npxsave /* do it in a big C function */
+ popl %eax
+ popl %ecx
+1:
+#endif
+
+ movl _CMAP2,%eax # save temporary map PTE
+ movl %eax,PCB_CMAP2(%ecx) # in our context
+ movl $0,_curproc # out of process
+
+ # movw _cpl, %ax
+ # movw %ax, PCB_IML(%ecx) # save ipl
+
+ /* save is done, now choose a new process or idle */
+sw1:
+ cli
+ SHOW_CLI
+ movl _whichqs,%edi
+2:
+ # XXX - bsf is sloow
+ bsfl %edi,%eax # find a full q
+ je sti_for_idle # if none, idle
+ # XX update whichqs?
+swfnd:
+ btrl %eax,%edi # clear q full status
+ jnb 2b # if it was clear, look for another
+ movl %eax,%ebx # save which one we are using
+
+ shll $3,%eax
+ addl $_qs,%eax # select q
+ movl %eax,%esi
+
+#ifdef DIAGNOSTIC
+ cmpl P_LINK(%eax),%eax # linked to self? (e.g. not on list)
+ je badsw # not possible
+#endif
+
+ movl P_LINK(%eax),%ecx # unlink from front of process q
+ movl P_LINK(%ecx),%edx
+ movl %edx,P_LINK(%eax)
+ movl P_RLINK(%ecx),%eax
+ movl %eax,P_RLINK(%edx)
+
+ cmpl P_LINK(%ecx),%esi # q empty
+ je 3f
+ btsl %ebx,%edi # nope, set to indicate full
+3:
+ movl %edi,_whichqs # update q status
+
+ movl $0,%eax
+ movl %eax,_want_resched
+
+#ifdef DIAGNOSTIC
+ cmpl %eax,P_WCHAN(%ecx)
+ jne badsw
+ cmpb $ SRUN,P_STAT(%ecx)
+ jne badsw
+#endif
+
+ movl %eax,P_RLINK(%ecx) /* isolate process to run */
+ movl P_ADDR(%ecx),%edx
+ movl PCB_CR3(%edx),%ebx
+
+ /* switch address space */
+ movl %ebx,%cr3
+
+ /* restore context */
+ movl PCB_EBX(%edx), %ebx
+ movl PCB_ESP(%edx), %esp
+ movl PCB_EBP(%edx), %ebp
+ movl PCB_ESI(%edx), %esi
+ movl PCB_EDI(%edx), %edi
+ movl PCB_EIP(%edx), %eax
+ movl %eax, (%esp)
+
+ movl PCB_CMAP2(%edx),%eax # get temporary map
+ movl %eax,_CMAP2 # reload temporary map PTE
+
+ movl %ecx,_curproc # into next process
+ movl %edx,_curpcb
+
+ pushl %edx # save p to return
+/*
+ * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
+ * I think restoring the cpl is unnecessary, but we must turn off the cli
+ * now that spl*() don't do it as a side effect.
+ */
+ pushl PCB_IML(%edx)
+ sti
+ SHOW_STI
+#if 0
+ call _splx
+#endif
+ addl $4,%esp
+/*
+ * XXX - 0.0 gets here via swtch_to_inactive(). I think 0.1 gets here in the
+ * same way. Better return a value.
+ */
+ popl %eax # return (p);
+ ret
+
+ENTRY(mvesp)
+ movl %esp,%eax
+ ret
+/*
+ * struct proc *swtch_to_inactive(p) ; struct proc *p;
+ *
+ * At exit of a process, move off the address space of the
+ * process and onto a "safe" one. Then, on a temporary stack
+ * return and run code that disposes of the old state.
+ * Since this code requires a parameter from the "old" stack,
+ * pass it back as a return value.
+ */
+ENTRY(swtch_to_inactive)
+ popl %edx # old pc
+ popl %eax # arg, our return value
+ movl _IdlePTD,%ecx
+ movl %ecx,%cr3 # good bye address space
+ #write buffer?
+ movl $tmpstk-4,%esp # temporary stack, compensated for call
+ jmp %edx # return, execute remainder of cleanup
+
+/*
+ * savectx(pcb, altreturn)
+ * Update pcb, saving current processor state and arranging
+ * for alternate return ala longjmp in swtch if altreturn is true.
+ */
+ENTRY(savectx)
+ movl 4(%esp), %ecx
+ movw _cpl, %ax
+ movw %ax, PCB_IML(%ecx)
+ movl (%esp), %eax
+ movl %eax, PCB_EIP(%ecx)
+ movl %ebx, PCB_EBX(%ecx)
+ movl %esp, PCB_ESP(%ecx)
+ movl %ebp, PCB_EBP(%ecx)
+ movl %esi, PCB_ESI(%ecx)
+ movl %edi, PCB_EDI(%ecx)
+
+#ifdef NPX
+ /*
+ * If npxproc == NULL, then the npx h/w state is irrelevant and the
+ * state had better already be in the pcb. This is true for forks
+ * but not for dumps (the old book-keeping with FP flags in the pcb
+ * always lost for dumps because the dump pcb has 0 flags).
+ *
+ * If npxproc != NULL, then we have to save the npx h/w state to
+ * npxproc's pcb and copy it to the requested pcb, or save to the
+ * requested pcb and reload. Copying is easier because we would
+ * have to handle h/w bugs for reloading. We used to lose the
+ * parent's npx state for forks by forgetting to reload.
+ */
+ mov _npxproc,%eax
+ testl %eax,%eax
+ je 1f
+
+ pushl %ecx
+ movl P_ADDR(%eax),%eax
+ leal PCB_SAVEFPU(%eax),%eax
+ pushl %eax
+ pushl %eax
+ call _npxsave
+ popl %eax
+ popl %eax
+ popl %ecx
+
+ pushl %ecx
+ pushl $108+8*2 /* XXX h/w state size + padding */
+ leal PCB_SAVEFPU(%ecx),%ecx
+ pushl %ecx
+ pushl %eax
+ call _bcopy
+ addl $12,%esp
+ popl %ecx
+1:
+#endif
+
+ movl _CMAP2, %edx # save temporary map PTE
+ movl %edx, PCB_CMAP2(%ecx) # in our context
+
+ cmpl $0, 8(%esp)
+ je 1f
+ movl %esp, %edx # relocate current sp relative to pcb
+ subl $_kstack, %edx # (sp is relative to kstack):
+ addl %edx, %ecx # pcb += sp - kstack;
+ movl %eax, (%ecx) # write return pc at (relocated) sp@
+ # this mess deals with replicating register state gcc hides
+ movl 12(%esp),%eax
+ movl %eax,12(%ecx)
+ movl 16(%esp),%eax
+ movl %eax,16(%ecx)
+ movl 20(%esp),%eax
+ movl %eax,20(%ecx)
+ movl 24(%esp),%eax
+ movl %eax,24(%ecx)
+1:
+ xorl %eax, %eax # return 0
+ ret
+
+/*
+ * addupc(int pc, struct uprof *up, int ticks):
+ * update profiling information for the user process.
+ */
+
+ENTRY(addupc)
+ pushl %ebp
+ movl %esp,%ebp
+ movl 12(%ebp),%edx /* up */
+ movl 8(%ebp),%eax /* pc */
+
+ subl PR_OFF(%edx),%eax /* pc -= up->pr_off */
+ jl L1 /* if (pc < 0) return */
+
+ shrl $1,%eax /* praddr = pc >> 1 */
+ imull PR_SCALE(%edx),%eax /* praddr *= up->pr_scale */
+ shrl $15,%eax /* praddr = praddr >> 15 */
+ andl $-2,%eax /* praddr &= ~1 */
+
+ cmpl PR_SIZE(%edx),%eax /* if (praddr > up->pr_size) return */
+ ja L1
+
+/* addl %eax,%eax /* praddr -> word offset */
+ addl PR_BASE(%edx),%eax /* praddr += up-> pr_base */
+ movl 16(%ebp),%ecx /* ticks */
+
+ movl _curpcb,%edx
+ movl $proffault,PCB_ONFAULT(%edx)
+ addl %ecx,(%eax) /* storage location += ticks */
+ movl $0,PCB_ONFAULT(%edx)
+L1:
+ leave
+ ret
+
+ ALIGN_TEXT
+proffault:
+ /* if we get a fault, then kill profiling all together */
+ movl $0,PCB_ONFAULT(%edx) /* squish the fault handler */
+ movl 12(%ebp),%ecx
+ movl $0,PR_SCALE(%ecx) /* up->pr_scale = 0 */
+ leave
+ ret
+
+ # To be done:
+ ENTRY(astoff)
+ ret
+
+ .data
+ ALIGN_DATA
+ .globl _cyloffset, _curpcb
+_cyloffset: .long 0
+ .globl _proc0paddr
+_proc0paddr: .long 0
+LF: .asciz "swtch %x"
+ ALIGN_DATA
+
+#if 0
+#define PANIC(msg) xorl %eax,%eax; movl %eax,_waittime; pushl 1f; \
+ call _panic; MSG(msg)
+#define PRINTF(n,msg) pushal ; nop ; pushl 1f; call _printf; MSG(msg) ; \
+ popl %eax ; popal
+#define MSG(msg) .data; 1: .asciz msg; ALIGN_DATA; .text
+#endif /* 0 */
+
+/*
+ * Trap and fault vector routines
+ *
+ * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
+ * control. The sti's give the standard losing behaviour for ddb and kgdb.
+ */
+#define IDTVEC(name) ALIGN_TEXT; .globl _X/**/name; _X/**/name:
+#define TRAP(a) pushl $(a) ; jmp alltraps
+#ifdef KGDB
+#define BPTTRAP(a) sti; pushl $(a) ; jmp bpttraps
+#else
+#define BPTTRAP(a) sti; TRAP(a)
+#endif
+
+ .text
+IDTVEC(div)
+ pushl $0; TRAP(T_DIVIDE)
+IDTVEC(dbg)
+#ifdef BDBTRAP
+ BDBTRAP(dbg)
+#endif
+ pushl $0; BPTTRAP(T_TRCTRAP)
+IDTVEC(nmi)
+ pushl $0; TRAP(T_NMI)
+IDTVEC(bpt)
+#ifdef BDBTRAP
+ BDBTRAP(bpt)
+#endif
+ pushl $0; BPTTRAP(T_BPTFLT)
+IDTVEC(ofl)
+ pushl $0; TRAP(T_OFLOW)
+IDTVEC(bnd)
+ pushl $0; TRAP(T_BOUND)
+IDTVEC(ill)
+ pushl $0; TRAP(T_PRIVINFLT)
+IDTVEC(dna)
+ pushl $0; TRAP(T_DNA)
+IDTVEC(dble)
+ TRAP(T_DOUBLEFLT)
+ /*PANIC("Double Fault");*/
+IDTVEC(fpusegm)
+ pushl $0; TRAP(T_FPOPFLT)
+IDTVEC(tss)
+ TRAP(T_TSSFLT)
+ /*PANIC("TSS not valid");*/
+IDTVEC(missing)
+ TRAP(T_SEGNPFLT)
+IDTVEC(stk)
+ TRAP(T_STKFLT)
+IDTVEC(prot)
+ TRAP(T_PROTFLT)
+IDTVEC(page)
+ TRAP(T_PAGEFLT)
+IDTVEC(rsvd)
+ pushl $0; TRAP(T_RESERVED)
+IDTVEC(fpu)
+#ifdef NPX
+ /*
+ * Handle like an interrupt so that we can call npxintr to clear the
+ * error. It would be better to handle npx interrupts as traps but
+ * this is difficult for nested interrupts.
+ */
+ pushl $0 /* dummy error code */
+ pushl $T_ASTFLT
+ pushal
+ nop /* silly, the bug is for popal and it only
+ * bites when the next instruction has a
+ * complicated address mode */
+ pushl %ds
+ pushl %es /* now the stack frame is a trap frame */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ pushl _cpl
+ pushl $0 /* dummy unit to finish building intr frame */
+ incl _cnt+V_TRAP
+ call _npxintr
+ jmp doreti
+#else
+ pushl $0; TRAP(T_ARITHTRAP)
+#endif
+ /* 17 - 31 reserved for future exp */
+IDTVEC(rsvd0)
+ pushl $0; TRAP(17)
+IDTVEC(rsvd1)
+ pushl $0; TRAP(18)
+IDTVEC(rsvd2)
+ pushl $0; TRAP(19)
+IDTVEC(rsvd3)
+ pushl $0; TRAP(20)
+IDTVEC(rsvd4)
+ pushl $0; TRAP(21)
+IDTVEC(rsvd5)
+ pushl $0; TRAP(22)
+IDTVEC(rsvd6)
+ pushl $0; TRAP(23)
+IDTVEC(rsvd7)
+ pushl $0; TRAP(24)
+IDTVEC(rsvd8)
+ pushl $0; TRAP(25)
+IDTVEC(rsvd9)
+ pushl $0; TRAP(26)
+IDTVEC(rsvd10)
+ pushl $0; TRAP(27)
+IDTVEC(rsvd11)
+ pushl $0; TRAP(28)
+IDTVEC(rsvd12)
+ pushl $0; TRAP(29)
+IDTVEC(rsvd13)
+ pushl $0; TRAP(30)
+IDTVEC(rsvd14)
+ pushl $0; TRAP(31)
+
+ SUPERALIGN_TEXT
+alltraps:
+ pushal
+ nop
+ pushl %ds
+ pushl %es
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+calltrap:
+ incl _cnt+V_TRAP
+ call _trap
+ /*
+ * Return through doreti to handle ASTs. Have to change trap frame
+ * to interrupt frame.
+ */
+ movl $T_ASTFLT,4+4+32(%esp) /* new trap type (err code not used) */
+ pushl _cpl
+ pushl $0 /* dummy unit */
+ jmp doreti
+
+#ifdef KGDB
+/*
+ * This code checks for a kgdb trap, then falls through
+ * to the regular trap code.
+ */
+ ALIGN_TEXT
+bpttraps:
+ pushal
+ nop
+ pushl %es
+ pushl %ds
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
+ # non-kernel mode?
+ jne calltrap # yes
+ call _kgdb_trap_glue
+ jmp calltrap
+#endif
+
+/*
+ * Call gate entry for syscall
+ */
+
+ SUPERALIGN_TEXT
+IDTVEC(syscall)
+ pushfl # only for stupid carry bit and more stupid wait3 cc kludge
+ # XXX - also for direction flag (bzero, etc. clear it)
+ pushal # only need eax,ecx,edx - trap resaves others
+ nop
+ movl $KDSEL,%eax # switch to kernel segments
+ movl %ax,%ds
+ movl %ax,%es
+ incl _cnt+V_SYSCALL # kml 3/25/93
+ call _syscall
+ /*
+ * Return through doreti to handle ASTs. Have to change syscall frame
+ * to interrupt frame.
+ *
+ * XXX - we should have set up the frame earlier to avoid the
+ * following popal/pushal (not much can be done to avoid shuffling
+ * the flags). Consistent frames would simplify things all over.
+ */
+ movl 32+0(%esp),%eax /* old flags, shuffle to above cs:eip */
+ movl 32+4(%esp),%ebx /* `int' frame should have been ef, eip, cs */
+ movl 32+8(%esp),%ecx
+ movl %ebx,32+0(%esp)
+ movl %ecx,32+4(%esp)
+ movl %eax,32+8(%esp)
+ popal
+ nop
+ pushl $0 /* dummy error code */
+ pushl $T_ASTFLT
+ pushal
+ nop
+ movl __udatasel,%eax /* switch back to user segments */
+ push %eax /* XXX - better to preserve originals? */
+ push %eax
+ pushl _cpl
+ pushl $0
+ jmp doreti
+
+ENTRY(htonl)
+ENTRY(ntohl)
+ movl 4(%esp),%eax
+#ifdef i486
+ /* XXX */
+ /* Since Gas 1.38 does not grok bswap this has been coded as the
+ * equivalent bytes. This can be changed back to bswap when we
+ * upgrade to a newer version of Gas */
+ /* bswap %eax */
+ .byte 0x0f
+ .byte 0xc8
+#else
+ xchgb %al,%ah
+ roll $16,%eax
+ xchgb %al,%ah
+#endif
+ ret
+
+ENTRY(htons)
+ENTRY(ntohs)
+ movzwl 4(%esp),%eax
+ xchgb %al,%ah
+ ret
+
+#ifdef SHOW_A_LOT
+
+/*
+ * 'show_bits' was too big when defined as a macro. The line length for some
+ * enclosing macro was too big for gas. Perhaps the code would have blown
+ * the cache anyway.
+ */
+
+ ALIGN_TEXT
+show_bits:
+ pushl %eax
+ SHOW_BIT(0)
+ SHOW_BIT(1)
+ SHOW_BIT(2)
+ SHOW_BIT(3)
+ SHOW_BIT(4)
+ SHOW_BIT(5)
+ SHOW_BIT(6)
+ SHOW_BIT(7)
+ SHOW_BIT(8)
+ SHOW_BIT(9)
+ SHOW_BIT(10)
+ SHOW_BIT(11)
+ SHOW_BIT(12)
+ SHOW_BIT(13)
+ SHOW_BIT(14)
+ SHOW_BIT(15)
+ popl %eax
+ ret
+
+ .data
+bit_colors:
+ .byte GREEN,RED,0,0
+ .text
+
+#endif /* SHOW_A_LOT */
+
+#include "i386/isa/vector.s"
+#include "i386/isa/icu.s"
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
new file mode 100644
index 0000000..f0351a7
--- /dev/null
+++ b/sys/amd64/amd64/machdep.c
@@ -0,0 +1,1131 @@
+/*-
+ * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
+ * Copyright (c) 1992 Terrence R. Lambert.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)machdep.c 7.4 (Berkeley) 6/3/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 5 00158
+ * -------------------- ----- ----------------------
+ *
+ * 15 Aug 92 William Jolitz Large memory bug
+ * 15 Aug 92 Terry Lambert Fixed CMOS RAM size bug
+ * 25 Mar 93 Sean Eric Fagan Added #ifdef HZ around microtime for
+ * the new microtime.s routine
+ * 08 Apr 93 Andrew Herbert Fixes for kmem_alloc panics
+ * 20 Apr 93 Bruce Evans New npx-0.5 code
+ * 25 Apr 93 Bruce Evans New intr-0.1 code
+ */
+static char rcsid[] = "$Header: /usr/src/sys.386bsd/i386/i386/RCS/machdep.c,v 1.2 92/01/21 14:22:09 william Exp Locker: root $";
+
+
+#include <stddef.h>
+#include "param.h"
+#include "systm.h"
+#include "signalvar.h"
+#include "kernel.h"
+#include "proc.h"
+#include "user.h"
+#include "buf.h"
+#include "reboot.h"
+#include "conf.h"
+#include "file.h"
+#include "callout.h"
+#include "malloc.h"
+#include "mbuf.h"
+#include "msgbuf.h"
+#include "net/netisr.h"
+
+#include "vm/vm.h"
+#include "vm/vm_kern.h"
+#include "vm/vm_page.h"
+
+extern vm_offset_t avail_end;
+
+#include "machine/cpu.h"
+#include "machine/reg.h"
+#include "machine/psl.h"
+#include "machine/specialreg.h"
+#include "i386/isa/rtc.h"
+
+
+#define EXPECT_BASEMEM 640 /* The expected base memory*/
+#define INFORM_WAIT 1 /* Set to pause before crash in weird cases*/
+
+/*
+ * Declare these as initialized data so we can patch them.
+ */
+int nswbuf = 0;
+#ifdef NBUF
+int nbuf = NBUF;
+#else
+int nbuf = 0;
+#endif
+#ifdef BUFPAGES
+int bufpages = BUFPAGES;
+#else
+int bufpages = 0;
+#endif
+int msgbufmapped; /* set when safe to use msgbuf */
+extern int freebufspace;
+
+/*
+ * Machine-dependent startup code
+ */
+int boothowto = 0, Maxmem = 0;
+long dumplo;
+int physmem, maxmem;
+extern int bootdev;
+#ifdef SMALL
+extern int forcemaxmem;
+#endif
+int biosmem;
+
+extern cyloffset;
+
+cpu_startup()
+{
+ register int unixsize;
+ register unsigned i;
+ register struct pte *pte;
+ int mapaddr, j;
+ register caddr_t v;
+ int maxbufs, base, residual;
+ extern long Usrptsize;
+ vm_offset_t minaddr, maxaddr;
+ vm_size_t size;
+ int firstaddr;
+
+ /*
+ * Initialize error message buffer (at end of core).
+ */
+
+ /* avail_end was pre-decremented in pmap_bootstrap to compensate */
+ for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
+ pmap_enter(pmap_kernel(), msgbufp, avail_end + i * NBPG,
+ VM_PROT_ALL, TRUE);
+ msgbufmapped = 1;
+
+#ifdef KDB
+ kdb_init(); /* startup kernel debugger */
+#endif
+ /*
+ * Good {morning,afternoon,evening,night}.
+ */
+ /*printf(version);
+ printf("real mem = %d\n", ctob(physmem));*/
+
+ /*
+ * Allocate space for system data structures.
+ * The first available kernel virtual address is in "v".
+ * As pages of kernel virtual memory are allocated, "v" is incremented.
+ * As pages of memory are allocated and cleared,
+ * "firstaddr" is incremented.
+ * An index into the kernel page table corresponding to the
+ * virtual memory address maintained in "v" is kept in "mapaddr".
+ */
+
+ /*
+ * Make two passes. The first pass calculates how much memory is
+ * needed and allocates it. The second pass assigns virtual
+ * addresses to the various data structures.
+ */
+ firstaddr = 0;
+again:
+ v = (caddr_t)firstaddr;
+
+#define valloc(name, type, num) \
+ (name) = (type *)v; v = (caddr_t)((name)+(num))
+#define valloclim(name, type, num, lim) \
+ (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
+ valloc(callout, struct callout, ncallout);
+#ifdef SYSVSHM
+ valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
+#endif
+ /*
+ * Determine how many buffers to allocate.
+ * Use 10% of memory for the first 2 Meg, 5% of the remaining
+ * memory. Ensure a minimum of 16 buffers.
+ * We allocate 1/2 as many swap buffer headers as file i/o buffers.
+ */
+ if (bufpages == 0)
+ if (physmem < (2 * 1024 * 1024))
+ bufpages = physmem / 10 / CLSIZE;
+ else
+ bufpages = ((2 * 1024 * 1024 + physmem) / 20) / CLSIZE;
+ /*
+ * 15 Aug 92 William Jolitz bufpages fix for too large
+ */
+ bufpages = min( NKMEMCLUSTERS*2/5, bufpages);
+
+ if (nbuf == 0) {
+ nbuf = bufpages / 2;
+ if (nbuf < 16)
+ nbuf = 16;
+ }
+ freebufspace = bufpages * NBPG;
+ if (nswbuf == 0) {
+ nswbuf = (nbuf / 2) &~ 1; /* force even */
+ if (nswbuf > 256)
+ nswbuf = 256; /* sanity */
+ }
+ valloc(swbuf, struct buf, nswbuf);
+ valloc(buf, struct buf, nbuf);
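+ /*
+ * Illustrative sizing walk-through (assumes NBPG == 4096, CLSIZE == 1,
+ * and that the NKMEMCLUSTERS*2/5 clamp does not bite): on a machine
+ * where physmem == 2048 pages (8 Mb), the code above yields
+ *	bufpages     = 2048 / 10 / 1    = 204
+ *	nbuf         = 204 / 2          = 102   (well above the 16 minimum)
+ *	freebufspace = 204 * 4096 bytes (about 816K of buffer cache)
+ *	nswbuf       = (102 / 2) & ~1   = 50    (under the 256 sanity cap)
+ * The exact numbers on a given configuration depend on CLSIZE and the
+ * NKMEMCLUSTERS clamp.
+ */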
+
+ /*
+ * End of first pass, size has been calculated so allocate memory
+ */
+ if (firstaddr == 0) {
+ size = (vm_size_t)(v - firstaddr);
+ firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
+ if (firstaddr == 0)
+ panic("startup: no room for tables");
+ goto again;
+ }
+ /*
+ * End of second pass, addresses have been assigned
+ */
+ if ((vm_size_t)(v - firstaddr) != size)
+ panic("startup: table size inconsistency");
+ /*
+ * Allocate a submap for buffer space allocations.
+ */
+ buffer_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ bufpages*NBPG, TRUE);
+ /*
+ * Allocate a submap for physio
+ */
+ phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ VM_PHYS_SIZE, TRUE);
+
+ /*
+ * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
+ * we use the more space efficient malloc in place of kmem_alloc.
+ */
+ mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
+ M_MBUF, M_NOWAIT);
+ bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
+ mb_map = kmem_suballoc(kernel_map, (vm_offset_t)&mbutl, &maxaddr,
+ VM_MBUF_SIZE, FALSE);
+ /*
+ * Initialize callouts
+ */
+ callfree = callout;
+ for (i = 1; i < ncallout; i++)
+ callout[i-1].c_next = &callout[i];
+
+ /*printf("avail mem = %d\n", ptoa(vm_page_free_count));*/
+
+ /*
+ * Set up CPU-specific registers, cache, etc.
+ */
+ initcpu();
+
+ /*
+ * Set up buffers, so they can be used to read disk labels.
+ */
+ bufinit();
+
+ /*
+ * Configure the system.
+ */
+ configure();
+}
+
+#ifdef PGINPROF
+/*
+ * Return the difference (in microseconds)
+ * between the current time and a previous
+ * time as represented by the arguments.
+ * If there is a pending clock interrupt
+ * which has not been serviced due to high
+ * ipl, return error code.
+ */
+/*ARGSUSED*/
+vmtime(otime, olbolt, oicr)
+ register int otime, olbolt, oicr;
+{
+
+ return (((time.tv_sec-otime)*60 + lbolt-olbolt)*16667);
+}
+#endif
+
+struct sigframe {
+ int sf_signum;
+ int sf_code;
+ struct sigcontext *sf_scp;
+ sig_t sf_handler;
+ int sf_eax;
+ int sf_edx;
+ int sf_ecx;
+ struct sigcontext sf_sc;
+} ;
+
+extern int kstack[];
+
+/*
+ * Send an interrupt to process.
+ *
+ * Stack is set up to allow sigcode stored
+ * in u. to call routine, followed by kcall
+ * to sigreturn routine below. After sigreturn
+ * resets the signal mask, the stack, and the
+ * frame pointer, it returns to the user
+ * specified pc, psl.
+ */
+void
+sendsig(catcher, sig, mask, code)
+ sig_t catcher;
+ int sig, mask;
+ unsigned code;
+{
+ register struct proc *p = curproc;
+ register int *regs;
+ register struct sigframe *fp;
+ struct sigacts *ps = p->p_sigacts;
+ int oonstack, frmtrap;
+
+ regs = p->p_regs;
+ oonstack = ps->ps_onstack;
+ frmtrap = curpcb->pcb_flags & FM_TRAP;
+ /*
+ * Allocate and validate space for the signal handler
+ * context. Note that if the stack is in P0 space, the
+ * call to grow() is a nop, and the useracc() check
+ * will fail if the process has not already allocated
+ * the space with a `brk'.
+ */
+ if (!ps->ps_onstack && (ps->ps_sigonstack & sigmask(sig))) {
+ fp = (struct sigframe *)(ps->ps_sigsp
+ - sizeof(struct sigframe));
+ ps->ps_onstack = 1;
+ } else {
+ if (frmtrap)
+ fp = (struct sigframe *)(regs[tESP]
+ - sizeof(struct sigframe));
+ else
+ fp = (struct sigframe *)(regs[sESP]
+ - sizeof(struct sigframe));
+ }
+
+ if ((unsigned)fp <= (unsigned)p->p_vmspace->vm_maxsaddr + MAXSSIZ - ctob(p->p_vmspace->vm_ssize))
+ (void)grow(p, (unsigned)fp);
+
+ if (useracc((caddr_t)fp, sizeof (struct sigframe), B_WRITE) == 0) {
+ /*
+ * Process has trashed its stack; give it an illegal
+ * instruction to halt it in its tracks.
+ */
+ SIGACTION(p, SIGILL) = SIG_DFL;
+ sig = sigmask(SIGILL);
+ p->p_sigignore &= ~sig;
+ p->p_sigcatch &= ~sig;
+ p->p_sigmask &= ~sig;
+ psignal(p, SIGILL);
+ return;
+ }
+
+ /*
+ * Build the argument list for the signal handler.
+ */
+ fp->sf_signum = sig;
+ fp->sf_code = code;
+ fp->sf_scp = &fp->sf_sc;
+ fp->sf_handler = catcher;
+
+ /* save scratch registers */
+ if(frmtrap) {
+ fp->sf_eax = regs[tEAX];
+ fp->sf_edx = regs[tEDX];
+ fp->sf_ecx = regs[tECX];
+ } else {
+ fp->sf_eax = regs[sEAX];
+ fp->sf_edx = regs[sEDX];
+ fp->sf_ecx = regs[sECX];
+ }
+ /*
+ * Build the signal context to be used by sigreturn.
+ */
+ fp->sf_sc.sc_onstack = oonstack;
+ fp->sf_sc.sc_mask = mask;
+ if(frmtrap) {
+ fp->sf_sc.sc_sp = regs[tESP];
+ fp->sf_sc.sc_fp = regs[tEBP];
+ fp->sf_sc.sc_pc = regs[tEIP];
+ fp->sf_sc.sc_ps = regs[tEFLAGS];
+ regs[tESP] = (int)fp;
+ regs[tEIP] = (int)((struct pcb *)kstack)->pcb_sigc;
+ } else {
+ fp->sf_sc.sc_sp = regs[sESP];
+ fp->sf_sc.sc_fp = regs[sEBP];
+ fp->sf_sc.sc_pc = regs[sEIP];
+ fp->sf_sc.sc_ps = regs[sEFLAGS];
+ regs[sESP] = (int)fp;
+ regs[sEIP] = (int)((struct pcb *)kstack)->pcb_sigc;
+ }
+}
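+
+/*
+ * Sketch of the user stack laid out by sendsig() above, lowest address
+ * first (fp becomes the new user stack pointer):
+ *
+ *	fp -> sf_signum		argument 1 to the handler
+ *	      sf_code		argument 2
+ *	      sf_scp		argument 3, points at sf_sc below
+ *	      sf_handler	consumed by the sigcode trampoline
+ *	      sf_eax/edx/ecx	scratch registers, restored by sigreturn
+ *	      sf_sc		sigcontext later consumed by sigreturn()
+ *
+ * Control resumes at pcb_sigc (the sigcode copied into the pcb by
+ * init386), which is expected to call *sf_handler and then enter
+ * sigreturn() below.
+ */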
+
+/*
+ * System call to cleanup state after a signal
+ * has been taken. Reset signal mask and
+ * stack state from context left by sendsig (above).
+ * Return to previous pc and psl as specified by
+ * context left by sendsig. Check carefully to
+ * make sure that the user has not modified the
+ * psl to gain improper privileges or to cause
+ * a machine fault.
+ */
+sigreturn(p, uap, retval)
+ struct proc *p;
+ struct args {
+ struct sigcontext *sigcntxp;
+ } *uap;
+ int *retval;
+{
+ register struct sigcontext *scp;
+ register struct sigframe *fp;
+ register int *regs = p->p_regs;
+
+
+ /*
+ * (XXX old comment) regs[sESP] points to the return address.
+ * The user scp pointer is above that.
+ * The return address is faked in the signal trampoline code
+ * for consistency.
+ */
+ scp = uap->sigcntxp;
+ fp = (struct sigframe *)
+ ((caddr_t)scp - offsetof(struct sigframe, sf_sc));
+
+ if (useracc((caddr_t)fp, sizeof (*fp), 0) == 0)
+ return(EINVAL);
+
+ /* restore scratch registers */
+ regs[sEAX] = fp->sf_eax ;
+ regs[sEDX] = fp->sf_edx ;
+ regs[sECX] = fp->sf_ecx ;
+
+ if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0)
+ return(EINVAL);
+#ifdef notyet
+ if ((scp->sc_ps & PSL_MBZ) != 0 || (scp->sc_ps & PSL_MBO) != PSL_MBO) {
+ return(EINVAL);
+ }
+#endif
+ p->p_sigacts->ps_onstack = scp->sc_onstack & 01;
+ p->p_sigmask = scp->sc_mask &~
+ (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
+ regs[sEBP] = scp->sc_fp;
+ regs[sESP] = scp->sc_sp;
+ regs[sEIP] = scp->sc_pc;
+ regs[sEFLAGS] = scp->sc_ps;
+ return(EJUSTRETURN);
+}
+
+int waittime = -1;
+struct pcb dumppcb;
+
+boot(arghowto)
+ int arghowto;
+{
+ register long dummy; /* r12 is reserved */
+ register int howto; /* r11 == how to boot */
+ register int devtype; /* r10 == major of root dev */
+ extern char *panicstr;
+ extern int cold;
+ int nomsg = 1;
+
+ if(cold) {
+ printf("hit reset please");
+ for(;;);
+ }
+ howto = arghowto;
+ if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
+ register struct buf *bp;
+ int iter, nbusy;
+
+ waittime = 0;
+ (void) splnet();
+ /*
+ * Release inodes held by texts before update.
+ */
+ if (panicstr == 0)
+ vnode_pager_umount(NULL);
+ sync((struct sigcontext *)0);
+
+ for (iter = 0; iter < 20; iter++) {
+ nbusy = 0;
+ for (bp = &buf[nbuf]; --bp >= buf; )
+ if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
+ nbusy++;
+ if (nbusy == 0)
+ break;
+ if (nomsg) {
+ printf("updating disks before rebooting... ");
+ nomsg = 0;
+ }
+ /* printf("%d ", nbusy); */
+ DELAY(40000 * iter);
+ }
+ if (nbusy)
+ printf(" failed!\n");
+ else if (nomsg == 0)
+ printf("succeeded.\n");
+ DELAY(10000); /* wait for printf to finish */
+ }
+ splhigh();
+ devtype = major(rootdev);
+ if (howto&RB_HALT) {
+ pg("\nThe operating system has halted. Please press any key to reboot.\n\n");
+ } else {
+ if (howto & RB_DUMP) {
+ savectx(&dumppcb, 0);
+ dumppcb.pcb_ptd = rcr3();
+ dumpsys();
+ /*NOTREACHED*/
+ }
+ }
+#ifdef lint
+ dummy = 0; dummy = dummy;
+ printf("howto %d, devtype %d\n", arghowto, devtype);
+#endif
+ cpu_reset();
+ for(;;) ;
+ /*NOTREACHED*/
+}
+
+int dumpmag = 0x8fca0101; /* magic number for savecore */
+int dumpsize = 0; /* also for savecore */
+/*
+ * Doadump comes here after turning off memory management and
+ * getting on the dump stack, either when called above, or by
+ * the auto-restart code.
+ */
+dumpsys()
+{
+
+ if (dumpdev == NODEV)
+ return;
+ if ((minor(dumpdev)&07) != 1)
+ return;
+ printf("\nThe operating system is saving a copy of RAM memory to device %x, offset %d\n\
+(hit any key to abort): [ amount left to save (MB) ] ", dumpdev, dumplo);
+ dumpsize = physmem;
+ switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
+
+ case ENXIO:
+ printf("-- device bad\n");
+ break;
+
+ case EFAULT:
+ printf("-- device not ready\n");
+ break;
+
+ case EINVAL:
+ printf("-- area improper\n");
+ break;
+
+ case EIO:
+ printf("-- i/o error\n");
+ break;
+
+ case EINTR:
+ printf("-- aborted from console\n");
+ break;
+
+ default:
+ printf(" succeeded\n");
+ break;
+ }
+ printf("system rebooting.\n\n");
+ DELAY(10000);
+}
+
+#ifdef HZ
+/*
+ * If HZ is defined we use this code, otherwise the code in
+ * /sys/i386/i386/microtime.s is used. The other code only works
+ * for HZ=100.
+ */
+microtime(tvp)
+ register struct timeval *tvp;
+{
+ int s = splhigh();
+
+ *tvp = time;
+ tvp->tv_usec += tick;
+ while (tvp->tv_usec > 1000000) {
+ tvp->tv_sec++;
+ tvp->tv_usec -= 1000000;
+ }
+ splx(s);
+}
+#endif /* HZ */
+
+physstrat(bp, strat, prio)
+ struct buf *bp;
+ int (*strat)(), prio;
+{
+ register int s;
+ caddr_t baddr;
+
+ /*
+ * vmapbuf clobbers b_addr so we must remember it so that it
+ * can be restored after vunmapbuf. This is truly rude; we
+ * should really be storing this in a field in the buf struct
+ * but none are available and I didn't want to add one at
+ * this time. Note that b_addr for dirty page pushes is
+ * restored in vunmapbuf. (ugh!)
+ */
+ baddr = bp->b_un.b_addr;
+ vmapbuf(bp);
+ (*strat)(bp);
+ /* pageout daemon doesn't wait for pushed pages */
+ if (bp->b_flags & B_DIRTY)
+ return;
+ s = splbio();
+ while ((bp->b_flags & B_DONE) == 0)
+ sleep((caddr_t)bp, prio);
+ splx(s);
+ vunmapbuf(bp);
+ bp->b_un.b_addr = baddr;
+}
+
+initcpu()
+{
+}
+
+/*
+ * Clear registers on exec
+ */
+setregs(p, entry)
+ struct proc *p;
+ u_long entry;
+{
+
+ p->p_regs[sEBP] = 0; /* bottom of the fp chain */
+ p->p_regs[sEIP] = entry;
+
+ p->p_addr->u_pcb.pcb_flags = 0; /* no fp at all */
+ load_cr0(rcr0() | CR0_TS); /* start emulating */
+#ifdef NPX
+ npxinit(__INITIAL_NPXCW__);
+#endif
+}
+
+/*
+ * Initialize 386 and configure to run kernel
+ */
+
+/*
+ * Initialize segments & interrupt table
+ */
+
+
+#define GNULL_SEL 0 /* Null Descriptor */
+#define GCODE_SEL 1 /* Kernel Code Descriptor */
+#define GDATA_SEL 2 /* Kernel Data Descriptor */
+#define GLDT_SEL 3 /* LDT - eventually one per process */
+#define GTGATE_SEL 4 /* Process task switch gate */
+#define GPANIC_SEL 5 /* Task state to consider panic from */
+#define GPROC0_SEL 6 /* Task state process slot zero and up */
+#define NGDT GPROC0_SEL+1
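+
+/*
+ * For reference: an x86 segment selector is (index << 3) | TI | RPL,
+ * where TI (bit 2) selects GDT (0) or LDT (4) and RPL occupies bits
+ * 0-1.  Assuming the conventional GSEL()/LSEL() encoding from
+ * segments.h,
+ *	GSEL(GCODE_SEL, SEL_KPL) == (1 << 3) | 0     == 0x08
+ * which matches the literal selector 8 stored by setidt() below, and
+ *	LSEL(LUCODE_SEL, SEL_UPL) == (3 << 3) | 4 | 3 == 0x1f
+ * is the user code selector handed out at the end of init386().
+ */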
+
+union descriptor gdt[GPROC0_SEL+1];
+
+/* interrupt descriptor table */
+struct gate_descriptor idt[NIDT];
+
+/* local descriptor table */
+union descriptor ldt[5];
+#define LSYS5CALLS_SEL 0 /* forced by intel BCS */
+#define LSYS5SIGR_SEL 1
+
+#define L43BSDCALLS_SEL 2 /* notyet */
+#define LUCODE_SEL 3
+#define LUDATA_SEL 4
+/* separate stack, es,fs,gs sels ? */
+/* #define LPOSIXCALLS_SEL 5 /* notyet */
+
+struct i386tss tss, panic_tss;
+
+extern struct user *proc0paddr;
+
+/* software prototypes -- in more palatable form */
+struct soft_segment_descriptor gdt_segs[] = {
+ /* Null Descriptor */
+{ 0x0, /* segment base address */
+ 0x0, /* length - all address space */
+ 0, /* segment type */
+ 0, /* segment descriptor priority level */
+ 0, /* segment descriptor present */
+ 0,0,
+ 0, /* default 32 vs 16 bit size */
+ 0 /* limit granularity (byte/page units)*/ },
+ /* Code Descriptor for kernel */
+{ 0x0, /* segment base address */
+ 0xfffff, /* length - all address space */
+ SDT_MEMERA, /* segment type */
+ 0, /* segment descriptor priority level */
+ 1, /* segment descriptor present */
+ 0,0,
+ 1, /* default 32 vs 16 bit size */
+ 1 /* limit granularity (byte/page units)*/ },
+ /* Data Descriptor for kernel */
+{ 0x0, /* segment base address */
+ 0xfffff, /* length - all address space */
+ SDT_MEMRWA, /* segment type */
+ 0, /* segment descriptor priority level */
+ 1, /* segment descriptor present */
+ 0,0,
+ 1, /* default 32 vs 16 bit size */
+ 1 /* limit granularity (byte/page units)*/ },
+ /* LDT Descriptor */
+{ (int) ldt, /* segment base address */
+ sizeof(ldt)-1, /* length - all address space */
+ SDT_SYSLDT, /* segment type */
+ 0, /* segment descriptor priority level */
+ 1, /* segment descriptor present */
+ 0,0,
+ 0, /* unused - default 32 vs 16 bit size */
+ 0 /* limit granularity (byte/page units)*/ },
+ /* Null Descriptor - Placeholder */
+{ 0x0, /* segment base address */
+ 0x0, /* length - all address space */
+ 0, /* segment type */
+ 0, /* segment descriptor priority level */
+ 0, /* segment descriptor present */
+ 0,0,
+ 0, /* default 32 vs 16 bit size */
+ 0 /* limit granularity (byte/page units)*/ },
+ /* Panic Tss Descriptor */
+{ (int) &panic_tss, /* segment base address */
+ sizeof(tss)-1, /* length - all address space */
+ SDT_SYS386TSS, /* segment type */
+ 0, /* segment descriptor priority level */
+ 1, /* segment descriptor present */
+ 0,0,
+ 0, /* unused - default 32 vs 16 bit size */
+ 0 /* limit granularity (byte/page units)*/ },
+ /* Proc 0 Tss Descriptor */
+{ (int) kstack, /* segment base address */
+ sizeof(tss)-1, /* length - all address space */
+ SDT_SYS386TSS, /* segment type */
+ 0, /* segment descriptor priority level */
+ 1, /* segment descriptor present */
+ 0,0,
+ 0, /* unused - default 32 vs 16 bit size */
+ 0 /* limit granularity (byte/page units)*/ }};
+
+struct soft_segment_descriptor ldt_segs[] = {
+ /* Null Descriptor - overwritten by call gate */
+{ 0x0, /* segment base address */
+ 0x0, /* length - all address space */
+ 0, /* segment type */
+ 0, /* segment descriptor priority level */
+ 0, /* segment descriptor present */
+ 0,0,
+ 0, /* default 32 vs 16 bit size */
+ 0 /* limit granularity (byte/page units)*/ },
+ /* Null Descriptor - overwritten by call gate */
+{ 0x0, /* segment base address */
+ 0x0, /* length - all address space */
+ 0, /* segment type */
+ 0, /* segment descriptor priority level */
+ 0, /* segment descriptor present */
+ 0,0,
+ 0, /* default 32 vs 16 bit size */
+ 0 /* limit granularity (byte/page units)*/ },
+ /* Null Descriptor - overwritten by call gate */
+{ 0x0, /* segment base address */
+ 0x0, /* length - all address space */
+ 0, /* segment type */
+ 0, /* segment descriptor priority level */
+ 0, /* segment descriptor present */
+ 0,0,
+ 0, /* default 32 vs 16 bit size */
+ 0 /* limit granularity (byte/page units)*/ },
+ /* Code Descriptor for user */
+{ 0x0, /* segment base address */
+ 0xfffff, /* length - all address space */
+ SDT_MEMERA, /* segment type */
+ SEL_UPL, /* segment descriptor priority level */
+ 1, /* segment descriptor present */
+ 0,0,
+ 1, /* default 32 vs 16 bit size */
+ 1 /* limit granularity (byte/page units)*/ },
+ /* Data Descriptor for user */
+{ 0x0, /* segment base address */
+ 0xfffff, /* length - all address space */
+ SDT_MEMRWA, /* segment type */
+ SEL_UPL, /* segment descriptor priority level */
+ 1, /* segment descriptor present */
+ 0,0,
+ 1, /* default 32 vs 16 bit size */
+ 1 /* limit granularity (byte/page units)*/ } };
+
+setidt(idx, func, typ, dpl) char *func; {
+ struct gate_descriptor *ip = idt + idx;
+
+ ip->gd_looffset = (int)func;
+ ip->gd_selector = 8;
+ ip->gd_stkcpy = 0;
+ ip->gd_xx = 0;
+ ip->gd_type = typ;
+ ip->gd_dpl = dpl;
+ ip->gd_p = 1;
+ ip->gd_hioffset = ((int)func)>>16 ;
+}
+
+#define IDTVEC(name) __CONCAT(X, name)
+extern IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
+ IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(dble), IDTVEC(fpusegm),
+ IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
+ IDTVEC(page), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(rsvd0),
+ IDTVEC(rsvd1), IDTVEC(rsvd2), IDTVEC(rsvd3), IDTVEC(rsvd4),
+ IDTVEC(rsvd5), IDTVEC(rsvd6), IDTVEC(rsvd7), IDTVEC(rsvd8),
+ IDTVEC(rsvd9), IDTVEC(rsvd10), IDTVEC(rsvd11), IDTVEC(rsvd12),
+ IDTVEC(rsvd13), IDTVEC(rsvd14), IDTVEC(rsvd14), IDTVEC(syscall);
+
+int lcr0(), lcr3(), rcr0(), rcr2();
+int _udatasel, _ucodesel, _gsel_tss;
+
+init386(first)
+{
+ extern ssdtosd(), lgdt(), lidt(), lldt(), etext;
+ int x, *pi;
+ unsigned biosbasemem, biosextmem;
+ struct gate_descriptor *gdp;
+ extern int sigcode,szsigcode;
+ /* table descriptors - used to load tables by the microprocessor */
+ struct region_descriptor r_gdt, r_idt;
+ int pagesinbase, pagesinext;
+
+
+ proc0.p_addr = proc0paddr;
+
+ /*
+ * Initialize the console before we print anything out.
+ */
+
+ cninit (KERNBASE+0xa0000);
+
+ /* make gdt memory segments */
+ gdt_segs[GCODE_SEL].ssd_limit = btoc((int) &etext + NBPG);
+ for (x=0; x < NGDT; x++) ssdtosd(gdt_segs+x, gdt+x);
+ /* make ldt memory segments */
+ ldt_segs[LUCODE_SEL].ssd_limit = btoc(UPT_MIN_ADDRESS);
+ ldt_segs[LUDATA_SEL].ssd_limit = btoc(UPT_MIN_ADDRESS);
+ /* Note. eventually want private ldts per process */
+ for (x=0; x < 5; x++) ssdtosd(ldt_segs+x, ldt+x);
+
+ /* exceptions */
+ setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL);
+ setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL);
+ setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL);
+ setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL);
+ setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_KPL);
+ setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL);
+ setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL);
+ setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL);
+ setidt(8, &IDTVEC(dble), SDT_SYS386TGT, SEL_KPL);
+ setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL);
+ setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL);
+ setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL);
+ setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL);
+ setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL);
+ setidt(14, &IDTVEC(page), SDT_SYS386TGT, SEL_KPL);
+ setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL);
+ setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL);
+ setidt(17, &IDTVEC(rsvd0), SDT_SYS386TGT, SEL_KPL);
+ setidt(18, &IDTVEC(rsvd1), SDT_SYS386TGT, SEL_KPL);
+ setidt(19, &IDTVEC(rsvd2), SDT_SYS386TGT, SEL_KPL);
+ setidt(20, &IDTVEC(rsvd3), SDT_SYS386TGT, SEL_KPL);
+ setidt(21, &IDTVEC(rsvd4), SDT_SYS386TGT, SEL_KPL);
+ setidt(22, &IDTVEC(rsvd5), SDT_SYS386TGT, SEL_KPL);
+ setidt(23, &IDTVEC(rsvd6), SDT_SYS386TGT, SEL_KPL);
+ setidt(24, &IDTVEC(rsvd7), SDT_SYS386TGT, SEL_KPL);
+ setidt(25, &IDTVEC(rsvd8), SDT_SYS386TGT, SEL_KPL);
+ setidt(26, &IDTVEC(rsvd9), SDT_SYS386TGT, SEL_KPL);
+ setidt(27, &IDTVEC(rsvd10), SDT_SYS386TGT, SEL_KPL);
+ setidt(28, &IDTVEC(rsvd11), SDT_SYS386TGT, SEL_KPL);
+ setidt(29, &IDTVEC(rsvd12), SDT_SYS386TGT, SEL_KPL);
+ setidt(30, &IDTVEC(rsvd13), SDT_SYS386TGT, SEL_KPL);
+ setidt(31, &IDTVEC(rsvd14), SDT_SYS386TGT, SEL_KPL);
+
+#include "isa.h"
+#if NISA >0
+ isa_defaultirq();
+#endif
+
+ r_gdt.rd_limit = sizeof(gdt)-1;
+ r_gdt.rd_base = (int) gdt;
+ lgdt(&r_gdt);
+ r_idt.rd_limit = sizeof(idt)-1;
+ r_idt.rd_base = (int) idt;
+ lidt(&r_idt);
+ lldt(GSEL(GLDT_SEL, SEL_KPL));
+
+#include "ddb.h"
+#if NDDB > 0
+ kdb_init();
+ if (boothowto & RB_KDB)
+ Debugger();
+#endif
+
+ /* Use BIOS values stored in RTC CMOS RAM, since probing
+ * breaks certain 386 AT relics.
+ */
+ biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
+ biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
+/*printf("bios base %d ext %d ", biosbasemem, biosextmem);*/
+
+ /*
+ * 15 Aug 92 Terry Lambert The real fix for the CMOS bug
+ */
+ if( biosbasemem != EXPECT_BASEMEM) {
+ printf( "Warning: Base memory %dK, assuming %dK\n", biosbasemem, EXPECT_BASEMEM);
+ biosbasemem = EXPECT_BASEMEM; /* assume base*/
+ }
+
+ if( biosextmem > 65536) {
+ printf( "Warning: Extended memory %dK(>64M), assuming 0K\n", biosextmem);
+ biosextmem = 0; /* assume none*/
+ }
+
+ /*
+ * Go into normal calculation; Note that we try to run in 640K, and
+ * that invalid CMOS values of non 0xffff are no longer a cause of
+ * ptdi problems. I have found a gutted kernel can run in 640K.
+ */
+ pagesinbase = 640/4 - first/NBPG;
+ pagesinext = biosextmem/4;
+ /* use greater of either base or extended memory. do this
+ * until I reinstitute discontiguous allocation of vm_page
+ * array.
+ */
+ if (pagesinbase > pagesinext)
+ Maxmem = 640/4;
+ else {
+ Maxmem = pagesinext + 0x100000/NBPG;
+ first = 0x100000; /* skip hole */
+ }
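+
+ /*
+ * Worked example (assuming NBPG == 4096): a machine reporting 3072K of
+ * BIOS extended memory gives pagesinext = 3072/4 = 768 pages, which
+ * always beats the at-most 160 pages available in base memory, so
+ *	Maxmem = 768 + 0x100000/NBPG = 768 + 256 = 1024 pages (4 Mb)
+ * and "first" is advanced to 0x100000 so that allocation skips the
+ * ISA hole between 640K and 1M.
+ */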
+
+ /* This used to explode, since Maxmem used to be 0 for bad CMOS*/
+ maxmem = Maxmem - 1; /* highest page of usable memory */
+ physmem = maxmem; /* number of pages of physmem addr space */
+/*printf("using first 0x%x to 0x%x\n ", first, maxmem*NBPG);*/
+ if (maxmem < 2048/4) {
+ printf("Too little RAM memory. Warning, running in degraded mode.\n");
+#ifdef INFORM_WAIT
+ /*
+ * People with less than 2 Meg have to hit return; this way
+ * we see the messages and can tell them why they blow up later.
+ * If they get it working well enough to recompile, they can unset
+ * the flag; otherwise, it's a toy and they have to lump it.
+ */
+ getchar(); /* kernel getchar in /sys/i386/isa/pccons.c*/
+#endif /* INFORM_WAIT */
+ }
+ /*
+ * End of CMOS bug fix
+ */
+
+ /* call pmap initialization to make new kernel address space */
+ pmap_bootstrap (first, 0);
+ /* now running on new page tables, configured, and u/iom is accessible */
+
+ /* make an initial tss so the microprocessor can get an interrupt stack on syscall! */
+ proc0.p_addr->u_pcb.pcb_tss.tss_esp0 = (int) kstack + UPAGES*NBPG;
+ proc0.p_addr->u_pcb.pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
+ _gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
+ ltr(_gsel_tss);
+
+ /* make a call gate to reenter kernel with */
+ gdp = &ldt[LSYS5CALLS_SEL].gd;
+
+ x = (int) &IDTVEC(syscall);
+ gdp->gd_looffset = x++;
+ gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
+ gdp->gd_stkcpy = 0;
+ gdp->gd_type = SDT_SYS386CGT;
+ gdp->gd_dpl = SEL_UPL;
+ gdp->gd_p = 1;
+ gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;
+
+ /* transfer to user mode */
+
+ _ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
+ _udatasel = LSEL(LUDATA_SEL, SEL_UPL);
+
+ /* setup proc 0's pcb */
+ bcopy(&sigcode, proc0.p_addr->u_pcb.pcb_sigc, szsigcode);
+ proc0.p_addr->u_pcb.pcb_flags = 0;
+ proc0.p_addr->u_pcb.pcb_ptd = IdlePTD;
+}
+
+extern struct pte *CMAP1, *CMAP2;
+extern caddr_t CADDR1, CADDR2;
+/*
+ * zero out physical memory
+ * specified in relocation units (NBPG bytes)
+ */
+clearseg(n) {
+
+ *(int *)CMAP2 = PG_V | PG_KW | ctob(n);
+ load_cr3(rcr3());
+ bzero(CADDR2,NBPG);
+ *(int *) CADDR2 = 0;
+}
+
+/*
+ * copy a page of physical memory
+ * specified in relocation units (NBPG bytes)
+ */
+copyseg(frm, n) {
+
+ *(int *)CMAP2 = PG_V | PG_KW | ctob(n);
+ load_cr3(rcr3());
+ bcopy((void *)frm, (void *)CADDR2, NBPG);
+}
+
+/*
+ * copy a page of physical memory
+ * specified in relocation units (NBPG bytes)
+ */
+physcopyseg(frm, to) {
+
+ *(int *)CMAP1 = PG_V | PG_KW | ctob(frm);
+ *(int *)CMAP2 = PG_V | PG_KW | ctob(to);
+ load_cr3(rcr3());
+ bcopy(CADDR1, CADDR2, NBPG);
+}
+
+/*aston() {
+ schednetisr(NETISR_AST);
+}*/
+
+setsoftclock() {
+ schednetisr(NETISR_SCLK);
+}
+
+/*
+ * insert an element into a queue
+ */
+#undef insque
+_insque(element, head)
+ register struct prochd *element, *head;
+{
+ element->ph_link = head->ph_link;
+ head->ph_link = (struct proc *)element;
+ element->ph_rlink = (struct proc *)head;
+ ((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element;
+}
+
+/*
+ * remove an element from a queue
+ */
+#undef remque
+_remque(element)
+ register struct prochd *element;
+{
+ ((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink;
+ ((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link;
+ element->ph_rlink = (struct proc *)0;
+}
+
+vmunaccess() {}
+
+/*
+ * Below written in C to allow access to debugging code
+ */
+copyinstr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
+ void *toaddr, *fromaddr; {
+ int c,tally;
+
+ tally = 0;
+ while (maxlength--) {
+ c = fubyte(fromaddr++);
+ if (c == -1) {
+ if(lencopied) *lencopied = tally;
+ return(EFAULT);
+ }
+ tally++;
+ *(char *)toaddr++ = (char) c;
+ if (c == 0){
+ if(lencopied) *lencopied = (u_int)tally;
+ return(0);
+ }
+ }
+ if(lencopied) *lencopied = (u_int)tally;
+ return(ENAMETOOLONG);
+}
+
+copyoutstr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
+ void *fromaddr, *toaddr; {
+ int c;
+ int tally;
+
+ tally = 0;
+ while (maxlength--) {
+ c = subyte(toaddr++, *(char *)fromaddr);
+ if (c == -1) return(EFAULT);
+ tally++;
+ if (*(char *)fromaddr++ == 0){
+ if(lencopied) *lencopied = tally;
+ return(0);
+ }
+ }
+ if(lencopied) *lencopied = tally;
+ return(ENAMETOOLONG);
+}
+
+copystr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
+ void *fromaddr, *toaddr; {
+ u_int tally;
+
+ tally = 0;
+ while (maxlength--) {
+ *(u_char *)toaddr = *(u_char *)fromaddr++;
+ tally++;
+ if (*(u_char *)toaddr++ == 0) {
+ if(lencopied) *lencopied = tally;
+ return(0);
+ }
+ }
+ if(lencopied) *lencopied = tally;
+ return(ENAMETOOLONG);
+}
diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
new file mode 100644
index 0000000..650e21e
--- /dev/null
+++ b/sys/amd64/amd64/mem.c
@@ -0,0 +1,191 @@
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and code derived from software contributed to
+ * Berkeley by William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: mem.c 1.13 89/10/08$
+ * @(#)mem.c 7.2 (Berkeley) 5/9/91
+ */
+
+/*
+ * Memory special file
+ */
+
+#include "param.h"
+#include "conf.h"
+#include "buf.h"
+#include "systm.h"
+#include "uio.h"
+#include "malloc.h"
+
+#include "machine/cpu.h"
+
+#include "vm/vm_param.h"
+#include "vm/lock.h"
+#include "vm/vm_statistics.h"
+#include "vm/pmap.h"
+#include "vm/vm_prot.h"
+
+extern char *vmmap; /* poor name! */
+/*ARGSUSED*/
+mmrw(dev, uio, flags)
+ dev_t dev;
+ struct uio *uio;
+ int flags;
+{
+ register int o;
+ register u_int c, v;
+ register struct iovec *iov;
+ int error = 0;
+ caddr_t zbuf = NULL;
+
+ while (uio->uio_resid > 0 && error == 0) {
+ iov = uio->uio_iov;
+ if (iov->iov_len == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ if (uio->uio_iovcnt < 0)
+ panic("mmrw");
+ continue;
+ }
+ switch (minor(dev)) {
+
+/* minor device 0 is physical memory */
+ case 0:
+ v = uio->uio_offset;
+ pmap_enter(pmap_kernel(), vmmap, v,
+ uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
+ TRUE);
+ o = (int)uio->uio_offset & PGOFSET;
+ c = (u_int)(NBPG - ((int)iov->iov_base & PGOFSET));
+ c = MIN(c, (u_int)(NBPG - o));
+ c = MIN(c, (u_int)iov->iov_len);
+ error = uiomove((caddr_t)&vmmap[o], (int)c, uio);
+ pmap_remove(pmap_kernel(), vmmap, &vmmap[NBPG]);
+ continue;
+
+/* minor device 1 is kernel memory */
+ case 1:
+ c = iov->iov_len;
+ if (!kernacc((caddr_t)uio->uio_offset, c,
+ uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
+ return(EFAULT);
+ error = uiomove((caddr_t)uio->uio_offset, (int)c, uio);
+ continue;
+
+/* minor device 2 is EOF/RATHOLE */
+ case 2:
+ if (uio->uio_rw == UIO_READ)
+ return (0);
+ c = iov->iov_len;
+ break;
+
+/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
+ case 12:
+ if (uio->uio_rw == UIO_WRITE) {
+ c = iov->iov_len;
+ break;
+ }
+ if (zbuf == NULL) {
+ zbuf = (caddr_t)
+ malloc(CLBYTES, M_TEMP, M_WAITOK);
+ bzero(zbuf, CLBYTES);
+ }
+ c = MIN(iov->iov_len, CLBYTES);
+ error = uiomove(zbuf, (int)c, uio);
+ continue;
+
+#ifdef notyet
+/* 386 I/O address space (/dev/ioport[bwl]) is read/write access to the separate
+ i/o device address bus, distinct from the memory bus. Semantics here are
+ very different from ordinary read/write: if iov_len is a multiple,
+ an implied string move from a single port will be done. Note that lseek
+ must be used to set the port number reliably. */
+ case 14:
+ if (iov->iov_len == 1) {
+ u_char tmp;
+ tmp = inb(uio->uio_offset);
+ error = uiomove (&tmp, iov->iov_len, uio);
+ } else {
+ if (!useracc((caddr_t)iov->iov_base,
+ iov->iov_len, uio->uio_rw))
+ return (EFAULT);
+ insb(uio->uio_offset, iov->iov_base,
+ iov->iov_len);
+ }
+ break;
+ case 15:
+ if (iov->iov_len == sizeof (short)) {
+ u_short tmp;
+ tmp = inw(uio->uio_offset);
+ error = uiomove (&tmp, iov->iov_len, uio);
+ } else {
+ if (!useracc((caddr_t)iov->iov_base,
+ iov->iov_len, uio->uio_rw))
+ return (EFAULT);
+ insw(uio->uio_offset, iov->iov_base,
+ iov->iov_len/ sizeof (short));
+ }
+ break;
+ case 16:
+ if (iov->iov_len == sizeof (long)) {
+ u_long tmp;
+ tmp = inl(uio->uio_offset);
+ error = uiomove (&tmp, iov->iov_len, uio);
+ } else {
+ if (!useracc((caddr_t)iov->iov_base,
+ iov->iov_len, uio->uio_rw))
+ return (EFAULT);
+ insl(uio->uio_offset, iov->iov_base,
+ iov->iov_len/ sizeof (long));
+ }
+ break;
+#endif
+
+ default:
+ return (ENXIO);
+ }
+ if (error)
+ break;
+ iov->iov_base += c;
+ iov->iov_len -= c;
+ uio->uio_offset += c;
+ uio->uio_resid -= c;
+ }
+ if (zbuf)
+ free(zbuf, M_TEMP);
+ return (error);
+}
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
new file mode 100644
index 0000000..66c7fec
--- /dev/null
+++ b/sys/amd64/amd64/pmap.c
@@ -0,0 +1,1728 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pmap.c 7.7 (Berkeley) 5/12/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00063
+ * -------------------- ----- ----------------------
+ *
+ * 28 Nov 1991 Poul-Henning Kamp Speedup processing.
+ */
+static char rcsid[] = "$Header: /usr/src/sys.386bsd/i386/i386/RCS/pmap.c,v 1.3 92/01/21 14:26:44 william Exp Locker: root $";
+
+/*
+ * Derived from hp300 version by Mike Hibler, this version by William
+ * Jolitz uses a recursive map [a pde points to the page directory] to
+ * map the page tables using the pagetables themselves. This is done to
+ * reduce the impact on kernel virtual memory for lots of sparse address
+ * space, and to reduce the cost of memory to each process.
+ *
+ * Derived from: hp300/@(#)pmap.c 7.1 (Berkeley) 12/5/90
+ */
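+
+/*
+ * Rough sketch of the recursive trick (assuming 4K pages and
+ * 1024-entry tables): one page-directory slot (PTDPTDI) is made to
+ * point back at the page directory itself.  Through that slot the 4 Mb
+ * window PTmap exposes every page table as ordinary kernel virtual
+ * memory, so the pte for a virtual address va can be reached as
+ * roughly
+ *	vtopte(va) ~= PTmap + i386_btop(va)
+ * and the directory itself appears inside that window as PTD/PTDpde.
+ * The alternate window (APTmap/APTDpde) used below maps another
+ * process' page tables the same way.
+ */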
+
+/*
+ * Reno i386 version, from Mike Hibler's hp300 version.
+ */
+
+/*
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include "param.h"
+#include "proc.h"
+#include "malloc.h"
+#include "user.h"
+
+#include "vm/vm.h"
+#include "vm/vm_kern.h"
+#include "vm/vm_page.h"
+/*#include "vm/vm_pageout.h"*/
+
+#include "i386/isa/isa.h"
+
+/*
+ * Allocate various and sundry SYSMAPs used in the days of old VM
+ * and not yet converted. XXX.
+ */
+#define BSDVM_COMPAT 1
+
+#ifdef DEBUG
+struct {
+ int kernel; /* entering kernel mapping */
+ int user; /* entering user mapping */
+ int ptpneeded; /* needed to allocate a PT page */
+ int pwchange; /* no mapping change, just wiring or protection */
+ int wchange; /* no mapping change, just wiring */
+ int mchange; /* was mapped but mapping to different page */
+ int managed; /* a managed page */
+ int firstpv; /* first mapping for this PA */
+ int secondpv; /* second mapping for this PA */
+ int ci; /* cache inhibited */
+ int unmanaged; /* not a managed page */
+ int flushes; /* cache flushes */
+} enter_stats;
+struct {
+ int calls;
+ int removes;
+ int pvfirst;
+ int pvsearch;
+ int ptinvalid;
+ int uflushes;
+ int sflushes;
+} remove_stats;
+
+int debugmap = 0;
+int pmapdebug = 0 /* 0xffff */;
+#define PDB_FOLLOW 0x0001
+#define PDB_INIT 0x0002
+#define PDB_ENTER 0x0004
+#define PDB_REMOVE 0x0008
+#define PDB_CREATE 0x0010
+#define PDB_PTPAGE 0x0020
+#define PDB_CACHE 0x0040
+#define PDB_BITS 0x0080
+#define PDB_COLLECT 0x0100
+#define PDB_PROTECT 0x0200
+#define PDB_PDRTAB 0x0400
+#define PDB_PARANOIA 0x2000
+#define PDB_WIRING 0x4000
+#define PDB_PVDUMP 0x8000
+
+int pmapvacflush = 0;
+#define PVF_ENTER 0x01
+#define PVF_REMOVE 0x02
+#define PVF_PROTECT 0x04
+#define PVF_TOTAL 0x80
+#endif
+
+/*
+ * Get PDEs and PTEs for user/kernel address space
+ */
+#define pmap_pde(m, v) (&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
+
+#define pmap_pte_pa(pte) (*(int *)(pte) & PG_FRAME)
+
+#define pmap_pde_v(pte) ((pte)->pd_v)
+#define pmap_pte_w(pte) ((pte)->pg_w)
+/* #define pmap_pte_ci(pte) ((pte)->pg_ci) */
+#define pmap_pte_m(pte) ((pte)->pg_m)
+#define pmap_pte_u(pte) ((pte)->pg_u)
+#define pmap_pte_v(pte) ((pte)->pg_v)
+#define pmap_pte_set_w(pte, v) ((pte)->pg_w = (v))
+#define pmap_pte_set_prot(pte, v) ((pte)->pg_prot = (v))
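+
+/*
+ * Index arithmetic example (assuming PD_SHIFT == 22 and 4K pages):
+ * for va == 0xfe012345,
+ *	pde index = (0xfe012345 >> 22) & 1023 = 0x3f8
+ *	pte index = (0xfe012345 >> 12) & 1023 = 0x012
+ * so pmap_pde(m, va) selects slot 0x3f8 of the page directory, and the
+ * pte lives at offset 0x12 within the corresponding page table.
+ */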
+
+/*
+ * Given a map and a machine independent protection code,
+ * convert to a vax protection code.
+ */
+#define pte_prot(m, p) (protection_codes[p])
+int protection_codes[8];
+
+struct pmap kernel_pmap_store;
+pmap_t kernel_pmap;
+
+vm_offset_t avail_start; /* PA of first available physical page */
+vm_offset_t avail_end; /* PA of last available physical page */
+vm_size_t mem_size; /* memory size in bytes */
+vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
+vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+vm_offset_t vm_first_phys; /* PA of first managed page */
+vm_offset_t vm_last_phys; /* PA just past last managed page */
+int i386pagesperpage; /* PAGE_SIZE / I386_PAGE_SIZE */
+boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
+char *pmap_attributes; /* reference and modify bits */
+
+boolean_t pmap_testbit();
+void pmap_clear_modify();
+
+#if BSDVM_COMPAT
+#include "msgbuf.h"
+
+/*
+ * All those kernel PT submaps that BSD is so fond of
+ */
+struct pte *CMAP1, *CMAP2, *mmap;
+caddr_t CADDR1, CADDR2, vmmap;
+struct pte *msgbufmap;
+struct msgbuf *msgbufp;
+#endif
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ * Map the kernel's code and data, and allocate the system page table.
+ *
+ * On the I386 this is called after mapping has already been enabled
+ * and just syncs the pmap module with what has already been done.
+ * [We can't call it easily with mapping off since the kernel is not
+ * mapped with PA == VA, hence we would have to relocate every address
+ * from the linked base (virtual) address 0xFE000000 to the actual
+ * (physical) address starting relative to 0]
+ */
+struct pte *pmap_pte();
+
+void
+pmap_bootstrap(firstaddr, loadaddr)
+ vm_offset_t firstaddr;
+ vm_offset_t loadaddr;
+{
+#if BSDVM_COMPAT
+ vm_offset_t va;
+ struct pte *pte;
+#endif
+ extern vm_offset_t maxmem, physmem;
+extern int IdlePTD;
+
+ avail_start = firstaddr;
+ avail_end = maxmem << PG_SHIFT;
+
+ /* XXX: allow for msgbuf */
+ avail_end -= i386_round_page(sizeof(struct msgbuf));
+
+ mem_size = physmem << PG_SHIFT;
+ virtual_avail = (vm_offset_t)atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
+ virtual_end = VM_MAX_KERNEL_ADDRESS;
+ i386pagesperpage = PAGE_SIZE / I386_PAGE_SIZE;
+
+ /*
+ * Initialize protection array.
+ */
+ i386_protection_init();
+
+ /*
+ * The kernel's pmap is statically allocated so we don't
+ * have to use pmap_create, which is unlikely to work
+ * correctly at this part of the boot sequence.
+ */
+ kernel_pmap = &kernel_pmap_store;
+
+#ifdef notdef
+ /*
+ * Create Kernel page directory table and page maps.
+ * [ currently done in locore. i have wild and crazy ideas -wfj ]
+ */
+ bzero(firstaddr, 4*NBPG);
+ kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
+ kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;
+
+ firstaddr += NBPG;
+ for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
+ x < i386_btod(VM_MIN_KERNEL_ADDRESS)+3; x++) {
+ struct pde *pde;
+ pde = kernel_pmap->pm_pdir + x;
+ *(int *)pde = firstaddr + x*NBPG | PG_V | PG_KW;
+ }
+#else
+ kernel_pmap->pm_pdir = (pd_entry_t *)(0xfe000000 + IdlePTD);
+#endif
+
+
+ simple_lock_init(&kernel_pmap->pm_lock);
+ kernel_pmap->pm_count = 1;
+
+#if BSDVM_COMPAT
+ /*
+ * Allocate all the submaps we need
+ */
+#define SYSMAP(c, p, v, n) \
+ v = (c)va; va += ((n)*I386_PAGE_SIZE); p = pte; pte += (n);
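+
+/*
+ * For example, the SYSMAP(caddr_t, CMAP1, CADDR1, 1) line below leaves
+ * CADDR1 holding one page of kernel virtual address space and CMAP1
+ * pointing at the pte that maps it; clearseg()/copyseg() in machdep.c
+ * retarget such a window simply by rewriting the pte and reloading
+ * %cr3 to flush the TLB.
+ */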
+
+ va = virtual_avail;
+ pte = pmap_pte(kernel_pmap, va);
+
+ SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
+ SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )
+ SYSMAP(caddr_t ,mmap ,vmmap ,1 )
+ SYSMAP(struct msgbuf * ,msgbufmap ,msgbufp ,1 )
+ virtual_avail = va;
+#endif
+ /*
+ * reserve special hunk of memory for use by bus dma as a bounce
+ * buffer (contiguous virtual *and* physical memory). for now,
+ * assume vm does not use memory beneath hole, and we know that
+ * the bootstrap uses top 32k of base memory. -wfj
+ */
+ {
+ extern vm_offset_t isaphysmem;
+ isaphysmem = va;
+
+ virtual_avail = pmap_map(va, 0xa0000 - 32*1024, 0xa0000, VM_PROT_ALL);
+ }
+
+ *(int *)PTD = 0;
+ load_cr3(rcr3());
+
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void
+pmap_init(phys_start, phys_end)
+ vm_offset_t phys_start, phys_end;
+{
+ vm_offset_t addr, addr2;
+ vm_size_t npg, s;
+ int rv;
+ extern int KPTphys;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_init(%x, %x)\n", phys_start, phys_end);
+#endif
+ /*
+ * Now that kernel map has been allocated, we can mark as
+ * unavailable regions which we have mapped in locore.
+ */
+ addr = atdevbase;
+ (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
+ &addr, (0x100000-0xa0000), FALSE);
+
+ addr = (vm_offset_t) 0xfe000000+KPTphys/* *NBPG */;
+ vm_object_reference(kernel_object);
+ (void) vm_map_find(kernel_map, kernel_object, addr,
+ &addr, 2*NBPG, FALSE);
+
+ /*
+ * Allocate memory for random pmap data structures. Includes the
+ * pv_head_table and pmap_attributes.
+ */
+ npg = atop(phys_end - phys_start);
+ s = (vm_size_t) (sizeof(struct pv_entry) * npg + npg);
+ s = round_page(s);
+ addr = (vm_offset_t) kmem_alloc(kernel_map, s);
+ pv_table = (pv_entry_t) addr;
+ addr += sizeof(struct pv_entry) * npg;
+ pmap_attributes = (char *) addr;
+#ifdef DEBUG
+ if (pmapdebug & PDB_INIT)
+ printf("pmap_init: %x bytes (%x pgs): tbl %x attr %x\n",
+ s, npg, pv_table, pmap_attributes);
+#endif
+
+ /*
+ * Now it is safe to enable pv_table recording.
+ */
+ vm_first_phys = phys_start;
+ vm_last_phys = phys_end;
+ pmap_initialized = TRUE;
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * For now, VM is already on, we only need to map the
+ * specified memory.
+ */
+vm_offset_t
+pmap_map(virt, start, end, prot)
+ vm_offset_t virt;
+ vm_offset_t start;
+ vm_offset_t end;
+ int prot;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
+#endif
+ while (start < end) {
+ pmap_enter(kernel_pmap, virt, start, prot, FALSE);
+ virt += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+ return(virt);
+}
+
+/*
+ * Create and return a physical map.
+ *
+ * If the size specified for the map
+ * is zero, the map is an actual physical
+ * map, and may be referenced by the
+ * hardware.
+ *
+ * If the size specified is non-zero,
+ * the map will be used in software only, and
+ * is bounded by that size.
+ *
+ * [ just allocate a ptd and mark it uninitialized -- should we track
+ * with a table which process has which ptd? -wfj ]
+ */
+
+pmap_t
+pmap_create(size)
+ vm_size_t size;
+{
+ register pmap_t pmap;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
+ printf("pmap_create(%x)\n", size);
+#endif
+ /*
+ * Software use map does not need a pmap
+ */
+ if (size)
+ return(NULL);
+
+ /* XXX: is it ok to wait here? */
+ pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
+#ifdef notifwewait
+ if (pmap == NULL)
+ panic("pmap_create: cannot allocate a pmap");
+#endif
+ bzero(pmap, sizeof(*pmap));
+ pmap_pinit(pmap);
+ return (pmap);
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ */
+void
+pmap_pinit(pmap)
+ register struct pmap *pmap;
+{
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
+ pg("pmap_pinit(%x)\n", pmap);
+#endif
+
+ /*
+ * No need to allocate page table space yet but we do need a
+ * valid page directory table.
+ */
+ pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
+
+ /* wire in kernel global address entries */
+ bcopy(PTD+KPTDI_FIRST, pmap->pm_pdir+KPTDI_FIRST,
+ (KPTDI_LAST-KPTDI_FIRST+1)*4);
+
+ /* install self-referential address mapping entry */
+ *(int *)(pmap->pm_pdir+PTDPTDI) =
+ (int)pmap_extract(kernel_pmap, pmap->pm_pdir) | PG_V | PG_URKW;
+
+ pmap->pm_count = 1;
+ simple_lock_init(&pmap->pm_lock);
+}
+
+/*
+ * Retire the given physical map from service.
+ * Should only be called if the map contains
+ * no valid mappings.
+ */
+void
+pmap_destroy(pmap)
+ register pmap_t pmap;
+{
+ int count;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_destroy(%x)\n", pmap);
+#endif
+ if (pmap == NULL)
+ return;
+
+ simple_lock(&pmap->pm_lock);
+ count = --pmap->pm_count;
+ simple_unlock(&pmap->pm_lock);
+ if (count == 0) {
+ pmap_release(pmap);
+ free((caddr_t)pmap, M_VMPMAP);
+ }
+}
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap)
+ register struct pmap *pmap;
+{
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ pg("pmap_release(%x)\n", pmap);
+#endif
+#ifdef notdef /* DIAGNOSTIC */
+ /* count would be 0 from pmap_destroy... */
+ simple_lock(&pmap->pm_lock);
+ if (pmap->pm_count != 1)
+ panic("pmap_release count");
+#endif
+ kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
+}
+
+/*
+ * Add a reference to the specified pmap.
+ */
+void
+pmap_reference(pmap)
+ pmap_t pmap;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_reference(%x)", pmap);
+#endif
+ if (pmap != NULL) {
+ simple_lock(&pmap->pm_lock);
+ pmap->pm_count++;
+ simple_unlock(&pmap->pm_lock);
+ }
+}
+
+/*
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
+void
+pmap_remove(pmap, sva, eva)
+ struct pmap *pmap;
+ register vm_offset_t sva;
+ register vm_offset_t eva;
+{
+ register pt_entry_t *ptp,*ptq;
+ vm_offset_t va;
+ vm_offset_t pa;
+ pt_entry_t *pte;
+ pv_entry_t pv, npv;
+ int ix;
+ int s, bits;
+#ifdef DEBUG
+ pt_entry_t opte;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
+ pg("pmap_remove(%x, %x, %x)", pmap, sva, eva);
+#endif
+
+ if (pmap == NULL)
+ return;
+
+ /* are we current address space or kernel? */
+ if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
+ || pmap == kernel_pmap)
+ ptp=PTmap;
+
+ /* otherwise, we are alternate address space */
+ else {
+ if (pmap->pm_pdir[PTDPTDI].pd_pfnum
+ != APTDpde.pd_pfnum) {
+ APTDpde = pmap->pm_pdir[PTDPTDI];
+ tlbflush();
+ }
+ ptp=APTmap;
+ }
+#ifdef DEBUG
+ remove_stats.calls++;
+#endif
+
+ /* this is essential since we must check the PDE(sva) for presence */
+ while (sva <= eva && !pmap_pde_v(pmap_pde(pmap, sva)))
+ sva = (sva & PD_MASK) + (1<<PD_SHIFT);
+ sva = i386_btop(sva);
+ eva = i386_btop(eva);
+
+ for (; sva < eva; sva++) {
+ /*
+ * Weed out invalid mappings.
+ * Note: we assume that the page directory table is
+ * always allocated, and in kernel virtual.
+ */
+ ptq=ptp+sva;
+ while((sva & 0x3ff) && !pmap_pte_pa(ptq))
+ {
+ if(++sva >= eva)
+ return;
+ ptq++;
+ }
+
+
+ if(!(sva & 0x3ff)) /* Only check once in a while */
+ {
+ if (!pmap_pde_v(pmap_pde(pmap, i386_ptob(sva))))
+ {
+ /* We can race ahead here, straight to next pde.. */
+ sva = (sva & 0xffc00) + (1<<10) -1 ;
+ continue;
+ }
+ }
+ if(!pmap_pte_pa(ptp+sva))
+ continue;
+
+ pte = ptp + sva;
+ pa = pmap_pte_pa(pte);
+ va = i386_ptob(sva);
+#ifdef DEBUG
+ opte = *pte;
+ remove_stats.removes++;
+#endif
+ /*
+ * Update statistics
+ */
+ if (pmap_pte_w(pte))
+ pmap->pm_stats.wired_count--;
+ pmap->pm_stats.resident_count--;
+
+ /*
+ * Invalidate the PTEs.
+ * XXX: should cluster them up and invalidate as many
+ * as possible at once.
+ */
+#ifdef DEBUG
+ if (pmapdebug & PDB_REMOVE)
+ printf("remove: inv %x ptes at %x(%x) ",
+ i386pagesperpage, pte, *(int *)pte);
+#endif
+ bits = ix = 0;
+ do {
+ bits |= *(int *)pte & (PG_U|PG_M);
+ *(int *)pte++ = 0;
+ /*TBIS(va + ix * I386_PAGE_SIZE);*/
+ } while (++ix != i386pagesperpage);
+ if (curproc && pmap == &curproc->p_vmspace->vm_pmap)
+ pmap_activate(pmap, (struct pcb *)curproc->p_addr);
+ /* are we current address space or kernel? */
+ /*if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
+ || pmap == kernel_pmap)
+ load_cr3(curpcb->pcb_ptd);*/
+ tlbflush();
+
+#ifdef needednotdone
+reduce wiring count on page table pages as references drop
+#endif
+
+ /*
+ * Remove from the PV table (raise IPL since we
+ * may be called at interrupt time).
+ */
+ if (pa < vm_first_phys || pa >= vm_last_phys)
+ continue;
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * If it is the first entry on the list, it is actually
+ * in the header and we must copy the following entry up
+ * to the header. Otherwise we must search the list for
+ * the entry. In either case we free the now unused entry.
+ */
+ if (pmap == pv->pv_pmap && va == pv->pv_va) {
+ npv = pv->pv_next;
+ if (npv) {
+ *pv = *npv;
+ free((caddr_t)npv, M_VMPVENT);
+ } else
+ pv->pv_pmap = NULL;
+#ifdef DEBUG
+ remove_stats.pvfirst++;
+#endif
+ } else {
+ for (npv = pv->pv_next; npv; npv = npv->pv_next) {
+#ifdef DEBUG
+ remove_stats.pvsearch++;
+#endif
+ if (pmap == npv->pv_pmap && va == npv->pv_va)
+ break;
+ pv = npv;
+ }
+#ifdef DEBUG
+ if (npv == NULL)
+ panic("pmap_remove: PA not in pv_tab");
+#endif
+ pv->pv_next = npv->pv_next;
+ free((caddr_t)npv, M_VMPVENT);
+ pv = pa_to_pvh(pa);
+ }
+
+#ifdef notdef
+[tally number of pagetable pages, if sharing of ptpages adjust here]
+#endif
+ /*
+ * Update saved attributes for managed page
+ */
+ pmap_attributes[pa_index(pa)] |= bits;
+ splx(s);
+ }
+#ifdef notdef
+[cache and tlb flushing, if needed]
+#endif
+}
+
+/*
+ * Routine: pmap_remove_all
+ * Function:
+ * Removes this physical page from
+ * all physical maps in which it resides.
+ * Reflects back modify bits to the pager.
+ */
+void
+pmap_remove_all(pa)
+ vm_offset_t pa;
+{
+ register pv_entry_t pv;
+ int s;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
+ printf("pmap_remove_all(%x)", pa);
+ /*pmap_pvdump(pa);*/
+#endif
+ /*
+ * Not one of ours
+ */
+ if (pa < vm_first_phys || pa >= vm_last_phys)
+ return;
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * Do it the easy way for now
+ */
+ while (pv->pv_pmap != NULL) {
+#ifdef DEBUG
+ if (!pmap_pde_v(pmap_pde(pv->pv_pmap, pv->pv_va)) ||
+ pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
+ panic("pmap_remove_all: bad mapping");
+#endif
+ pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
+ }
+ splx(s);
+}
+
+/*
+ * Routine: pmap_copy_on_write
+ * Function:
+ * Remove write privileges from all
+ * physical maps for this physical page.
+ */
+void
+pmap_copy_on_write(pa)
+ vm_offset_t pa;
+{
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
+ printf("pmap_copy_on_write(%x)", pa);
+#endif
+ pmap_changebit(pa, PG_RO, TRUE);
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ */
+void
+pmap_protect(pmap, sva, eva, prot)
+ register pmap_t pmap;
+ vm_offset_t sva, eva;
+ vm_prot_t prot;
+{
+ register pt_entry_t *pte;
+ register vm_offset_t va;
+ register int ix;
+ int i386prot;
+ boolean_t firstpage = TRUE;
+ register pt_entry_t *ptp;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
+ printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);
+#endif
+ if (pmap == NULL)
+ return;
+
+ if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
+ if (prot & VM_PROT_WRITE)
+ return;
+
+ /* are we current address space or kernel? */
+ if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
+ || pmap == kernel_pmap)
+ ptp=PTmap;
+
+ /* otherwise, we are alternate address space */
+ else {
+ if (pmap->pm_pdir[PTDPTDI].pd_pfnum
+ != APTDpde.pd_pfnum) {
+ APTDpde = pmap->pm_pdir[PTDPTDI];
+ tlbflush();
+ }
+ ptp=APTmap;
+ }
+ for (va = sva; va < eva; va += PAGE_SIZE) {
+ /*
+ * Page table page is not allocated.
+ * Skip it, we don't want to force allocation
+ * of unnecessary PTE pages just to set the protection.
+ */
+ if (!pmap_pde_v(pmap_pde(pmap, va))) {
+ /* XXX: avoid address wrap around */
+ if (va >= i386_trunc_pdr((vm_offset_t)-1))
+ break;
+ va = i386_round_pdr(va + PAGE_SIZE) - PAGE_SIZE;
+ continue;
+ }
+
+ pte = ptp + i386_btop(va);
+
+ /*
+ * Page not valid. Again, skip it.
+ * Should we do this? Or set protection anyway?
+ */
+ if (!pmap_pte_v(pte))
+ continue;
+
+ ix = 0;
+ i386prot = pte_prot(pmap, prot);
+ if(va < UPT_MAX_ADDRESS)
+ i386prot |= 2 /*PG_u*/;
+ do {
+ /* clear VAC here if PG_RO? */
+ pmap_pte_set_prot(pte++, i386prot);
+ /*TBIS(va + ix * I386_PAGE_SIZE);*/
+ } while (++ix != i386pagesperpage);
+ }
+ if (curproc && pmap == &curproc->p_vmspace->vm_pmap)
+ pmap_activate(pmap, (struct pcb *)curproc->p_addr);
+}
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+void
+pmap_enter(pmap, va, pa, prot, wired)
+ register pmap_t pmap;
+ vm_offset_t va;
+ register vm_offset_t pa;
+ vm_prot_t prot;
+ boolean_t wired;
+{
+ register pt_entry_t *pte;
+ register int npte, ix;
+ vm_offset_t opa;
+ boolean_t cacheable = TRUE;
+ boolean_t checkpv = TRUE;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
+ printf("pmap_enter(%x, %x, %x, %x, %x)",
+ pmap, va, pa, prot, wired);
+#endif
+ if (pmap == NULL)
+ return;
+
+	if (va > VM_MAX_KERNEL_ADDRESS)
+		panic("pmap_enter: toobig");
+ /* also, should not muck with PTD va! */
+
+#ifdef DEBUG
+ if (pmap == kernel_pmap)
+ enter_stats.kernel++;
+ else
+ enter_stats.user++;
+#endif
+
+ /*
+ * Page Directory table entry not valid, we need a new PT page
+ */
+ if (!pmap_pde_v(pmap_pde(pmap, va))) {
+ pg("ptdi %x", pmap->pm_pdir[PTDPTDI]);
+ }
+
+ pte = pmap_pte(pmap, va);
+ opa = pmap_pte_pa(pte);
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: pte %x, *pte %x ", pte, *(int *)pte);
+#endif
+
+ /*
+ * Mapping has not changed, must be protection or wiring change.
+ */
+ if (opa == pa) {
+#ifdef DEBUG
+ enter_stats.pwchange++;
+#endif
+ /*
+ * Wiring change, just update stats.
+ * We don't worry about wiring PT pages as they remain
+ * resident as long as there are valid mappings in them.
+ * Hence, if a user page is wired, the PT page will be also.
+ */
+ if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ pg("enter: wiring change -> %x ", wired);
+#endif
+ if (wired)
+ pmap->pm_stats.wired_count++;
+ else
+ pmap->pm_stats.wired_count--;
+#ifdef DEBUG
+ enter_stats.wchange++;
+#endif
+ }
+ goto validate;
+ }
+
+ /*
+ * Mapping has changed, invalidate old range and fall through to
+ * handle validating new mapping.
+ */
+ if (opa) {
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: removing old mapping %x pa %x ", va, opa);
+#endif
+ pmap_remove(pmap, va, va + PAGE_SIZE);
+#ifdef DEBUG
+ enter_stats.mchange++;
+#endif
+ }
+
+ /*
+ * Enter on the PV list if part of our managed memory
+ * Note that we raise IPL while manipulating pv_table
+ * since pmap_enter can be called at interrupt time.
+ */
+ if (pa >= vm_first_phys && pa < vm_last_phys) {
+ register pv_entry_t pv, npv;
+ int s;
+
+#ifdef DEBUG
+ enter_stats.managed++;
+#endif
+ pv = pa_to_pvh(pa);
+ s = splimp();
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: pv at %x: %x/%x/%x ",
+ pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
+#endif
+ /*
+ * No entries yet, use header as the first entry
+ */
+ if (pv->pv_pmap == NULL) {
+#ifdef DEBUG
+ enter_stats.firstpv++;
+#endif
+ pv->pv_va = va;
+ pv->pv_pmap = pmap;
+ pv->pv_next = NULL;
+ pv->pv_flags = 0;
+ }
+ /*
+ * There is at least one other VA mapping this page.
+ * Place this entry after the header.
+ */
+ else {
+ /*printf("second time: ");*/
+#ifdef DEBUG
+ for (npv = pv; npv; npv = npv->pv_next)
+ if (pmap == npv->pv_pmap && va == npv->pv_va)
+ panic("pmap_enter: already in pv_tab");
+#endif
+ npv = (pv_entry_t)
+ malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
+ npv->pv_va = va;
+ npv->pv_pmap = pmap;
+ npv->pv_next = pv->pv_next;
+ pv->pv_next = npv;
+#ifdef DEBUG
+ if (!npv->pv_next)
+ enter_stats.secondpv++;
+#endif
+ }
+ splx(s);
+ }
+ /*
+ * Assumption: if it is not part of our managed memory
+	 * then it must be device memory which may be volatile.
+ */
+ if (pmap_initialized) {
+ checkpv = cacheable = FALSE;
+#ifdef DEBUG
+ enter_stats.unmanaged++;
+#endif
+ }
+
+ /*
+ * Increment counters
+ */
+ pmap->pm_stats.resident_count++;
+ if (wired)
+ pmap->pm_stats.wired_count++;
+
+validate:
+ /*
+ * Now validate mapping with desired protection/wiring.
+ * Assume uniform modified and referenced status for all
+ * I386 pages in a MACH page.
+ */
+ npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
+ npte |= (*(int *)pte & (PG_M|PG_U));
+ if (wired)
+ npte |= PG_W;
+ if(va < UPT_MIN_ADDRESS)
+ npte |= PG_u;
+ else if(va < UPT_MAX_ADDRESS)
+ npte |= PG_u | PG_RW;
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: new pte value %x ", npte);
+#endif
+ ix = 0;
+ do {
+ *(int *)pte++ = npte;
+ /*TBIS(va);*/
+ npte += I386_PAGE_SIZE;
+ va += I386_PAGE_SIZE;
+ } while (++ix != i386pagesperpage);
+ pte--;
+#ifdef DEBUGx
+cache, tlb flushes
+#endif
+/*pads(pmap);*/
+ /*load_cr3(((struct pcb *)curproc->p_addr)->pcb_ptd);*/
+ tlbflush();
+}
+
+/*
+ * pmap_page_protect:
+ *
+ * Lower the permission for all mappings to a given page.
+ */
+void
+pmap_page_protect(phys, prot)
+ vm_offset_t phys;
+ vm_prot_t prot;
+{
+ switch (prot) {
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ pmap_copy_on_write(phys);
+ break;
+ case VM_PROT_ALL:
+ break;
+ default:
+ pmap_remove_all(phys);
+ break;
+ }
+}
+
+/*
+ * Routine: pmap_change_wiring
+ * Function: Change the wiring attribute for a map/virtual-address
+ * pair.
+ * In/out conditions:
+ * The mapping must already exist in the pmap.
+ */
+void
+pmap_change_wiring(pmap, va, wired)
+ register pmap_t pmap;
+ vm_offset_t va;
+ boolean_t wired;
+{
+ register pt_entry_t *pte;
+ register int ix;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired);
+#endif
+ if (pmap == NULL)
+ return;
+
+ pte = pmap_pte(pmap, va);
+#ifdef DEBUG
+ /*
+ * Page table page is not allocated.
+ * Should this ever happen? Ignore it for now,
+ * we don't want to force allocation of unnecessary PTE pages.
+ */
+ if (!pmap_pde_v(pmap_pde(pmap, va))) {
+ if (pmapdebug & PDB_PARANOIA)
+ pg("pmap_change_wiring: invalid PDE for %x ", va);
+ return;
+ }
+ /*
+ * Page not valid. Should this ever happen?
+ * Just continue and change wiring anyway.
+ */
+ if (!pmap_pte_v(pte)) {
+ if (pmapdebug & PDB_PARANOIA)
+ pg("pmap_change_wiring: invalid PTE for %x ", va);
+ }
+#endif
+ if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
+ if (wired)
+ pmap->pm_stats.wired_count++;
+ else
+ pmap->pm_stats.wired_count--;
+ }
+ /*
+ * Wiring is not a hardware characteristic so there is no need
+ * to invalidate TLB.
+ */
+ ix = 0;
+ do {
+ pmap_pte_set_w(pte++, wired);
+ } while (++ix != i386pagesperpage);
+}
+
+/*
+ * Routine: pmap_pte
+ * Function:
+ * Extract the page table entry associated
+ * with the given map/virtual_address pair.
+ * [ what about induced faults -wfj]
+ */
+
+struct pte *pmap_pte(pmap, va)
+ register pmap_t pmap;
+ vm_offset_t va;
+{
+
+#ifdef DEBUGx
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_pte(%x, %x) ->\n", pmap, va);
+#endif
+ if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
+
+ /* are we current address space or kernel? */
+ if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
+ || pmap == kernel_pmap)
+ return ((struct pte *) vtopte(va));
+
+ /* otherwise, we are alternate address space */
+ else {
+ if (pmap->pm_pdir[PTDPTDI].pd_pfnum
+ != APTDpde.pd_pfnum) {
+ APTDpde = pmap->pm_pdir[PTDPTDI];
+ tlbflush();
+ }
+ return((struct pte *) avtopte(va));
+ }
+ }
+ return(0);
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+
+vm_offset_t
+pmap_extract(pmap, va)
+ register pmap_t pmap;
+ vm_offset_t va;
+{
+ register vm_offset_t pa;
+
+#ifdef DEBUGx
+ if (pmapdebug & PDB_FOLLOW)
+ pg("pmap_extract(%x, %x) -> ", pmap, va);
+#endif
+ pa = 0;
+ if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
+ pa = *(int *) pmap_pte(pmap, va);
+ }
+ if (pa)
+ pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
+#ifdef DEBUGx
+ if (pmapdebug & PDB_FOLLOW)
+ printf("%x\n", pa);
+#endif
+ return(pa);
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
+ pmap_t dst_pmap;
+ pmap_t src_pmap;
+ vm_offset_t dst_addr;
+ vm_size_t len;
+ vm_offset_t src_addr;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_copy(%x, %x, %x, %x, %x)",
+ dst_pmap, src_pmap, dst_addr, len, src_addr);
+#endif
+}
+
+/*
+ * Require that all active physical maps contain no
+ * incorrect entries NOW. [This update includes
+ * forcing updates of any address map caching.]
+ *
+ * Generally used to insure that a thread about
+ * to run will see a semantically correct world.
+ */
+void pmap_update()
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_update()");
+#endif
+ tlbflush();
+}
+
+/*
+ * Routine: pmap_collect
+ * Function:
+ * Garbage collects the physical map system for
+ * pages which are no longer used.
+ * Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but
+ * others may be collected.
+ * Usage:
+ * Called by the pageout daemon when pages are scarce.
+ * [ needs to be written -wfj ]
+ */
+void
+pmap_collect(pmap)
+ pmap_t pmap;
+{
+ register vm_offset_t pa;
+ register pv_entry_t pv;
+ register int *pte;
+ vm_offset_t kpa;
+ int s;
+
+#ifdef DEBUG
+ int *pde;
+ int opmapdebug;
+ printf("pmap_collect(%x) ", pmap);
+#endif
+ if (pmap != kernel_pmap)
+ return;
+
+}
+
+/* [ macro again?, should I force kstack into user map here? -wfj ] */
+void
+pmap_activate(pmap, pcbp)
+ register pmap_t pmap;
+ struct pcb *pcbp;
+{
+int x;
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_PDRTAB))
+ pg("pmap_activate(%x, %x) ", pmap, pcbp);
+#endif
+ PMAP_ACTIVATE(pmap, pcbp);
+/*printf("pde ");
+for(x=0x3f6; x < 0x3fA; x++)
+ printf("%x ", pmap->pm_pdir[x]);*/
+/*pads(pmap);*/
+/*pg(" pcb_cr3 %x", pcbp->pcb_cr3);*/
+}
+
+/*
+ * Routine: pmap_kernel
+ * Function:
+ * Returns the physical map handle for the kernel.
+ */
+pmap_t
+pmap_kernel()
+{
+ return (kernel_pmap);
+}
+
+/*
+ * pmap_zero_page zeros the specified (machine independent)
+ * page by mapping the page into virtual memory and using
+ * bzero to clear its contents, one machine dependent page
+ * at a time.
+ */
+pmap_zero_page(phys)
+ register vm_offset_t phys;
+{
+ register int ix;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_zero_page(%x)", phys);
+#endif
+ phys >>= PG_SHIFT;
+ ix = 0;
+ do {
+ clearseg(phys++);
+ } while (++ix != i386pagesperpage);
+}
+
+/*
+ * pmap_copy_page copies the specified (machine independent)
+ * page by mapping the page into virtual memory and using
+ * bcopy to copy the page, one machine dependent page at a
+ * time.
+ */
+pmap_copy_page(src, dst)
+ register vm_offset_t src, dst;
+{
+ register int ix;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_copy_page(%x, %x)", src, dst);
+#endif
+ src >>= PG_SHIFT;
+ dst >>= PG_SHIFT;
+ ix = 0;
+ do {
+ physcopyseg(src++, dst++);
+ } while (++ix != i386pagesperpage);
+}
+
+
+/*
+ * Routine: pmap_pageable
+ * Function:
+ * Make the specified pages (by pmap, offset)
+ * pageable (or not) as requested.
+ *
+ * A page which is not pageable may not take
+ * a fault; therefore, its page table entry
+ * must remain valid for the duration.
+ *
+ * This routine is merely advisory; pmap_enter
+ * will specify that these pages are to be wired
+ * down (or not) as appropriate.
+ */
+pmap_pageable(pmap, sva, eva, pageable)
+ pmap_t pmap;
+ vm_offset_t sva, eva;
+ boolean_t pageable;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_pageable(%x, %x, %x, %x)",
+ pmap, sva, eva, pageable);
+#endif
+ /*
+ * If we are making a PT page pageable then all valid
+ * mappings must be gone from that page. Hence it should
+ * be all zeros and there is no need to clean it.
+ * Assumptions:
+ * - we are called with only one page at a time
+ * - PT pages have only one pv_table entry
+ */
+ if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
+ register pv_entry_t pv;
+ register vm_offset_t pa;
+
+#ifdef DEBUG
+ if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
+ printf("pmap_pageable(%x, %x, %x, %x)",
+ pmap, sva, eva, pageable);
+#endif
+ /*if (!pmap_pde_v(pmap_pde(pmap, sva)))
+ return;*/
+ if(pmap_pte(pmap, sva) == 0)
+ return;
+ pa = pmap_pte_pa(pmap_pte(pmap, sva));
+ if (pa < vm_first_phys || pa >= vm_last_phys)
+ return;
+ pv = pa_to_pvh(pa);
+ /*if (!ispt(pv->pv_va))
+ return;*/
+#ifdef DEBUG
+ if (pv->pv_va != sva || pv->pv_next) {
+ pg("pmap_pageable: bad PT page va %x next %x\n",
+ pv->pv_va, pv->pv_next);
+ return;
+ }
+#endif
+ /*
+ * Mark it unmodified to avoid pageout
+ */
+ pmap_clear_modify(pa);
+#ifdef needsomethinglikethis
+ if (pmapdebug & PDB_PTPAGE)
+ pg("pmap_pageable: PT page %x(%x) unmodified\n",
+ sva, *(int *)pmap_pte(pmap, sva));
+ if (pmapdebug & PDB_WIRING)
+ pmap_check_wiring("pageable", sva);
+#endif
+ }
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+
+void
+pmap_clear_modify(pa)
+ vm_offset_t pa;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_clear_modify(%x)", pa);
+#endif
+ pmap_changebit(pa, PG_M, FALSE);
+}
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+
+void pmap_clear_reference(pa)
+ vm_offset_t pa;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_clear_reference(%x)", pa);
+#endif
+ pmap_changebit(pa, PG_U, FALSE);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page is referenced
+ * by any physical maps.
+ */
+
+boolean_t
+pmap_is_referenced(pa)
+ vm_offset_t pa;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW) {
+ boolean_t rv = pmap_testbit(pa, PG_U);
+ printf("pmap_is_referenced(%x) -> %c", pa, "FT"[rv]);
+ return(rv);
+ }
+#endif
+ return(pmap_testbit(pa, PG_U));
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page is modified
+ * by any physical maps.
+ */
+
+boolean_t
+pmap_is_modified(pa)
+ vm_offset_t pa;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW) {
+ boolean_t rv = pmap_testbit(pa, PG_M);
+ printf("pmap_is_modified(%x) -> %c", pa, "FT"[rv]);
+ return(rv);
+ }
+#endif
+ return(pmap_testbit(pa, PG_M));
+}
+
+vm_offset_t
+pmap_phys_address(ppn)
+ int ppn;
+{
+ return(i386_ptob(ppn));
+}
+
+/*
+ * Miscellaneous support routines follow
+ */
+
+i386_protection_init()
+{
+ register int *kp, prot;
+
+ kp = protection_codes;
+ for (prot = 0; prot < 8; prot++) {
+ switch (prot) {
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
+ *kp++ = 0;
+ break;
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
+ *kp++ = PG_RO;
+ break;
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ *kp++ = PG_RW;
+ break;
+ }
+ }
+}
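
For reference, i386_protection_init() above reduces to a three-way mapping:
no access, read-only for any readable or executable combination, and
read/write whenever VM_PROT_WRITE is set.  A minimal standalone sketch of
the same table and a pte_prot()-style lookup, assuming the usual Mach
VM_PROT_* bit values (READ=1, WRITE=2, EXECUTE=4) and illustrative
PG_RO/PG_RW encodings rather than the exact ones from pmap.h:

    #include <stdio.h>

    #define VM_PROT_READ    0x01        /* assumed Mach protection bits */
    #define VM_PROT_WRITE   0x02
    #define VM_PROT_EXECUTE 0x04

    #define PG_RO           0x000       /* illustrative PTE encodings */
    #define PG_RW           0x002

    static int protection_codes[8];

    /* same effect as the switch in i386_protection_init() */
    static void
    protection_init(void)
    {
            int prot;

            for (prot = 0; prot < 8; prot++)
                    protection_codes[prot] =
                        (prot & VM_PROT_WRITE) ? PG_RW :
                        prot ? PG_RO : 0;
    }

    /* what a pte_prot(pmap, prot) lookup amounts to */
    static int
    pte_prot_lookup(int vmprot)
    {
            return (protection_codes[vmprot & 7]);
    }

    int
    main(void)
    {
            protection_init();
            printf("%x %x %x\n",
                pte_prot_lookup(VM_PROT_READ),                  /* 0 (RO) */
                pte_prot_lookup(VM_PROT_READ | VM_PROT_WRITE),  /* 2 (RW) */
                pte_prot_lookup(VM_PROT_EXECUTE));              /* 0 (RO) */
            return 0;
    }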
+
+boolean_t
+pmap_testbit(pa, bit)
+ register vm_offset_t pa;
+ int bit;
+{
+ register pv_entry_t pv;
+ register int *pte, ix;
+ int s;
+
+ if (pa < vm_first_phys || pa >= vm_last_phys)
+ return(FALSE);
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * Check saved info first
+ */
+ if (pmap_attributes[pa_index(pa)] & bit) {
+ splx(s);
+ return(TRUE);
+ }
+ /*
+ * Not found, check current mappings returning
+ * immediately if found.
+ */
+ if (pv->pv_pmap != NULL) {
+ for (; pv; pv = pv->pv_next) {
+ pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
+ ix = 0;
+ do {
+ if (*pte++ & bit) {
+ splx(s);
+ return(TRUE);
+ }
+ } while (++ix != i386pagesperpage);
+ }
+ }
+ splx(s);
+ return(FALSE);
+}
+
+pmap_changebit(pa, bit, setem)
+ register vm_offset_t pa;
+ int bit;
+ boolean_t setem;
+{
+ register pv_entry_t pv;
+ register int *pte, npte, ix;
+ vm_offset_t va;
+ int s;
+ boolean_t firstpage = TRUE;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_BITS)
+ printf("pmap_changebit(%x, %x, %s)",
+ pa, bit, setem ? "set" : "clear");
+#endif
+ if (pa < vm_first_phys || pa >= vm_last_phys)
+ return;
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * Clear saved attributes (modify, reference)
+ */
+ if (!setem)
+ pmap_attributes[pa_index(pa)] &= ~bit;
+ /*
+	 * Loop over all current mappings setting/clearing as appropriate
+ * If setting RO do we need to clear the VAC?
+ */
+ if (pv->pv_pmap != NULL) {
+#ifdef DEBUG
+ int toflush = 0;
+#endif
+ for (; pv; pv = pv->pv_next) {
+#ifdef DEBUG
+ toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
+#endif
+ va = pv->pv_va;
+
+ /*
+ * XXX don't write protect pager mappings
+ */
+ if (bit == PG_RO) {
+ extern vm_offset_t pager_sva, pager_eva;
+
+ if (va >= pager_sva && va < pager_eva)
+ continue;
+ }
+
+ pte = (int *) pmap_pte(pv->pv_pmap, va);
+ ix = 0;
+ do {
+ if (setem)
+ npte = *pte | bit;
+ else
+ npte = *pte & ~bit;
+ if (*pte != npte) {
+ *pte = npte;
+ /*TBIS(va);*/
+ }
+ va += I386_PAGE_SIZE;
+ pte++;
+ } while (++ix != i386pagesperpage);
+
+ if (curproc && pv->pv_pmap == &curproc->p_vmspace->vm_pmap)
+ pmap_activate(pv->pv_pmap, (struct pcb *)curproc->p_addr);
+ }
+#ifdef somethinglikethis
+ if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
+ if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
+ DCIA();
+ else if (toflush == 2)
+ DCIS();
+ else
+ DCIU();
+ }
+#endif
+ }
+ splx(s);
+}
+
+#ifdef DEBUG
+pmap_pvdump(pa)
+ vm_offset_t pa;
+{
+ register pv_entry_t pv;
+
+ printf("pa %x", pa);
+ for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
+ printf(" -> pmap %x, va %x, flags %x",
+ pv->pv_pmap, pv->pv_va, pv->pv_flags);
+ pads(pv->pv_pmap);
+ }
+ printf(" ");
+}
+
+#ifdef notyet
+pmap_check_wiring(str, va)
+ char *str;
+ vm_offset_t va;
+{
+ vm_map_entry_t entry;
+ register int count, *pte;
+
+ va = trunc_page(va);
+ if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) ||
+ !pmap_pte_v(pmap_pte(kernel_pmap, va)))
+ return;
+
+ if (!vm_map_lookup_entry(pt_map, va, &entry)) {
+ pg("wired_check: entry for %x not found\n", va);
+ return;
+ }
+ count = 0;
+ for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
+ if (*pte)
+ count++;
+ if (entry->wired_count != count)
+ pg("*%s*: %x: w%d/a%d\n",
+ str, va, entry->wired_count, count);
+}
+#endif
+
+/* print address space of pmap*/
+pads(pm)
+	pmap_t pm;
+{
+ unsigned va, i, j;
+ struct pte *ptep;
+
+ if(pm == kernel_pmap) return;
+ for (i = 0; i < 1024; i++)
+ if(pm->pm_pdir[i].pd_v)
+ for (j = 0; j < 1024 ; j++) {
+ va = (i<<22)+(j<<12);
+ if (pm == kernel_pmap && va < 0xfe000000)
+ continue;
+ if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
+ continue;
+ ptep = pmap_pte(pm, va);
+ if(pmap_pte_v(ptep))
+ printf("%x:%x ", va, *(int *)ptep);
+ } ;
+
+}
+#endif
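
The debugging helper pads() above reconstructs each virtual address from its
page-directory slot i and page-table slot j as (i<<22)+(j<<12), i.e. the
standard i386 two-level split of 10+10+12 bits.  A tiny standalone
illustration with made-up indices:

    #include <stdio.h>

    int
    main(void)
    {
            unsigned i = 0x3f7;         /* page directory index (top 10 bits) */
            unsigned j = 0x012;         /* page table index (next 10 bits)    */
            unsigned va = (i << 22) + (j << 12);

            printf("va = 0x%08x\n", va);        /* 0xfdc12000 */
            return 0;
    }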
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
new file mode 100644
index 0000000..10b9ac2
--- /dev/null
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -0,0 +1,105 @@
+
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
+ */
+
+#include "param.h"
+#include "systm.h"
+#include "ioctl.h"
+#include "file.h"
+#include "time.h"
+#include "proc.h"
+#include "uio.h"
+#include "kernel.h"
+#include "mtio.h"
+#include "buf.h"
+#include "trace.h"
+
+#ifdef TRACE
+int nvualarm;
+
+vtrace(p, uap, retval)
+ struct proc *p;
+ register struct args {
+ int request;
+ int value;
+ } *uap;
+ int *retval;
+{
+ int vdoualarm();
+
+ switch (uap->request) {
+
+ case VTR_DISABLE: /* disable a trace point */
+ case VTR_ENABLE: /* enable a trace point */
+ if (uap->value < 0 || uap->value >= TR_NFLAGS)
+ return (EINVAL);
+ *retval = traceflags[uap->value];
+ traceflags[uap->value] = uap->request;
+ break;
+
+ case VTR_VALUE: /* return a trace point setting */
+ if (uap->value < 0 || uap->value >= TR_NFLAGS)
+ return (EINVAL);
+ *retval = traceflags[uap->value];
+ break;
+
+ case VTR_UALARM: /* set a real-time ualarm, less than 1 min */
+ if (uap->value <= 0 || uap->value > 60 * hz || nvualarm > 5)
+ return (EINVAL);
+ nvualarm++;
+ timeout(vdoualarm, (caddr_t)p->p_pid, uap->value);
+ break;
+
+ case VTR_STAMP:
+ trace(TR_STAMP, uap->value, p->p_pid);
+ break;
+ }
+ return (0);
+}
+
+vdoualarm(arg)
+ int arg;
+{
+ register struct proc *p;
+
+ p = pfind(arg);
+ if (p)
+ psignal(p, 16);
+ nvualarm--;
+}
+#endif
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
new file mode 100644
index 0000000..57195f3
--- /dev/null
+++ b/sys/amd64/amd64/trap.c
@@ -0,0 +1,547 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the University of Utah, and William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)trap.c 7.4 (Berkeley) 5/13/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00137
+ * -------------------- ----- ----------------------
+ *
+ * 08 Apr 93 Bruce Evans Several VM system fixes
+ * Paul Kranenburg Add counter for vmstat
+ */
+static char rcsid[] = "$Header: /usr/bill/working/sys/i386/i386/RCS/trap.c,v 1.2 92/01/21 14:22:13 william Exp $";
+
+/*
+ * 386 Trap and System call handling
+ */
+
+#include "machine/cpu.h"
+#include "machine/psl.h"
+#include "machine/reg.h"
+
+#include "param.h"
+#include "systm.h"
+#include "proc.h"
+#include "user.h"
+#include "acct.h"
+#include "kernel.h"
+#ifdef KTRACE
+#include "ktrace.h"
+#endif
+
+#include "vm/vm_param.h"
+#include "vm/pmap.h"
+#include "vm/vm_map.h"
+#include "sys/vmmeter.h"
+
+#include "machine/trap.h"
+
+
+struct sysent sysent[];
+int nsysent;
+int dostacklimits;
+unsigned rcr2();
+extern short cpl;
+
+
+/*
+ * trap(frame):
+ * Exception, fault, and trap interface to BSD kernel. This
+ * common code is called from assembly language IDT gate entry
+ * routines that prepare a suitable stack frame, and restore this
+ * frame after the exception has been processed. Note that the
+ * effect is as if the arguments were passed call by reference.
+ */
+
+/*ARGSUSED*/
+trap(frame)
+ struct trapframe frame;
+{
+ register int i;
+ register struct proc *p = curproc;
+ struct timeval syst;
+ int ucode, type, code, eva;
+
+ frame.tf_eflags &= ~PSL_NT; /* clear nested trap XXX */
+ type = frame.tf_trapno;
+#include "ddb.h"
+#if NDDB > 0
+ if (curpcb && curpcb->pcb_onfault) {
+ if (frame.tf_trapno == T_BPTFLT
+ || frame.tf_trapno == T_TRCTRAP)
+ if (kdb_trap (type, 0, &frame))
+ return;
+ }
+#endif
+
+/*pg("trap type %d code = %x eip = %x cs = %x eva = %x esp %x",
+ frame.tf_trapno, frame.tf_err, frame.tf_eip,
+ frame.tf_cs, rcr2(), frame.tf_esp);*/
+if(curpcb == 0 || curproc == 0) goto we_re_toast;
+ if (curpcb->pcb_onfault && frame.tf_trapno != 0xc) {
+copyfault:
+ frame.tf_eip = (int)curpcb->pcb_onfault;
+ return;
+ }
+
+ syst = p->p_stime;
+ if (ISPL(frame.tf_cs) == SEL_UPL) {
+ type |= T_USER;
+ p->p_regs = (int *)&frame;
+ curpcb->pcb_flags |= FM_TRAP; /* used by sendsig */
+ }
+
+ ucode=0;
+ eva = rcr2();
+ code = frame.tf_err;
+ switch (type) {
+
+ default:
+ we_re_toast:
+#ifdef KDB
+ if (kdb_trap(&psl))
+ return;
+#endif
+#if NDDB > 0
+ if (kdb_trap (type, 0, &frame))
+ return;
+#endif
+
+ printf("trap type %d code = %x eip = %x cs = %x eflags = %x ",
+ frame.tf_trapno, frame.tf_err, frame.tf_eip,
+ frame.tf_cs, frame.tf_eflags);
+ eva = rcr2();
+ printf("cr2 %x cpl %x\n", eva, cpl);
+ /* type &= ~T_USER; */ /* XXX what the hell is this */
+ panic("trap");
+ /*NOTREACHED*/
+
+ case T_SEGNPFLT|T_USER:
+ case T_STKFLT|T_USER:
+ case T_PROTFLT|T_USER: /* protection fault */
+ ucode = code + BUS_SEGM_FAULT ;
+ i = SIGBUS;
+ break;
+
+ case T_PRIVINFLT|T_USER: /* privileged instruction fault */
+ case T_RESADFLT|T_USER: /* reserved addressing fault */
+ case T_RESOPFLT|T_USER: /* reserved operand fault */
+ case T_FPOPFLT|T_USER: /* coprocessor operand fault */
+ ucode = type &~ T_USER;
+ i = SIGILL;
+ break;
+
+ case T_ASTFLT|T_USER: /* Allow process switch */
+ astoff();
+ cnt.v_soft++;
+ if ((p->p_flag & SOWEUPC) && p->p_stats->p_prof.pr_scale) {
+ addupc(frame.tf_eip, &p->p_stats->p_prof, 1);
+ p->p_flag &= ~SOWEUPC;
+ }
+ goto out;
+
+ case T_DNA|T_USER:
+#ifdef NPX
+ /* if a transparent fault (due to context switch "late") */
+ if (npxdna()) return;
+#endif
+ i = math_emulate(&frame);
+ if (i == 0) return;
+ ucode = FPE_FPU_NP_TRAP;
+ break;
+
+ case T_BOUND|T_USER:
+ ucode = FPE_SUBRNG_TRAP;
+ i = SIGFPE;
+ break;
+
+ case T_OFLOW|T_USER:
+ ucode = FPE_INTOVF_TRAP;
+ i = SIGFPE;
+ break;
+
+ case T_DIVIDE|T_USER:
+ ucode = FPE_INTDIV_TRAP;
+ i = SIGFPE;
+ break;
+
+ case T_ARITHTRAP|T_USER:
+ ucode = code;
+ i = SIGFPE;
+ break;
+
+ case T_PAGEFLT: /* allow page faults in kernel mode */
+#if 0
+ /* XXX - check only applies to 386's and 486's with WP off */
+ if (code & PGEX_P) goto we_re_toast;
+#endif
+
+ /* fall into */
+ case T_PAGEFLT|T_USER: /* page fault */
+ {
+ register vm_offset_t va;
+ register struct vmspace *vm = p->p_vmspace;
+ register vm_map_t map;
+ int rv;
+ vm_prot_t ftype;
+ extern vm_map_t kernel_map;
+ unsigned nss,v;
+
+ va = trunc_page((vm_offset_t)eva);
+ /*
+ * Avoid even looking at pde_v(va) for high va's. va's
+ * above VM_MAX_KERNEL_ADDRESS don't correspond to normal
+ * PDE's (half of them correspond to APDEpde and half to
+		 * an unmapped kernel PDE). va's between 0xFEC00000 and
+ * VM_MAX_KERNEL_ADDRESS correspond to unmapped kernel PDE's
+ * (XXX - why are only 3 initialized when 6 are required to
+ * reach VM_MAX_KERNEL_ADDRESS?). Faulting in an unmapped
+ * kernel page table would give inconsistent PTD's.
+ *
+ * XXX - faulting in unmapped page tables wastes a page if
+ * va turns out to be invalid.
+ *
+ * XXX - should "kernel address space" cover the kernel page
+ * tables? Might have same problem with PDEpde as with
+ * APDEpde (or there may be no problem with APDEpde).
+ */
+ if (va > 0xFEBFF000) {
+ rv = KERN_FAILURE; /* becomes SIGBUS */
+ goto nogo;
+ }
+ /*
+ * It is only a kernel address space fault iff:
+ * 1. (type & T_USER) == 0 and
+ * 2. pcb_onfault not set or
+ * 3. pcb_onfault set but supervisor space fault
+ * The last can occur during an exec() copyin where the
+ * argument space is lazy-allocated.
+ */
+ if (type == T_PAGEFLT && va >= KERNBASE)
+ map = kernel_map;
+ else
+ map = &vm->vm_map;
+ if (code & PGEX_W)
+ ftype = VM_PROT_READ | VM_PROT_WRITE;
+ else
+ ftype = VM_PROT_READ;
+
+#ifdef DEBUG
+ if (map == kernel_map && va == 0) {
+ printf("trap: bad kernel access at %x\n", va);
+ goto we_re_toast;
+ }
+#endif
+
+ /*
+ * XXX: rude hack to make stack limits "work"
+ */
+ nss = 0;
+ if ((caddr_t)va >= vm->vm_maxsaddr && map != kernel_map
+ && dostacklimits) {
+ nss = clrnd(btoc((unsigned)vm->vm_maxsaddr
+ + MAXSSIZ - (unsigned)va));
+ if (nss > btoc(p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
+/*pg("trap rlimit %d, maxsaddr %x va %x ", nss, vm->vm_maxsaddr, va);*/
+ rv = KERN_FAILURE;
+ goto nogo;
+ }
+ }
+
+ /* check if page table is mapped, if not, fault it first */
+#define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)
+ if (!pde_v(va)) {
+ v = trunc_page(vtopte(va));
+ rv = vm_fault(map, v, ftype, FALSE);
+ if (rv != KERN_SUCCESS) goto nogo;
+ /* check if page table fault, increment wiring */
+ vm_map_pageable(map, v, round_page(v+1), FALSE);
+ } else v=0;
+ rv = vm_fault(map, va, ftype, FALSE);
+ if (rv == KERN_SUCCESS) {
+ /*
+ * XXX: continuation of rude stack hack
+ */
+ if (nss > vm->vm_ssize)
+ vm->vm_ssize = nss;
+ va = trunc_page(vtopte(va));
+ /* for page table, increment wiring
+ as long as not a page table fault as well */
+ if (!v && type != T_PAGEFLT)
+ vm_map_pageable(map, va, round_page(va+1), FALSE);
+ if (type == T_PAGEFLT)
+ return;
+ goto out;
+ }
+nogo:
+ if (type == T_PAGEFLT) {
+ if (curpcb->pcb_onfault)
+ goto copyfault;
+ printf("vm_fault(%x, %x, %x, 0) -> %x\n",
+ map, va, ftype, rv);
+ printf(" type %x, code %x\n",
+ type, code);
+ goto we_re_toast;
+ }
+ i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
+ break;
+ }
+
+#if NDDB == 0
+ case T_TRCTRAP: /* trace trap -- someone single stepping lcall's */
+ frame.tf_eflags &= ~PSL_T;
+
+ /* Q: how do we turn it on again? */
+ return;
+#endif
+
+ case T_BPTFLT|T_USER: /* bpt instruction fault */
+ case T_TRCTRAP|T_USER: /* trace trap */
+ frame.tf_eflags &= ~PSL_T;
+ i = SIGTRAP;
+ break;
+
+#include "isa.h"
+#if NISA > 0
+ case T_NMI:
+ case T_NMI|T_USER:
+#if NDDB > 0
+ /* NMI can be hooked up to a pushbutton for debugging */
+ printf ("NMI ... going to debugger\n");
+ if (kdb_trap (type, 0, &frame))
+ return;
+#endif
+ /* machine/parity/power fail/"kitchen sink" faults */
+ if(isa_nmi(code) == 0) return;
+ else goto we_re_toast;
+#endif
+ }
+
+ trapsignal(p, i, ucode);
+ if ((type & T_USER) == 0)
+ return;
+out:
+ while (i = CURSIG(p))
+ psig(i);
+ p->p_pri = p->p_usrpri;
+ if (want_resched) {
+ /*
+ * Since we are curproc, clock will normally just change
+ * our priority without moving us from one queue to another
+ * (since the running process is not on a queue.)
+ * If that happened after we setrq ourselves but before we
+ * swtch()'ed, we might not be on the queue indicated by
+ * our priority.
+ */
+ (void) splclock();
+ setrq(p);
+ p->p_stats->p_ru.ru_nivcsw++;
+ swtch();
+ (void) splnone();
+ while (i = CURSIG(p))
+ psig(i);
+ }
+ if (p->p_stats->p_prof.pr_scale) {
+ int ticks;
+ struct timeval *tv = &p->p_stime;
+
+ ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
+ (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
+ if (ticks) {
+#ifdef PROFTIMER
+ extern int profscale;
+ addupc(frame.tf_eip, &p->p_stats->p_prof,
+ ticks * profscale);
+#else
+ addupc(frame.tf_eip, &p->p_stats->p_prof, ticks);
+#endif
+ }
+ }
+ curpri = p->p_pri;
+ curpcb->pcb_flags &= ~FM_TRAP; /* used by sendsig */
+}
+
+/*
+ * Compensate for 386 brain damage (missing URKR)
+ */
+int trapwrite(unsigned addr) {
+ int rv;
+ vm_offset_t va;
+
+ va = trunc_page((vm_offset_t)addr);
+ if (va > VM_MAXUSER_ADDRESS) return(1);
+ rv = vm_fault(&curproc->p_vmspace->vm_map, va,
+ VM_PROT_READ | VM_PROT_WRITE, FALSE);
+ if (rv == KERN_SUCCESS) return(0);
+ else return(1);
+}
+
+/*
+ * syscall(frame):
+ * System call request from POSIX system call gate interface to kernel.
+ * Like trap(), argument is call by reference.
+ */
+/*ARGSUSED*/
+syscall(frame)
+ volatile struct syscframe frame;
+{
+ register int *locr0 = ((int *)&frame);
+ register caddr_t params;
+ register int i;
+ register struct sysent *callp;
+ register struct proc *p = curproc;
+ struct timeval syst;
+ int error, opc;
+ int args[8], rval[2];
+ int code;
+
+#ifdef lint
+ r0 = 0; r0 = r0; r1 = 0; r1 = r1;
+#endif
+ syst = p->p_stime;
+ if (ISPL(frame.sf_cs) != SEL_UPL)
+ panic("syscall");
+
+ code = frame.sf_eax;
+ curpcb->pcb_flags &= ~FM_TRAP; /* used by sendsig */
+ p->p_regs = (int *)&frame;
+ params = (caddr_t)frame.sf_esp + sizeof (int) ;
+
+ /*
+	 * Reconstruct pc, assuming lcall $X,y is 7 bytes, as it always is.
+ */
+ opc = frame.sf_eip - 7;
+ callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
+ if (callp == sysent) {
+ i = fuword(params);
+ params += sizeof (int);
+ callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
+ }
+
+ if ((i = callp->sy_narg * sizeof (int)) &&
+ (error = copyin(params, (caddr_t)args, (u_int)i))) {
+ frame.sf_eax = error;
+ frame.sf_eflags |= PSL_C; /* carry bit */
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p->p_tracep, code, callp->sy_narg, &args);
+#endif
+ goto done;
+ }
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p->p_tracep, code, callp->sy_narg, &args);
+#endif
+ rval[0] = 0;
+ rval[1] = frame.sf_edx;
+/*pg("%d. s %d\n", p->p_pid, code);*/
+ error = (*callp->sy_call)(p, args, rval);
+ if (error == ERESTART)
+ frame.sf_eip = opc;
+ else if (error != EJUSTRETURN) {
+ if (error) {
+/*pg("error %d", error);*/
+ frame.sf_eax = error;
+ frame.sf_eflags |= PSL_C; /* carry bit */
+ } else {
+ frame.sf_eax = rval[0];
+ frame.sf_edx = rval[1];
+ frame.sf_eflags &= ~PSL_C; /* carry bit */
+ }
+ }
+ /* else if (error == EJUSTRETURN) */
+ /* nothing to do */
+done:
+ /*
+ * Reinitialize proc pointer `p' as it may be different
+ * if this is a child returning from fork syscall.
+ */
+ p = curproc;
+ while (i = CURSIG(p))
+ psig(i);
+ p->p_pri = p->p_usrpri;
+ if (want_resched) {
+ /*
+ * Since we are curproc, clock will normally just change
+ * our priority without moving us from one queue to another
+ * (since the running process is not on a queue.)
+ * If that happened after we setrq ourselves but before we
+ * swtch()'ed, we might not be on the queue indicated by
+ * our priority.
+ */
+ (void) splclock();
+ setrq(p);
+ p->p_stats->p_ru.ru_nivcsw++;
+ swtch();
+ (void) splnone();
+ while (i = CURSIG(p))
+ psig(i);
+ }
+ if (p->p_stats->p_prof.pr_scale) {
+ int ticks;
+ struct timeval *tv = &p->p_stime;
+
+ ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
+ (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
+ if (ticks) {
+#ifdef PROFTIMER
+ extern int profscale;
+ addupc(frame.sf_eip, &p->p_stats->p_prof,
+ ticks * profscale);
+#else
+ addupc(frame.sf_eip, &p->p_stats->p_prof, ticks);
+#endif
+ }
+ }
+ curpri = p->p_pri;
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p->p_tracep, code, error, rval[0]);
+#endif
+#ifdef DIAGNOSTICx
+{ extern int _udatasel, _ucodesel;
+ if (frame.sf_ss != _udatasel)
+ printf("ss %x call %d\n", frame.sf_ss, code);
+ if ((frame.sf_cs&0xffff) != _ucodesel)
+ printf("cs %x call %d\n", frame.sf_cs, code);
+ if (frame.sf_eip > VM_MAXUSER_ADDRESS) {
+ printf("eip %x call %d\n", frame.sf_eip, code);
+ frame.sf_eip = 0;
+ }
+}
+#endif
+}
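
Both trap() and syscall() above charge profiler time with the same
expression: elapsed system time in milliseconds divided by the length of a
clock tick in milliseconds.  A standalone version of just that arithmetic;
the struct, the function name and the hz/tick values (hz=100, tick=10000us)
are illustrative, not taken from the kernel headers:

    #include <stdio.h>

    struct tval { long tv_sec; long tv_usec; };

    static int
    profticks(struct tval start, struct tval now, int tick /* usec per tick */)
    {
            return (int)(((now.tv_sec - start.tv_sec) * 1000 +
                (now.tv_usec - start.tv_usec) / 1000) / (tick / 1000));
    }

    int
    main(void)
    {
            struct tval syst = { 10, 200000 };  /* p_stime on entry */
            struct tval cur  = { 10, 450000 };  /* p_stime on exit  */

            /* 250 ms elapsed at 10 ms per tick -> 25 ticks */
            printf("%d\n", profticks(syst, cur, 10000));
            return 0;
    }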
diff --git a/sys/amd64/amd64/tsc.c b/sys/amd64/amd64/tsc.c
new file mode 100644
index 0000000..0fb7701
--- /dev/null
+++ b/sys/amd64/amd64/tsc.c
@@ -0,0 +1,271 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz and Don Ahn.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)clock.c 7.2 (Berkeley) 5/12/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 5 00158
+ * -------------------- ----- ----------------------
+ *
+ * 14 Aug 92 Arne Henrik Juul Added code in the kernel to
+ * allow for DST in the BIOS.
+ * 17 Jan 93 Bruce Evans Fixed leap year and second
+ * calculations
+ * 01 Feb 93	Julian Elischer		Added code for the cpu
+ * speed independent spinwait()
+ * function, (used by scsi and others)
+ * 25 Mar 93 Sean Eric Fagan Add microtimer support using timer 1
+ * 08 Apr 93 Poul-Henning Kamp/P-HK Fixes, and support for dcfclock
+ * 26 Apr 93 Bruce Evans Eliminate findspeed, new spinwait
+ * 26 Apr 93 Rodney W. Grimes I merged in Bruce changes and hope I
+ * still kept the other fixes... Had to
+ * add back in findcpuspeed that Bruce
+ * had removed.
+ */
+
+/*
+ * Primitive clock interrupt routines.
+ */
+#include "param.h"
+#include "systm.h"
+#include "time.h"
+#include "kernel.h"
+#include "machine/segments.h"
+#include "i386/isa/icu.h"
+#include "i386/isa/isa.h"
+#include "i386/isa/rtc.h"
+#include "i386/isa/timerreg.h"
+
+#define DAYST 119
+#define DAYEN 303
+
+/* X-tals being what they are, it's nice to be able to fudge this one... */
+/* Note, the name changed here from XTALSPEED to TIMER_FREQ rgrimes 4/26/93 */
+#ifndef TIMER_FREQ
+#define TIMER_FREQ 1193182 /* XXX - should be in isa.h */
+#endif
+
+startrtclock() {
+ int s;
+
+ findcpuspeed(); /* use the clock (while it's free)
+ to find the cpu speed */
+ /* initialize 8253 clock */
+ outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
+
+ /* Correct rounding will buy us a better precision in timekeeping */
+ outb (IO_TIMER1, (TIMER_FREQ+hz/2)/hz);
+ outb (IO_TIMER1, ((TIMER_FREQ+hz/2)/hz)/256);
+
+ /* initialize brain-dead battery powered clock */
+ outb (IO_RTC, RTC_STATUSA);
+ outb (IO_RTC+1, 0x26);
+ outb (IO_RTC, RTC_STATUSB);
+ outb (IO_RTC+1, 2);
+
+ outb (IO_RTC, RTC_DIAG);
+ if (s = inb (IO_RTC+1))
+ printf("RTC BIOS diagnostic error %b\n", s, RTCDG_BITS);
+ outb (IO_RTC, RTC_DIAG);
+ outb (IO_RTC+1, 0);
+}
+
+unsigned int delaycount; /* calibrated loop variable (1 millisecond) */
+
+#define FIRST_GUESS 0x2000
+findcpuspeed()
+{
+ unsigned char low;
+ unsigned int remainder;
+
+ /* Put counter in count down mode */
+ outb(IO_TIMER1+3, 0x34);
+ outb(IO_TIMER1, 0xff);
+ outb(IO_TIMER1, 0xff);
+ delaycount = FIRST_GUESS;
+ spinwait(1);
+ /* Read the value left in the counter */
+	low = inb(IO_TIMER1);	/* least significant */
+ remainder = inb(IO_TIMER1); /* most significant */
+ remainder = (remainder<<8) + low ;
+ /* Formula for delaycount is :
+ * (loopcount * timer clock speed)/ (counter ticks * 1000)
+ */
+ delaycount = (FIRST_GUESS * (TIMER_FREQ/1000)) / (0xffff-remainder);
+}
+
+
+/* convert 2 digit BCD number */
+bcd(i)
+int i;
+{
+ return ((i/16)*10 + (i%16));
+}
+
+/* convert years to seconds (from 1970) */
+unsigned long
+ytos(y)
+int y;
+{
+ int i;
+ unsigned long ret;
+
+ ret = 0;
+ for(i = 1970; i < y; i++) {
+ if (i % 4) ret += 365*24*60*60;
+ else ret += 366*24*60*60;
+ }
+ return ret;
+}
+
+/* convert months to seconds */
+unsigned long
+mtos(m,leap)
+int m,leap;
+{
+ int i;
+ unsigned long ret;
+
+ ret = 0;
+ for(i=1;i<m;i++) {
+ switch(i){
+ case 1: case 3: case 5: case 7: case 8: case 10: case 12:
+ ret += 31*24*60*60; break;
+ case 4: case 6: case 9: case 11:
+ ret += 30*24*60*60; break;
+ case 2:
+ if (leap) ret += 29*24*60*60;
+ else ret += 28*24*60*60;
+ }
+ }
+ return ret;
+}
+
+
+/*
+ * Initialize the time of day register, based on the time base which is, e.g.
+ * from a filesystem.
+ */
+inittodr(base)
+ time_t base;
+{
+ unsigned long sec;
+ int leap,day_week,t,yd;
+ int sa,s;
+
+ /* do we have a realtime clock present? (otherwise we loop below) */
+ sa = rtcin(RTC_STATUSA);
+ if (sa == 0xff || sa == 0) return;
+
+ /* ready for a read? */
+ while ((sa&RTCSA_TUP) == RTCSA_TUP)
+ sa = rtcin(RTC_STATUSA);
+
+ sec = bcd(rtcin(RTC_YEAR)) + 1900;
+ if (sec < 1970)
+ sec += 100;
+ leap = !(sec % 4); sec = ytos(sec); /* year */
+ yd = mtos(bcd(rtcin(RTC_MONTH)),leap); sec += yd; /* month */
+ t = (bcd(rtcin(RTC_DAY))-1) * 24*60*60; sec += t; yd += t; /* date */
+ day_week = rtcin(RTC_WDAY); /* day */
+ sec += bcd(rtcin(RTC_HRS)) * 60*60; /* hour */
+ sec += bcd(rtcin(RTC_MIN)) * 60; /* minutes */
+ sec += bcd(rtcin(RTC_SEC)); /* seconds */
+
+ /* XXX off by one? Need to calculate DST on SUNDAY */
+ /* Perhaps we should have the RTC hold GMT time to save */
+ /* us the bother of converting. */
+ yd = yd / (24*60*60);
+ if ((yd >= DAYST) && ( yd <= DAYEN)) {
+ sec -= 60*60;
+ }
+ sec += tz.tz_minuteswest * 60;
+
+ time.tv_sec = sec;
+}
+
+#ifdef garbage
+/*
+ * Initialize the time of day register, based on the time base which is, e.g.
+ * from a filesystem.
+ */
+test_inittodr(base)
+ time_t base;
+{
+
+ outb(IO_RTC,9); /* year */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,8); /* month */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,7); /* day */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,4); /* hour */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,2); /* minutes */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,0); /* seconds */
+ printf("%d\n",bcd(inb(IO_RTC+1)));
+
+ time.tv_sec = base;
+}
+#endif
+
+/*
+ * Restart the clock.
+ */
+resettodr()
+{
+}
+
+/*
+ * Wire clock interrupt in.
+ */
+#define V(s) __CONCAT(V, s)
+extern V(clk)();
+enablertclock() {
+ setidt(ICU_OFFSET+0, &V(clk), SDT_SYS386IGT, SEL_KPL);
+ INTREN(IRQ0);
+}
+
+/*
+ * Delay for some number of milliseconds.
+ */
+void
+spinwait(millisecs)
+ int millisecs;
+{
+ DELAY(1000 * millisecs);
+}
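
Two of the calculations above are easy to check in isolation: the BCD decode
applied to the RTC register reads, and the rounded 8253 divisor that
startrtclock() programs into timer 0.  A standalone sketch, with hz assumed
to be 100:

    #include <stdio.h>

    #define TIMER_FREQ      1193182     /* 8253 input clock, Hz (as above) */

    /* convert a two-digit BCD value to binary, as bcd() above does */
    static int
    bcd(int i)
    {
            return (i / 16) * 10 + (i % 16);
    }

    int
    main(void)
    {
            int hz = 100;               /* assumed clock interrupt rate */

            /* an RTC seconds register of 0x59 means 59 decimal */
            printf("bcd(0x59) = %d\n", bcd(0x59));

            /* rounded divisor: (1193182 + 50) / 100 = 11932 */
            printf("divisor   = %d\n", (TIMER_FREQ + hz / 2) / hz);
            return 0;
    }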
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
new file mode 100644
index 0000000..27ef912
--- /dev/null
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -0,0 +1,410 @@
+/*-
+ * Copyright (c) 1982, 1986 The Regents of the University of California.
+ * Copyright (c) 1989, 1990 William Jolitz
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00154
+ * -------------------- ----- ----------------------
+ *
+ * 20 Apr 93 Bruce Evans New npx-0.5 code
+ *
+ */
+
+/*
+ * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
+ */
+static char rcsid[] = "$Header: /usr/bill/working/sys/i386/i386/RCS/vm_machdep.c,v 1.2 92/01/21 14:22:17 william Exp $";
+
+#include "param.h"
+#include "systm.h"
+#include "proc.h"
+#include "malloc.h"
+#include "buf.h"
+#include "user.h"
+
+#include "../include/cpu.h"
+
+#include "vm/vm.h"
+#include "vm/vm_kern.h"
+
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ * Copy and update the kernel stack and pcb, making the child
+ * ready to run, and marking it so that it can return differently
+ * than the parent. Returns 1 in the child process, 0 in the parent.
+ * We currently double-map the user area so that the stack is at the same
+ * address in each process; in the future we will probably relocate
+ * the frame pointers on the stack after copying.
+ */
+cpu_fork(p1, p2)
+ register struct proc *p1, *p2;
+{
+ register struct user *up = p2->p_addr;
+ int foo, offset, addr, i;
+ extern char kstack[];
+ extern int mvesp();
+
+ /*
+ * Copy pcb and stack from proc p1 to p2.
+ * We do this as cheaply as possible, copying only the active
+ * part of the stack. The stack and pcb need to agree;
+ * this is tricky, as the final pcb is constructed by savectx,
+ * but its frame isn't yet on the stack when the stack is copied.
+ * swtch compensates for this when the child eventually runs.
+ * This should be done differently, with a single call
+ * that copies and updates the pcb+stack,
+ * replacing the bcopy and savectx.
+ */
+ p2->p_addr->u_pcb = p1->p_addr->u_pcb;
+ offset = mvesp() - (int)kstack;
+ bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
+ (unsigned) ctob(UPAGES) - offset);
+ p2->p_regs = p1->p_regs;
+
+ /*
+	 * Wire top of address space of child to its kstack.
+ * First, fault in a page of pte's to map it.
+ */
+ addr = trunc_page((u_int)vtopte(kstack));
+ vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+NBPG, FALSE);
+ for (i=0; i < UPAGES; i++)
+ pmap_enter(&p2->p_vmspace->vm_pmap, kstack+i*NBPG,
+ pmap_extract(kernel_pmap, ((int)p2->p_addr)+i*NBPG), VM_PROT_READ, 1);
+
+ pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);
+
+ /*
+ *
+ * Arrange for a non-local goto when the new process
+ * is started, to resume here, returning nonzero from setjmp.
+ */
+ if (savectx(up, 1)) {
+ /*
+ * Return 1 in child.
+ */
+ return (1);
+ }
+ return (0);
+}
+
+#ifdef notyet
+/*
+ * cpu_exit is called as the last action during exit.
+ *
+ * We change to an inactive address space and a "safe" stack,
+ * passing thru an argument to the new stack. Now, safely isolated
+ * from the resources we're shedding, we release the address space
+ * and any remaining machine-dependent resources, including the
+ * memory for the user structure and kernel stack.
+ *
+ * Next, we assign a dummy context to be written over by swtch,
+ * calling it to send this process off to oblivion.
+ * [The nullpcb allows us to minimize cost in swtch() by not having
+ * a special case].
+ */
+struct proc *swtch_to_inactive();
+cpu_exit(p)
+ register struct proc *p;
+{
+ static struct pcb nullpcb; /* pcb to overwrite on last swtch */
+
+#ifdef NPX
+ npxexit(p);
+#endif
+
+	/* move to inactive space and stack, passing arg across */
+ p = swtch_to_inactive(p);
+
+ /* drop per-process resources */
+ vmspace_free(p->p_vmspace);
+ kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
+
+ p->p_addr = (struct user *) &nullpcb;
+ splclock();
+ swtch();
+ /* NOTREACHED */
+}
+#else
+cpu_exit(p)
+ register struct proc *p;
+{
+
+#ifdef NPX
+ npxexit(p);
+#endif
+ splclock();
+ swtch();
+}
+
+cpu_wait(p) struct proc *p; {
+
+ /* drop per-process resources */
+ vmspace_free(p->p_vmspace);
+ kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
+}
+#endif
+
+/*
+ * Set a red zone in the kernel stack after the u. area.
+ */
+setredzone(pte, vaddr)
+ u_short *pte;
+ caddr_t vaddr;
+{
+/* eventually do this by setting up an expand-down stack segment
+ for ss0: selector, allowing stack access down to top of u.
+ this means though that protection violations need to be handled
+ thru a double fault exception that must do an integral task
+ switch to a known good context, within which a dump can be
+ taken. a sensible scheme might be to save the initial context
+ used by sched (that has physical memory mapped 1:1 at bottom)
+ and take the dump while still in mapped mode */
+}
+
+/*
+ * Move pages from one kernel virtual address to another.
+ * Both addresses are assumed to reside in the Sysmap,
+ * and size must be a multiple of CLSIZE.
+ */
+pagemove(from, to, size)
+ register caddr_t from, to;
+ int size;
+{
+ register struct pte *fpte, *tpte;
+
+ if (size % CLBYTES)
+ panic("pagemove");
+ fpte = kvtopte(from);
+ tpte = kvtopte(to);
+ while (size > 0) {
+ *tpte++ = *fpte;
+ *(int *)fpte++ = 0;
+ from += NBPG;
+ to += NBPG;
+ size -= NBPG;
+ }
+ tlbflush();
+}
+
+/*
+ * Convert kernel VA to physical address
+ */
+kvtop(addr)
+ register caddr_t addr;
+{
+ vm_offset_t va;
+
+ va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
+ if (va == 0)
+ panic("kvtop: zero page frame");
+ return((int)va);
+}
+
+#ifdef notdef
+/*
+ * The probe[rw] routines should probably be redone in assembler
+ * for efficiency.
+ */
+prober(addr)
+ register u_int addr;
+{
+ register int page;
+ register struct proc *p;
+
+ if (addr >= USRSTACK)
+ return(0);
+ p = u.u_procp;
+ page = btop(addr);
+ if (page < dptov(p, p->p_dsize) || page > sptov(p, p->p_ssize))
+ return(1);
+ return(0);
+}
+
+probew(addr)
+ register u_int addr;
+{
+ register int page;
+ register struct proc *p;
+
+ if (addr >= USRSTACK)
+ return(0);
+ p = u.u_procp;
+ page = btop(addr);
+ if (page < dptov(p, p->p_dsize) || page > sptov(p, p->p_ssize))
+ return((*(int *)vtopte(p, page) & PG_PROT) == PG_UW);
+ return(0);
+}
+
+/*
+ * NB: assumes a physically contiguous kernel page table
+ * (makes life a LOT simpler).
+ */
+kernacc(addr, count, rw)
+ register u_int addr;
+ int count, rw;
+{
+ register struct pde *pde;
+ register struct pte *pte;
+ register int ix, cnt;
+ extern long Syssize;
+
+ if (count <= 0)
+ return(0);
+ pde = (struct pde *)((u_int)u.u_procp->p_p0br + u.u_procp->p_szpt * NBPG);
+ ix = (addr & PD_MASK) >> PD_SHIFT;
+ cnt = ((addr + count + (1 << PD_SHIFT) - 1) & PD_MASK) >> PD_SHIFT;
+ cnt -= ix;
+ for (pde += ix; cnt; cnt--, pde++)
+ if (pde->pd_v == 0)
+ return(0);
+ ix = btop(addr-0xfe000000);
+ cnt = btop(addr-0xfe000000+count+NBPG-1);
+ if (cnt > (int)&Syssize)
+ return(0);
+ cnt -= ix;
+ for (pte = &Sysmap[ix]; cnt; cnt--, pte++)
+ if (pte->pg_v == 0 /*|| (rw == B_WRITE && pte->pg_prot == 1)*/)
+ return(0);
+ return(1);
+}
+
+useracc(addr, count, rw)
+ register u_int addr;
+ int count, rw;
+{
+ register int (*func)();
+ register u_int addr2;
+ extern int prober(), probew();
+
+ if (count <= 0)
+ return(0);
+ addr2 = addr;
+ addr += count;
+ func = (rw == B_READ) ? prober : probew;
+ do {
+ if ((*func)(addr2) == 0)
+ return(0);
+ addr2 = (addr2 + NBPG) & ~PGOFSET;
+ } while (addr2 < addr);
+ return(1);
+}
+#endif
+
+extern vm_map_t phys_map;
+
+/*
+ * Map an IO request into kernel virtual address space. Requests fall into
+ * one of five categories:
+ *
+ * B_PHYS|B_UAREA: User u-area swap.
+ * Address is relative to start of u-area (p_addr).
+ * B_PHYS|B_PAGET: User page table swap.
+ * Address is a kernel VA in usrpt (Usrptmap).
+ * B_PHYS|B_DIRTY: Dirty page push.
+ * Address is a VA in proc2's address space.
+ * B_PHYS|B_PGIN: Kernel pagein of user pages.
+ * Address is VA in user's address space.
+ * B_PHYS: User "raw" IO request.
+ * Address is VA in user's address space.
+ *
+ * All requests are (re)mapped into kernel VA space via the useriomap
+ * (a name with only slightly more meaning than "kernelmap")
+ */
+vmapbuf(bp)
+ register struct buf *bp;
+{
+ register int npf;
+ register caddr_t addr;
+ register long flags = bp->b_flags;
+ struct proc *p;
+ int off;
+ vm_offset_t kva;
+ register vm_offset_t pa;
+
+ if ((flags & B_PHYS) == 0)
+ panic("vmapbuf");
+ addr = bp->b_saveaddr = bp->b_un.b_addr;
+ off = (int)addr & PGOFSET;
+ p = bp->b_proc;
+ npf = btoc(round_page(bp->b_bcount + off));
+ kva = kmem_alloc_wait(phys_map, ctob(npf));
+ bp->b_un.b_addr = (caddr_t) (kva + off);
+ while (npf--) {
+ pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
+ if (pa == 0)
+ panic("vmapbuf: null page frame");
+ pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
+ VM_PROT_READ|VM_PROT_WRITE, TRUE);
+ addr += PAGE_SIZE;
+ kva += PAGE_SIZE;
+ }
+}
+
+/*
+ * Free the io map PTEs associated with this IO operation.
+ * We also invalidate the TLB entries and restore the original b_addr.
+ */
+vunmapbuf(bp)
+ register struct buf *bp;
+{
+ register int npf;
+ register caddr_t addr = bp->b_un.b_addr;
+ vm_offset_t kva;
+
+ if ((bp->b_flags & B_PHYS) == 0)
+ panic("vunmapbuf");
+ npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
+ kva = (vm_offset_t)((int)addr & ~PGOFSET);
+ kmem_free_wakeup(phys_map, kva, ctob(npf));
+ bp->b_un.b_addr = bp->b_saveaddr;
+ bp->b_saveaddr = NULL;
+}
+
+/*
+ * Force reset the processor by invalidating the entire address space!
+ */
+cpu_reset() {
+
+ /* force a shutdown by unmapping entire address space ! */
+ bzero((caddr_t) PTD, NBPG);
+
+ /* "good night, sweet prince .... <THUNK!>" */
+ tlbflush();
+ /* NOTREACHED */
+}
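
As an aside on vmapbuf() above: the page-rounding arithmetic (the byte offset into the first page, then the number of page frames covering the transfer) can be checked in isolation. A minimal standalone sketch, not part of the import, with NBPG, PGOFSET, round_page() and btoc() redefined locally as stand-ins for the kernel macros and a 4096-byte page assumed:

    #include <stdio.h>

    #define NBPG          4096                      /* assumed page size */
    #define PGOFSET       (NBPG - 1)                /* byte offset within a page */
    #define round_page(x) (((x) + PGOFSET) & ~PGOFSET)
    #define btoc(x)       ((unsigned)(x) >> 12)     /* bytes to clicks (pages) */

    int
    main(void)
    {
            unsigned addr = 0x1234;        /* hypothetical user buffer VA */
            unsigned bcount = 10000;       /* hypothetical transfer length */
            unsigned off = addr & PGOFSET; /* same as vmapbuf's 'off' */
            unsigned npf = btoc(round_page(bcount + off));

            /* 10000 bytes starting 0x234 into a page span 3 pages */
            printf("off = 0x%x, pages mapped = %u\n", off, npf);
            return 0;
    }

A 10000-byte transfer starting 0x234 bytes into a page needs three kernel pages, which is what vmapbuf() would allocate from phys_map before entering the PTEs.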
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
new file mode 100644
index 0000000..583d76c
--- /dev/null
+++ b/sys/amd64/include/cpu.h
@@ -0,0 +1,108 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)cpu.h 5.4 (Berkeley) 5/9/91
+ */
+
+/*
+ * Definitions unique to i386 cpu support.
+ */
+#include "machine/frame.h"
+#include "machine/segments.h"
+
+/*
+ * definitions of cpu-dependent requirements
+ * referenced in generic code
+ */
+#undef COPY_SIGCODE /* don't copy sigcode above user stack in exec */
+
+/*
+ * function vs. inline configuration;
+ * these are defined to get generic functions
+ * rather than inline or machine-dependent implementations
+ */
+#define NEED_MINMAX /* need {,i,l,ul}{min,max} functions */
+#define NEED_FFS /* need ffs function */
+#define NEED_BCMP /* need bcmp function */
+#define NEED_STRLEN /* need strlen function */
+
+#define cpu_exec(p) /* nothing */
+
+/*
+ * Arguments to hardclock, softclock and gatherstats
+ * encapsulate the previous machine state in an opaque
+ * clockframe; for now, use generic intrframe.
+ */
+typedef struct intrframe clockframe;
+
+#define CLKF_USERMODE(framep) (ISPL((framep)->if_cs) == SEL_UPL)
+#define CLKF_BASEPRI(framep) ((framep)->if_ppl == 0)
+#define CLKF_PC(framep) ((framep)->if_eip)
+
+#define resettodr() /* no todr to set */
+
+/*
+ * Preempt the current process if in interrupt from user mode,
+ * or after the current trap/syscall if in system mode.
+ */
+#define need_resched() { want_resched++; aston(); }
+
+/*
+ * Give a profiling tick to the current process from the softclock
+ * interrupt. On the i386, request an ast to send us through trap(),
+ * marking the proc as needing a profiling tick.
+ */
+#define profile_tick(p, framep) { (p)->p_flag |= SOWEUPC; aston(); }
+
+/*
+ * Notify the current process (p) that it has a signal pending,
+ * process as soon as possible.
+ */
+#define signotify(p) aston()
+
+#define aston() (astpending++)
+
+int astpending; /* need to trap before returning to user mode */
+int want_resched; /* resched() was called */
+
+/*
+ * Kinds of processor
+ */
+
+#define CPU_386SX 0
+#define CPU_386 1
+#define CPU_486SX 2
+#define CPU_486 3
+#define CPU_586 4
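
The CLKF_USERMODE() macro above decides user vs. kernel mode purely from the low two bits of the saved %cs selector. A standalone sketch of the same test, with ISPL() and SEL_UPL copied here as assumptions (their real definitions are in segments.h):

    #include <stdio.h>

    #define ISPL(s)  ((s) & 3)   /* privilege level of a selector, as in segments.h */
    #define SEL_UPL  3           /* user privilege level */

    /* Mirrors CLKF_USERMODE(): was the tick taken while running user code? */
    static int
    clkf_usermode(int saved_cs)
    {
            return ISPL(saved_cs) == SEL_UPL;
    }

    int
    main(void)
    {
            printf("cs=0x08 -> user? %d\n", clkf_usermode(0x08));  /* kernel code selector */
            printf("cs=0x1f -> user? %d\n", clkf_usermode(0x1f));  /* user code selector, RPL 3 */
            return 0;
    }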
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
new file mode 100644
index 0000000..e3b4a8c
--- /dev/null
+++ b/sys/amd64/include/cpufunc.h
@@ -0,0 +1,82 @@
+/*
+ * Functions to provide access to special i386 instructions.
+ * XXX - bezillions more are defined in locore.s but are not declared anywhere.
+ */
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+#ifdef __GNUC__
+
+static __inline int bdb(void)
+{
+ extern int bdb_exists;
+
+ if (!bdb_exists)
+ return (0);
+ __asm("int $3");
+ return (1);
+}
+
+static __inline void
+disable_intr(void)
+{
+ __asm __volatile("cli");
+}
+
+static __inline void
+enable_intr(void)
+{
+ __asm __volatile("sti");
+}
+
+/*
+ * This roundabout method of returning a u_char helps stop gcc-1.40 from
+ * generating unnecessary movzbl's.
+ */
+#define inb(port) ((u_char) u_int_inb(port))
+
+static __inline u_int
+u_int_inb(u_int port)
+{
+ u_char data;
+ /*
+ * We use %%dx and not %1 here because i/o is done at %dx and not at
+ * %edx, while gcc-2.2.2 generates inferior code (movw instead of movl)
+ * if we tell it to load (u_short) port.
+ */
+ __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
+ return data;
+}
+
+static __inline void
+outb(u_int port, u_char data)
+{
+ register u_char al asm("ax");
+
+ al = data; /* help gcc-1.40's register allocator */
+ __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
+}
+
+#else /* not __GNUC__ */
+
+int bdb __P((void));
+void disable_intr __P((void));
+void enable_intr __P((void));
+u_char inb __P((u_int port));
+void outb __P((u_int port, u_int data)); /* XXX - incompat */
+
+#endif /* __GNUC__ */
+
+#define really_u_int int /* XXX */
+#define really_void int /* XXX */
+
+void load_cr0 __P((u_int cr0));
+really_u_int rcr0 __P((void));
+
+#ifdef notyet
+really_void setidt __P((int idx, /*XXX*/caddr_t func, int typ, int dpl));
+#endif
+
+#undef really_u_int
+#undef really_void
diff --git a/sys/amd64/include/db_machdep.h b/sys/amd64/include/db_machdep.h
new file mode 100644
index 0000000..8e37eec
--- /dev/null
+++ b/sys/amd64/include/db_machdep.h
@@ -0,0 +1,154 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: db_machdep.h,v $
+ * Revision 2.8 92/02/19 15:07:56 elf
+ * Added db_thread_fp_used.
+ * [92/02/19 rpd]
+ *
+ * Revision 2.7 91/10/09 16:06:28 af
+ * Revision 2.6.3.1 91/10/05 13:10:32 jeffreyh
+ * Added access and task name macros.
+ * [91/08/29 tak]
+ *
+ * Revision 2.6.3.1 91/10/05 13:10:32 jeffreyh
+ * Added access and task name macros.
+ * [91/08/29 tak]
+ *
+ * Revision 2.6 91/05/14 16:05:58 mrt
+ * Correcting copyright
+ *
+ * Revision 2.5 91/02/05 17:11:17 mrt
+ * Changed to new Mach copyright
+ * [91/02/01 17:31:24 mrt]
+ *
+ * Revision 2.4 91/01/08 15:10:16 rpd
+ * Added dummy inst_load/inst_store macros.
+ * [90/12/11 rpd]
+ *
+ * Revision 2.3 90/10/25 14:44:49 rwd
+ * Added watchpoint support.
+ * [90/10/18 rpd]
+ *
+ * Revision 2.2 90/08/27 21:56:15 dbg
+ * Created.
+ * [90/07/25 dbg]
+ *
+ */
+
+#ifndef _I386_DB_MACHDEP_H_
+#define _I386_DB_MACHDEP_H_
+
+/*
+ * Machine-dependent defines for new kernel debugger.
+ */
+
+
+/* #include <mach/i386/vm_types.h> */
+/* #include <mach/i386/vm_param.h> */
+#include <vm/vm_prot.h>
+#include <vm/vm_param.h>
+#include <vm/vm_inherit.h>
+#include <vm/lock.h>
+/* #include <i386/thread.h> */ /* for thread_status */
+#include <machine/frame.h> /* for struct trapframe */
+/* #include <i386/eflags.h> */
+#include <machine/eflags.h> /* from Mach... */
+/* #include <i386/trap.h> */
+#include <machine/trap.h>
+
+#define i386_saved_state trapframe
+/* end of mangling */
+
+typedef vm_offset_t db_addr_t; /* address - unsigned */
+typedef int db_expr_t; /* expression - signed */
+
+typedef struct i386_saved_state db_regs_t;
+db_regs_t ddb_regs; /* register state */
+#define DDB_REGS (&ddb_regs)
+
+#define PC_REGS(regs) ((db_addr_t)(regs)->tf_eip)
+
+#define BKPT_INST 0xcc /* breakpoint instruction */
+#define BKPT_SIZE (1) /* size of breakpoint inst */
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define FIXUP_PC_AFTER_BREAK ddb_regs.tf_eip -= 1;
+
+#define db_clear_single_step(regs) ((regs)->tf_eflags &= ~EFL_TF)
+#define db_set_single_step(regs) ((regs)->tf_eflags |= EFL_TF)
+
+/* #define IS_BREAKPOINT_TRAP(type, code) ((type) == T_INT3) */
+/* #define IS_WATCHPOINT_TRAP(type, code) ((type) == T_WATCHPOINT) */
+/* using the 386bsd values, rather than the Mach ones: */
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_BPTFLT)
+#define IS_WATCHPOINT_TRAP(type, code) ((type) == T_KDBTRAP)
+
+#define I_CALL 0xe8
+#define I_CALLI 0xff
+#define I_RET 0xc3
+#define I_IRET 0xcf
+
+#define inst_trap_return(ins) (((ins)&0xff) == I_IRET)
+#define inst_return(ins) (((ins)&0xff) == I_RET)
+#define inst_call(ins) (((ins)&0xff) == I_CALL || \
+ (((ins)&0xff) == I_CALLI && \
+ ((ins)&0x3800) == 0x1000))
+#define inst_load(ins) 0
+#define inst_store(ins) 0
+
+/* access capability and access macros */
+
+#define DB_ACCESS_LEVEL 2 /* access any space */
+#define DB_CHECK_ACCESS(addr,size,task) \
+ db_check_access(addr,size,task)
+#define DB_PHYS_EQ(task1,addr1,task2,addr2) \
+ db_phys_eq(task1,addr1,task2,addr2)
+#define DB_VALID_KERN_ADDR(addr) \
+ ((addr) >= VM_MIN_KERNEL_ADDRESS && \
+ (addr) < VM_MAX_KERNEL_ADDRESS)
+#define DB_VALID_ADDRESS(addr,user) \
+ ((!(user) && DB_VALID_KERN_ADDR(addr)) || \
+ ((user) && (addr) < VM_MIN_KERNEL_ADDRESS))
+
+boolean_t db_check_access(/* vm_offset_t, int, task_t */);
+boolean_t db_phys_eq(/* task_t, vm_offset_t, task_t, vm_offset_t */);
+
+/* macros for printing OS server dependent task name */
+
+#define DB_TASK_NAME(task) db_task_name(task)
+#define DB_TASK_NAME_TITLE "COMMAND "
+#define DB_TASK_NAME_LEN 23
+#define DB_NULL_TASK_NAME "? "
+
+void db_task_name(/* task_t */);
+
+/* macro for checking if a thread has used floating-point */
+
+#define db_thread_fp_used(thread) ((thread)->pcb->ims.ifps != 0)
+
+#endif /* _I386_DB_MACHDEP_H_ */
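
BKPT_INST, BKPT_SIZE and FIXUP_PC_AFTER_BREAK above describe the whole breakpoint protocol: overwrite one byte with int3, and when the trap arrives back the saved eip up by one so it points at the patched instruction again. A standalone simulation of that bookkeeping (the byte buffer stands in for kernel text; nothing here traps for real):

    #include <stdio.h>

    #define BKPT_INST 0xcc   /* int3, as in db_machdep.h */
    #define BKPT_SIZE 1

    int
    main(void)
    {
            unsigned char text[4] = { 0x55, 0x89, 0xe5, 0xc3 };  /* fake instruction bytes */
            unsigned char saved;
            unsigned eip;

            /* set: remember the original byte, patch in the breakpoint */
            saved = text[1];
            text[1] = BKPT_INST;

            /* the trap reports eip past the int3; rewind as FIXUP_PC_AFTER_BREAK does */
            eip = 2;
            eip -= BKPT_SIZE;

            /* clear: restore the original instruction byte */
            text[1] = saved;

            printf("breakpoint was at offset %u, byte restored to 0x%02x\n", eip, text[1]);
            return 0;
    }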
diff --git a/sys/amd64/include/float.h b/sys/amd64/include/float.h
new file mode 100644
index 0000000..edfe0d9
--- /dev/null
+++ b/sys/amd64/include/float.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)float.h 7.1 (Berkeley) 5/8/90
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00086
+ * -------------------- ----- ----------------------
+ *
+ * 27 Feb 93 Handel/da Silva/Poirot Adjust value for MAX_DOUBLE
+ */
+
+#define FLT_RADIX 2 /* b */
+#define FLT_ROUNDS 1 /* FP addition rounds to nearest */
+
+#define FLT_MANT_DIG 24 /* p */
+#define FLT_EPSILON 1.19209290E-07F /* b**(1-p) */
+#define FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */
+#define FLT_MIN_EXP -125 /* emin */
+#define FLT_MIN 1.17549435E-38F /* b**(emin-1) */
+#define FLT_MIN_10_EXP -37 /* ceil(log10(b**(emin-1))) */
+#define FLT_MAX_EXP 128 /* emax */
+#define FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */
+#define FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */
+
+#define DBL_MANT_DIG 53
+#define DBL_EPSILON 2.2204460492503131E-16
+#define DBL_DIG 15
+#define DBL_MIN_EXP -1021
+#define DBL_MIN 2.225073858507201E-308
+#define DBL_MIN_10_EXP -307
+#define DBL_MAX_EXP 1024
+#define DBL_MAX 1.797693134862315E+308
+#define DBL_MAX_10_EXP 308
+
+#define LDBL_MANT_DIG DBL_MANT_DIG
+#define LDBL_EPSILON DBL_EPSILON
+#define LDBL_DIG DBL_DIG
+#define LDBL_MIN_EXP DBL_MIN_EXP
+#define LDBL_MIN DBL_MIN
+#define LDBL_MIN_10_EXP DBL_MIN_10_EXP
+#define LDBL_MAX_EXP DBL_MAX_EXP
+#define LDBL_MAX DBL_MAX
+#define LDBL_MAX_10_EXP DBL_MAX_10_EXP
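
These constants are most often used for tolerance tests rather than read directly; DBL_EPSILON is the spacing between 1.0 and the next representable double. A small standalone example of an epsilon-scaled comparison (illustration only, not part of the header):

    #include <float.h>
    #include <math.h>
    #include <stdio.h>

    /* Relative comparison scaled by DBL_EPSILON, the spacing at 1.0. */
    static int
    nearly_equal(double a, double b, double ulps)
    {
            double scale = fabs(a) > fabs(b) ? fabs(a) : fabs(b);
            return fabs(a - b) <= ulps * DBL_EPSILON * scale;
    }

    int
    main(void)
    {
            double x = 0.1 + 0.2;

            printf("x == 0.3           -> %d\n", x == 0.3);   /* usually 0 */
            printf("nearly_equal(x,.3) -> %d\n", nearly_equal(x, 0.3, 4.0));
            printf("DBL_EPSILON = %g, DBL_DIG = %d\n", DBL_EPSILON, DBL_DIG);
            return 0;
    }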
diff --git a/sys/amd64/include/fpu.h b/sys/amd64/include/fpu.h
new file mode 100644
index 0000000..134e0c1
--- /dev/null
+++ b/sys/amd64/include/fpu.h
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)npx.h 5.3 (Berkeley) 1/18/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00154
+ * -------------------- ----- ----------------------
+ *
+ * 20 Apr 93 Bruce Evans New npx-0.5 code
+ *
+ */
+
+/*
+ * 287/387 NPX Coprocessor Data Structures and Constants
+ * W. Jolitz 1/90
+ */
+
+#ifndef ___NPX87___
+#define ___NPX87___
+
+/* Environment information of floating point unit */
+struct env87 {
+ long en_cw; /* control word (16bits) */
+ long en_sw; /* status word (16bits) */
+ long en_tw; /* tag word (16bits) */
+ long en_fip; /* floating point instruction pointer */
+ u_short en_fcs; /* floating code segment selector */
+ u_short en_opcode; /* opcode last executed (11 bits ) */
+ long en_foo; /* floating operand offset */
+ long en_fos; /* floating operand segment selector */
+};
+
+/* Contents of each floating point accumulator */
+struct fpacc87 {
+#ifdef dontdef /* too unportable */
+ u_long fp_mantlo; /* mantissa low (31:0) */
+ u_long fp_manthi; /* mantissa high (63:32) */
+ int fp_exp:15; /* exponent */
+ int fp_sgn:1; /* mantissa sign */
+#else
+ u_char fp_bytes[10];
+#endif
+};
+
+/* Floating point context */
+struct save87 {
+ struct env87 sv_env; /* floating point control/status */
+ struct fpacc87 sv_ac[8]; /* accumulator contents, 0-7 */
+#ifndef dontdef
+ u_long sv_ex_sw; /* status word for last exception (was pad) */
+ u_long sv_ex_tw; /* tag word for last exception (was pad) */
+ u_char sv_pad[8 * 2 - 2 * 4]; /* bogus historical padding */
+#endif
+};
+
+/* Cyrix EMC memory-mapped coprocessor context switch information */
+struct emcsts {
+ long em_msw; /* memory mapped status register when switched */
+ long em_tar; /* memory mapped temp A register when switched */
+ long em_dl; /* memory mapped D low register when switched */
+};
+
+/* Intel prefers long real (53 bit) precision */
+#define __iBCS_NPXCW__ 0x262
+/* wfj prefers temporary real (64 bit) precision */
+#define __386BSD_NPXCW__ 0x362
+/*
+ * bde prefers 53 bit precision and all exceptions masked.
+ *
+ * The standard control word from finit is 0x37F, giving:
+ *
+ * round to nearest
+ * 64-bit precision
+ * all exceptions masked.
+ *
+ * Now I want:
+ *
+ * affine mode for 287's (if they work at all) (1 in bitfield 1<<12)
+ * 53-bit precision (2 in bitfield 3<<8)
+ * overflow exception unmasked (0 in bitfield 1<<3)
+ * zero divide exception unmasked (0 in bitfield 1<<2)
+ * invalid-operand exception unmasked (0 in bitfield 1<<0).
+ *
+ * 64-bit precision often gives bad results with high level languages
+ * because it makes the results of calculations depend on whether
+ * intermediate values are stored in memory or in FPU registers.
+ *
+ * The "Intel" and wfj control words have:
+ *
+ * underflow exception unmasked (0 in bitfield 1<<4)
+ *
+ * but that causes an unexpected exception in the test program 'paranoia'
+ * and makes denormals useless (DBL_MIN / 2 underflows). It doesn't make
+ * a lot of sense to trap underflow without trapping denormals.
+ *
+ * Later I will want the IEEE default of all exceptions masked. See the
+ * 0.0 math manpage for why this is better. The 0.1 math manpage is empty.
+ */
+#define __BDE_NPXCW__ 0x1272
+#define __BETTER_BDE_NPXCW__ 0x127f
+
+#ifdef __BROKEN_NPXCW__
+#ifdef __386BSD__
+#define __INITIAL_NPXCW__ __386BSD_NPXCW__
+#else
+#define __INITIAL_NPXCW__ __iBCS_NPXCW__
+#endif
+#else
+#define __INITIAL_NPXCW__ __BDE_NPXCW__
+#endif
+
+#endif /* ___NPX87___ */
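
The control words discussed above pack a precision field, a rounding field, six exception-mask bits and the 287 infinity-control bit into one 16-bit value. A standalone decoder for the values named in the comment, with the field layout restated locally (the layout is Intel-defined, not something this header declares):

    #include <stdio.h>

    #define CW_EXC_MASKS  0x003f   /* IM DM ZM OM UM PM exception mask bits */
    #define CW_PC_SHIFT   8        /* precision control, 2 bits */
    #define CW_RC_SHIFT   10       /* rounding control, 2 bits */
    #define CW_IC         0x1000   /* infinity (affine/projective) control, meaningful on the 287 */

    static void
    decode(const char *name, unsigned cw)
    {
            static const char *pc[] = { "24-bit", "reserved", "53-bit", "64-bit" };
            static const char *rc[] = { "nearest", "down", "up", "chop" };

            printf("%-18s 0x%04x: precision %s, round %s, masks 0x%02x, affine %d\n",
                name, cw, pc[(cw >> CW_PC_SHIFT) & 3], rc[(cw >> CW_RC_SHIFT) & 3],
                cw & CW_EXC_MASKS, (cw & CW_IC) != 0);
    }

    int
    main(void)
    {
            decode("finit default", 0x37f);
            decode("__iBCS_NPXCW__", 0x262);
            decode("__386BSD_NPXCW__", 0x362);
            decode("__BDE_NPXCW__", 0x1272);
            return 0;
    }

Running it shows __BDE_NPXCW__ selecting 53-bit precision with the invalid, zero-divide and overflow exceptions unmasked, exactly as the comment argues.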
diff --git a/sys/amd64/include/frame.h b/sys/amd64/include/frame.h
new file mode 100644
index 0000000..4dbabd1
--- /dev/null
+++ b/sys/amd64/include/frame.h
@@ -0,0 +1,116 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)frame.h 5.2 (Berkeley) 1/18/91
+ */
+
+/*
+ * System stack frames.
+ */
+
+/*
+ * Exception/Trap Stack Frame
+ */
+
+struct trapframe {
+ int tf_es;
+ int tf_ds;
+ int tf_edi;
+ int tf_esi;
+ int tf_ebp;
+ int tf_isp;
+ int tf_ebx;
+ int tf_edx;
+ int tf_ecx;
+ int tf_eax;
+ int tf_trapno;
+ /* below portion defined in 386 hardware */
+ int tf_err;
+ int tf_eip;
+ int tf_cs;
+ int tf_eflags;
+ /* below only when transiting rings (e.g. user to kernel) */
+ int tf_esp;
+ int tf_ss;
+};
+
+/* Interrupt stack frame */
+
+struct intrframe {
+ int if_vec;
+ int if_ppl;
+ int if_es;
+ int if_ds;
+ int if_edi;
+ int if_esi;
+ int if_ebp;
+ int :32;
+ int if_ebx;
+ int if_edx;
+ int if_ecx;
+ int if_eax;
+ int :32; /* for compat with trap frame - trapno */
+ int :32; /* for compat with trap frame - err */
+ /* below portion defined in 386 hardware */
+ int if_eip;
+ int if_cs;
+ int if_eflags;
+ /* below only when transiting rings (e.g. user to kernel) */
+ int if_esp;
+ int if_ss;
+};
+
+/*
+ * Call Gate/System Call Stack Frame
+ */
+
+struct syscframe {
+ int sf_edi;
+ int sf_esi;
+ int sf_ebp;
+ int :32; /* redundant save of isp */
+ int sf_ebx;
+ int sf_edx;
+ int sf_ecx;
+ int sf_eax;
+ int sf_eflags;
+ /* below portion defined in 386 hardware */
+/* int sf_args[N]; if call gate copy args enabled! */
+ int sf_eip;
+ int sf_cs;
+ /* below only when transiting rings (e.g. user to kernel) */
+ int sf_esp;
+ int sf_ss;
+};
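
The tail of struct trapframe mirrors what the 386 pushes in hardware, and the last two slots only exist when the trap crossed privilege rings. A standalone offsetof() check on a local copy of that tail (copied here for illustration; the kernel of course uses the struct above directly):

    #include <stddef.h>
    #include <stdio.h>

    /* Local copy of the hardware-defined tail of struct trapframe. */
    struct trapframe_tail {
            int tf_err;
            int tf_eip;
            int tf_cs;
            int tf_eflags;
            int tf_esp;   /* present only when the trap crossed rings */
            int tf_ss;
    };

    int
    main(void)
    {
            printf("eip at +%zu, cs at +%zu, eflags at +%zu bytes\n",
                offsetof(struct trapframe_tail, tf_eip),
                offsetof(struct trapframe_tail, tf_cs),
                offsetof(struct trapframe_tail, tf_eflags));
            printf("kernel-mode frames stop before tf_esp (+%zu)\n",
                offsetof(struct trapframe_tail, tf_esp));
            return 0;
    }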
diff --git a/sys/amd64/include/npx.h b/sys/amd64/include/npx.h
new file mode 100644
index 0000000..134e0c1
--- /dev/null
+++ b/sys/amd64/include/npx.h
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)npx.h 5.3 (Berkeley) 1/18/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00154
+ * -------------------- ----- ----------------------
+ *
+ * 20 Apr 93 Bruce Evans New npx-0.5 code
+ *
+ */
+
+/*
+ * 287/387 NPX Coprocessor Data Structures and Constants
+ * W. Jolitz 1/90
+ */
+
+#ifndef ___NPX87___
+#define ___NPX87___
+
+/* Environment information of floating point unit */
+struct env87 {
+ long en_cw; /* control word (16bits) */
+ long en_sw; /* status word (16bits) */
+ long en_tw; /* tag word (16bits) */
+ long en_fip; /* floating point instruction pointer */
+ u_short en_fcs; /* floating code segment selector */
+ u_short en_opcode; /* opcode last executed (11 bits ) */
+ long en_foo; /* floating operand offset */
+ long en_fos; /* floating operand segment selector */
+};
+
+/* Contents of each floating point accumulator */
+struct fpacc87 {
+#ifdef dontdef /* too unportable */
+ u_long fp_mantlo; /* mantissa low (31:0) */
+ u_long fp_manthi; /* mantissa high (63:32) */
+ int fp_exp:15; /* exponent */
+ int fp_sgn:1; /* mantissa sign */
+#else
+ u_char fp_bytes[10];
+#endif
+};
+
+/* Floating point context */
+struct save87 {
+ struct env87 sv_env; /* floating point control/status */
+ struct fpacc87 sv_ac[8]; /* accumulator contents, 0-7 */
+#ifndef dontdef
+ u_long sv_ex_sw; /* status word for last exception (was pad) */
+ u_long sv_ex_tw; /* tag word for last exception (was pad) */
+ u_char sv_pad[8 * 2 - 2 * 4]; /* bogus historical padding */
+#endif
+};
+
+/* Cyrix EMC memory-mapped coprocessor context switch information */
+struct emcsts {
+ long em_msw; /* memory mapped status register when switched */
+ long em_tar; /* memory mapped temp A register when switched */
+ long em_dl; /* memory mapped D low register when switched */
+};
+
+/* Intel prefers long real (53 bit) precision */
+#define __iBCS_NPXCW__ 0x262
+/* wfj prefers temporary real (64 bit) precision */
+#define __386BSD_NPXCW__ 0x362
+/*
+ * bde prefers 53 bit precision and all exceptions masked.
+ *
+ * The standard control word from finit is 0x37F, giving:
+ *
+ * round to nearest
+ * 64-bit precision
+ * all exceptions masked.
+ *
+ * Now I want:
+ *
+ * affine mode for 287's (if they work at all) (1 in bitfield 1<<12)
+ * 53-bit precision (2 in bitfield 3<<8)
+ * overflow exception unmasked (0 in bitfield 1<<3)
+ * zero divide exception unmasked (0 in bitfield 1<<2)
+ * invalid-operand exception unmasked (0 in bitfield 1<<0).
+ *
+ * 64-bit precision often gives bad results with high level languages
+ * because it makes the results of calculations depend on whether
+ * intermediate values are stored in memory or in FPU registers.
+ *
+ * The "Intel" and wfj control words have:
+ *
+ * underflow exception unmasked (0 in bitfield 1<<4)
+ *
+ * but that causes an unexpected exception in the test program 'paranoia'
+ * and makes denormals useless (DBL_MIN / 2 underflows). It doesn't make
+ * a lot of sense to trap underflow without trapping denormals.
+ *
+ * Later I will want the IEEE default of all exceptions masked. See the
+ * 0.0 math manpage for why this is better. The 0.1 math manpage is empty.
+ */
+#define __BDE_NPXCW__ 0x1272
+#define __BETTER_BDE_NPXCW__ 0x127f
+
+#ifdef __BROKEN_NPXCW__
+#ifdef __386BSD__
+#define __INITIAL_NPXCW__ __386BSD_NPXCW__
+#else
+#define __INITIAL_NPXCW__ __iBCS_NPXCW__
+#endif
+#else
+#define __INITIAL_NPXCW__ __BDE_NPXCW__
+#endif
+
+#endif /* ___NPX87___ */
diff --git a/sys/amd64/include/pc/display.h b/sys/amd64/include/pc/display.h
new file mode 100644
index 0000000..cab46e4
--- /dev/null
+++ b/sys/amd64/include/pc/display.h
@@ -0,0 +1,43 @@
+/*
+ * IBM PC display definitions
+ */
+
+/* Color attributes for foreground text */
+
+#define FG_BLACK 0
+#define FG_BLUE 1
+#define FG_GREEN 2
+#define FG_CYAN 3
+#define FG_RED 4
+#define FG_MAGENTA 5
+#define FG_BROWN 6
+#define FG_LIGHTGREY 7
+#define FG_DARKGREY 8
+#define FG_LIGHTBLUE 9
+#define FG_LIGHTGREEN 10
+#define FG_LIGHTCYAN 11
+#define FG_LIGHTRED 12
+#define FG_LIGHTMAGENTA 13
+#define FG_YELLOW 14
+#define FG_WHITE 15
+#define FG_BLINK 0x80
+
+/* Color attributes for text background */
+
+#define BG_BLACK 0x00
+#define BG_BLUE 0x10
+#define BG_GREEN 0x20
+#define BG_CYAN 0x30
+#define BG_RED 0x40
+#define BG_MAGENTA 0x50
+#define BG_BROWN 0x60
+#define BG_LIGHTGREY 0x70
+
+/* Monochrome attributes for foreground text */
+
+#define FG_UNDERLINE 0x01
+#define FG_INTENSE 0x08
+
+/* Monochrome attributes for text background */
+
+#define BG_INTENSE 0x10
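
These attribute values are meant to be OR-ed together and placed in the high byte of a 16-bit text-mode character cell, with the character itself in the low byte. A standalone sketch of that packing, with a few of the values copied locally (the CGA/MDA cell layout is hardware-defined, stated here as an aside):

    #include <stdio.h>

    /* A few of the display.h attribute values, copied for a standalone demo. */
    #define FG_WHITE    15
    #define FG_BLINK    0x80
    #define BG_BLUE     0x10

    /* One character cell in text memory: low byte is the character,
     * high byte is the attribute (foreground | background | blink). */
    static unsigned short
    text_cell(unsigned char ch, unsigned char attr)
    {
            return (unsigned short)(ch | (attr << 8));
    }

    int
    main(void)
    {
            unsigned char attr = FG_WHITE | BG_BLUE;   /* white on blue */

            printf("cell for 'A' = 0x%04x\n", text_cell('A', attr));
            printf("blinking     = 0x%04x\n", text_cell('A', attr | FG_BLINK));
            return 0;
    }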
diff --git a/sys/amd64/include/pcb.h b/sys/amd64/include/pcb.h
new file mode 100644
index 0000000..92bd810
--- /dev/null
+++ b/sys/amd64/include/pcb.h
@@ -0,0 +1,87 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pcb.h 5.10 (Berkeley) 5/12/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00154
+ * -------------------- ----- ----------------------
+ *
+ * 20 Apr 93 Bruce Evans New npx-0.5 code
+ *
+ */
+
+/*
+ * Intel 386 process control block
+ */
+#include "machine/tss.h"
+#include "machine/npx.h"
+
+struct pcb {
+ struct i386tss pcb_tss;
+#define pcb_ksp pcb_tss.tss_esp0
+#define pcb_ptd pcb_tss.tss_cr3
+#define pcb_cr3 pcb_ptd
+#define pcb_pc pcb_tss.tss_eip
+#define pcb_psl pcb_tss.tss_eflags
+#define pcb_usp pcb_tss.tss_esp
+#define pcb_fp pcb_tss.tss_ebp
+#ifdef notyet
+ u_char pcb_iomap[NPORT/sizeof(u_char)]; /* i/o port bitmap */
+#endif
+ struct save87 pcb_savefpu; /* floating point state for 287/387 */
+ struct emcsts pcb_saveemc; /* Cyrix EMC state */
+/*
+ * Software pcb (extension)
+ */
+ int pcb_flags;
+#ifdef notused
+#define FP_WASUSED 0x01 /* process has used fltng pnt hardware */
+#define FP_NEEDSSAVE 0x02 /* ... that needs save on next context switch */
+#define FP_NEEDSRESTORE 0x04 /* ... that needs restore on next DNA fault */
+#endif
+#define FP_USESEMC 0x08 /* process uses EMC memory-mapped mode */
+#define FM_TRAP 0x10 /* process entered kernel on a trap frame */
+#define FP_SOFTFP 0x20 /* process using software fltng pnt emulator */
+ short pcb_iml; /* interrupt mask level */
+ caddr_t pcb_onfault; /* copyin/out fault recovery */
+ long pcb_sigc[8]; /* XXX signal code trampoline */
+ int pcb_cmap2; /* XXX temporary PTE - will prefault instead */
+};
+
+#ifdef KERNEL
+struct pcb *curpcb; /* our current running pcb */
+#endif
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
new file mode 100644
index 0000000..2eff22f
--- /dev/null
+++ b/sys/amd64/include/pmap.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pmap.h 7.4 (Berkeley) 5/12/91
+ */
+
+/*
+ * Derived from hp300 version by Mike Hibler, this version by William
+ * Jolitz uses a recursive map [a pde points to the page directory] to
+ * map the page tables using the pagetables themselves. This is done to
+ * reduce the impact on kernel virtual memory for lots of sparse address
+ * space, and to reduce the cost of memory to each process.
+ *
+ * from hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
+ */
+
+#ifndef _PMAP_MACHINE_
+#define _PMAP_MACHINE_ 1
+
+/*
+ * 386 page table entry and page table directory
+ * W.Jolitz, 8/89
+ */
+
+struct pde
+{
+unsigned int
+ pd_v:1, /* valid bit */
+ pd_prot:2, /* access control */
+ pd_mbz1:2, /* reserved, must be zero */
+ pd_u:1, /* hardware maintained 'used' bit */
+ :1, /* not used */
+ pd_mbz2:2, /* reserved, must be zero */
+ :3, /* reserved for software */
+ pd_pfnum:20; /* physical page frame number of pte's*/
+};
+
+#define PD_MASK 0xffc00000 /* page directory address bits */
+#define PT_MASK 0x003ff000 /* page table address bits */
+#define PD_SHIFT 22 /* page directory address shift */
+#define PG_SHIFT 12 /* page table address shift */
+
+struct pte
+{
+unsigned int
+ pg_v:1, /* valid bit */
+ pg_prot:2, /* access control */
+ pg_mbz1:2, /* reserved, must be zero */
+ pg_u:1, /* hardware maintained 'used' bit */
+ pg_m:1, /* hardware maintained modified bit */
+ pg_mbz2:2, /* reserved, must be zero */
+ pg_w:1, /* software, wired down page */
+ :1, /* software (unused) */
+ pg_nc:1, /* 'uncacheable page' bit */
+ pg_pfnum:20; /* physical page frame number */
+};
+
+#define PG_V 0x00000001
+#define PG_RO 0x00000000
+#define PG_RW 0x00000002
+#define PG_u 0x00000004
+#define PG_PROT 0x00000006 /* all protection bits . */
+#define PG_W 0x00000200
+#define PG_N 0x00000800 /* Non-cacheable */
+#define PG_M 0x00000040
+#define PG_U 0x00000020
+#define PG_FRAME 0xfffff000
+
+#define PG_NOACC 0
+#define PG_KR 0x00000000
+#define PG_KW 0x00000002
+#define PG_URKR 0x00000004
+#define PG_URKW 0x00000004
+#define PG_UW 0x00000006
+
+/* Garbage for current bastardized pager that assumes a hp300 */
+#define PG_NV 0
+#define PG_CI 0
+/*
+ * Page Protection Exception bits
+ */
+
+#define PGEX_P 0x01 /* Protection violation vs. not present */
+#define PGEX_W 0x02 /* during a Write cycle */
+#define PGEX_U 0x04 /* access from User mode (UPL) */
+
+typedef struct pde pd_entry_t; /* page directory entry */
+typedef struct pte pt_entry_t; /* Mach page table entry */
+
+/*
+ * One page directory, shared between
+ * kernel and user modes.
+ */
+#define I386_PAGE_SIZE NBPG
+#define I386_PDR_SIZE NBPDR
+
+#define I386_KPDES 8 /* KPT page directory size */
+#define I386_UPDES NBPDR/sizeof(struct pde)-8 /* UPT page directory size */
+
+#define UPTDI 0x3f6 /* ptd entry for u./kernel&user stack */
+#define PTDPTDI 0x3f7 /* ptd entry that points to ptd! */
+#define KPTDI_FIRST 0x3f8 /* start of kernel virtual pde's */
+#define KPTDI_LAST 0x3fA /* last of kernel virtual pde's */
+
+/*
+ * Address of current and alternate address space page table maps
+ * and directories.
+ */
+#ifdef KERNEL
+extern struct pte PTmap[], APTmap[], Upte;
+extern struct pde PTD[], APTD[], PTDpde, APTDpde, Upde;
+extern pt_entry_t *Sysmap;
+
+extern int IdlePTD; /* physical address of "Idle" state directory */
+#endif
+
+/*
+ * virtual address to page table entry and
+ * to physical address. Likewise for alternate address space.
+ * Note: these work recursively, thus vtopte of a pte will give
+ * the corresponding pde that in turn maps it.
+ */
+#define vtopte(va) (PTmap + i386_btop(va))
+#define kvtopte(va) vtopte(va)
+#define ptetov(pt) (i386_ptob(pt - PTmap))
+#define vtophys(va) (i386_ptob(vtopte(va)->pg_pfnum) | ((int)(va) & PGOFSET))
+#define ispt(va) ((va) >= UPT_MIN_ADDRESS && (va) <= KPT_MAX_ADDRESS)
+
+#define avtopte(va) (APTmap + i386_btop(va))
+#define ptetoav(pt) (i386_ptob(pt - APTmap))
+#define avtophys(va) (i386_ptob(avtopte(va)->pg_pfnum) | ((int)(va) & PGOFSET))
+
+/*
+ * macros to generate page directory/table indices
+ */
+
+#define pdei(va) (((va)&PD_MASK)>>PD_SHIFT)
+#define ptei(va) (((va)&PT_MASK)>>PG_SHIFT)
+
+/*
+ * Pmap stuff
+ */
+
+struct pmap {
+ pd_entry_t *pm_pdir; /* KVA of page directory */
+ boolean_t pm_pdchanged; /* pdir changed */
+ short pm_dref; /* page directory ref count */
+ short pm_count; /* pmap reference count */
+ simple_lock_data_t pm_lock; /* lock on pmap */
+ struct pmap_statistics pm_stats; /* pmap statistics */
+ long pm_ptpages; /* more stats: PT pages */
+};
+
+typedef struct pmap *pmap_t;
+
+#ifdef KERNEL
+extern pmap_t kernel_pmap;
+#endif
+
+/*
+ * Macros for speed
+ */
+#define PMAP_ACTIVATE(pmapp, pcbp) \
+ if ((pmapp) != NULL /*&& (pmapp)->pm_pdchanged */) { \
+ (pcbp)->pcb_cr3 = \
+ pmap_extract(kernel_pmap, (pmapp)->pm_pdir); \
+ if ((pmapp) == &curproc->p_vmspace->vm_pmap) \
+ load_cr3((pcbp)->pcb_cr3); \
+ (pmapp)->pm_pdchanged = FALSE; \
+ }
+
+#define PMAP_DEACTIVATE(pmapp, pcbp)
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
+ */
+typedef struct pv_entry {
+ struct pv_entry *pv_next; /* next pv_entry */
+ pmap_t pv_pmap; /* pmap where mapping lies */
+ vm_offset_t pv_va; /* virtual address for mapping */
+ int pv_flags; /* flags */
+} *pv_entry_t;
+
+#define PV_ENTRY_NULL ((pv_entry_t) 0)
+
+#define PV_CI 0x01 /* all entries must be cache inhibited */
+#define PV_PTPAGE 0x02 /* entry maps a page table page */
+
+#ifdef KERNEL
+
+pv_entry_t pv_table; /* array of entries, one per page */
+
+#define pa_index(pa) atop(pa - vm_first_phys)
+#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
+
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+
+#endif /* KERNEL */
+
+#endif /* _PMAP_MACHINE_ */
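
The pdei()/ptei() macros above just split a linear address into its 10-bit directory index, 10-bit table index and 12-bit page offset, which is also all that vtopte()'s i386_btop() does before indexing the recursive PTmap window. A standalone check with the constants copied locally:

    #include <stdio.h>

    /* Constants copied from pmap.h for a standalone demonstration. */
    #define PD_MASK   0xffc00000U
    #define PT_MASK   0x003ff000U
    #define PD_SHIFT  22
    #define PG_SHIFT  12

    #define pdei(va)  (((va) & PD_MASK) >> PD_SHIFT)
    #define ptei(va)  (((va) & PT_MASK) >> PG_SHIFT)

    int
    main(void)
    {
            unsigned va = 0xfe012345U;   /* hypothetical kernel virtual address */

            printf("va 0x%08x: pde index %u, pte index %u, page offset 0x%03x\n",
                va, (unsigned)pdei(va), (unsigned)ptei(va), va & 0xfff);
            return 0;
    }

For a kernel address such as 0xfe012345 the directory index comes out at 0x3f8, i.e. KPTDI_FIRST, matching the layout above.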
diff --git a/sys/amd64/include/proc.h b/sys/amd64/include/proc.h
new file mode 100644
index 0000000..02f3c01
--- /dev/null
+++ b/sys/amd64/include/proc.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)proc.h 7.1 (Berkeley) 5/15/91
+ */
+
+/*
+ * Machine-dependent part of the proc structure for the i386.
+ */
+struct mdproc {
+ int md_flags; /* machine-dependent flags */
+#ifdef notyet
+ int *p_regs; /* registers on current frame */
+#endif
+};
+
+/* md_flags */
+#define MDP_AST 0x0001 /* async trap pending */
diff --git a/sys/amd64/include/psl.h b/sys/amd64/include/psl.h
new file mode 100644
index 0000000..aee73ed
--- /dev/null
+++ b/sys/amd64/include/psl.h
@@ -0,0 +1,60 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)psl.h 5.2 (Berkeley) 1/18/91
+ */
+
+/*
+ * 386 processor status longword.
+ */
+#define PSL_C 0x00000001 /* carry bit */
+#define PSL_PF 0x00000004 /* parity bit */
+#define PSL_AF 0x00000010 /* bcd carry bit */
+#define PSL_Z 0x00000040 /* zero bit */
+#define PSL_N 0x00000080 /* negative bit */
+#define PSL_T 0x00000100 /* trace enable bit */
+#define PSL_I 0x00000200 /* interrupt enable bit */
+#define PSL_D 0x00000400 /* string instruction direction bit */
+#define PSL_V 0x00000800 /* overflow bit */
+#define PSL_IOPL 0x00003000 /* i/o privilege level enable */
+#define PSL_NT 0x00004000 /* nested task bit */
+#define PSL_RF 0x00010000 /* restart flag bit */
+#define PSL_VM 0x00020000 /* virtual 8086 mode bit */
+
+#define PSL_MBZ 0xfffc7fb7 /* must be zero bits */
+#define PSL_MBO 0x00000002 /* must be one bits */
+
+#define PSL_USERSET (PSL_IOPL)
+#define PSL_USERCLR (PSL_I|PSL_NT)
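
These PSL_* masks are only ever tested and OR-ed; a standalone sketch of decoding a saved eflags value, with a few of the masks copied locally (the sample value is made up for illustration):

    #include <stdio.h>

    /* A few PSL_* values from psl.h, copied for a standalone demo. */
    #define PSL_C    0x00000001
    #define PSL_Z    0x00000040
    #define PSL_I    0x00000200
    #define PSL_IOPL 0x00003000

    int
    main(void)
    {
            unsigned eflags = 0x00000246;   /* sample saved eflags */

            printf("carry=%d zero=%d interrupts=%s iopl=%u\n",
                (eflags & PSL_C) != 0, (eflags & PSL_Z) != 0,
                (eflags & PSL_I) ? "enabled" : "disabled",
                (eflags & PSL_IOPL) >> 12);
            return 0;
    }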
diff --git a/sys/amd64/include/reg.h b/sys/amd64/include/reg.h
new file mode 100644
index 0000000..bc2f05c
--- /dev/null
+++ b/sys/amd64/include/reg.h
@@ -0,0 +1,93 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)reg.h 5.5 (Berkeley) 1/18/91
+ */
+
+/*
+ * Location of the users' stored
+ * registers within appropriate frame of 'trap' and 'syscall', relative to
+ * base of stack frame.
+ * Normal usage is u.u_ar0[XX] in kernel.
+ */
+
+/* When referenced during a trap/exception, registers are at these offsets */
+
+#define tES (0)
+#define tDS (1)
+#define tEDI (2)
+#define tESI (3)
+#define tEBP (4)
+
+#define tEBX (6)
+#define tEDX (7)
+#define tECX (8)
+#define tEAX (9)
+
+#define tEIP (12)
+#define tCS (13)
+#define tEFLAGS (14)
+#define tESP (15)
+#define tSS (16)
+
+/* During a system call, registers are at these offsets instead of above. */
+
+#define sEDI (0)
+#define sESI (1)
+#define sEBP (2)
+
+#define sEBX (4)
+#define sEDX (5)
+#define sECX (6)
+#define sEAX (7)
+#define sEFLAGS (8)
+#define sEIP (9)
+#define sCS (10)
+#define sESP (11)
+#define sSS (12)
+
+#define PC sEIP
+#define SP sESP
+#define PS sEFLAGS
+#define R0 sEDX
+#define R1 sECX
+/*
+ * Registers accessible to ptrace(2) syscall for debugger
+ */
+#ifdef IPCREG
+#define NIPCREG 14
+int ipcreg[NIPCREG] =
+ { tES,tDS,tEDI,tESI,tEBP,tEBX,tEDX,tECX,tEAX,tEIP,tCS,tEFLAGS,tESP,tSS };
+#endif
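
The ipcreg[] table maps ptrace's dense register numbering onto the sparse trap-frame offsets above, so a debugger fetches register k as u.u_ar0[ipcreg[k]]. A standalone simulation with the offsets and table copied from the header and a plain int array standing in for the saved frame:

    #include <stdio.h>

    /* Trap-time register offsets and the ptrace index table, copied from reg.h. */
    #define tES 0
    #define tDS 1
    #define tEDI 2
    #define tESI 3
    #define tEBP 4
    #define tEBX 6
    #define tEDX 7
    #define tECX 8
    #define tEAX 9
    #define tEIP 12
    #define tCS 13
    #define tEFLAGS 14
    #define tESP 15
    #define tSS 16

    #define NIPCREG 14
    static const int ipcreg[NIPCREG] =
        { tES, tDS, tEDI, tESI, tEBP, tEBX, tEDX, tECX, tEAX, tEIP, tCS, tEFLAGS, tESP, tSS };

    int
    main(void)
    {
            int frame[17];   /* stands in for the saved registers at u.u_ar0 */
            int i;

            for (i = 0; i < 17; i++)
                    frame[i] = 0x1000 + i;   /* recognizable fake contents */

            /* A debugger asking for "register 9" (EIP in this table) reads: */
            printf("ipc register 9 -> frame[%d] = 0x%x\n", ipcreg[9], frame[ipcreg[9]]);
            return 0;
    }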
diff --git a/sys/amd64/include/segments.h b/sys/amd64/include/segments.h
new file mode 100644
index 0000000..0456e59
--- /dev/null
+++ b/sys/amd64/include/segments.h
@@ -0,0 +1,196 @@
+/*-
+ * Copyright (c) 1989, 1990 William F. Jolitz
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)segments.h 7.1 (Berkeley) 5/9/91
+ */
+
+/*
+ * 386 Segmentation Data Structures and definitions
+ * William F. Jolitz (william@ernie.berkeley.edu) 6/20/1989
+ */
+
+/*
+ * Selectors
+ */
+
+#define ISPL(s) ((s)&3) /* what is the privilege level of a selector */
+#define SEL_KPL 0 /* kernel privilege level */
+#define SEL_UPL 3 /* user privilege level */
+#define ISLDT(s) ((s)&SEL_LDT) /* is it local or global */
+#define SEL_LDT 4 /* local descriptor table */
+#define IDXSEL(s) (((s)>>3) & 0x1fff) /* index of selector */
+#define LSEL(s,r) (((s)<<3) | SEL_LDT | r) /* a local selector */
+#define GSEL(s,r) (((s)<<3) | r) /* a global selector */
+
+/*
+ * Memory and System segment descriptors
+ */
+struct segment_descriptor {
+ unsigned sd_lolimit:16 ; /* segment extent (lsb) */
+ unsigned sd_lobase:24 ; /* segment base address (lsb) */
+ unsigned sd_type:5 ; /* segment type */
+ unsigned sd_dpl:2 ; /* segment descriptor privilege level */
+ unsigned sd_p:1 ; /* segment descriptor present */
+ unsigned sd_hilimit:4 ; /* segment extent (msb) */
+ unsigned sd_xx:2 ; /* unused */
+ unsigned sd_def32:1 ; /* default 32 vs 16 bit size */
+ unsigned sd_gran:1 ; /* limit granularity (byte/page units)*/
+ unsigned sd_hibase:8 ; /* segment base address (msb) */
+} ;
+
+/*
+ * Gate descriptors (e.g. indirect descriptors)
+ */
+struct gate_descriptor {
+ unsigned gd_looffset:16 ; /* gate offset (lsb) */
+ unsigned gd_selector:16 ; /* gate segment selector */
+ unsigned gd_stkcpy:5 ; /* number of stack wds to cpy */
+ unsigned gd_xx:3 ; /* unused */
+ unsigned gd_type:5 ; /* segment type */
+ unsigned gd_dpl:2 ; /* segment descriptor privilege level */
+ unsigned gd_p:1 ; /* segment descriptor present */
+ unsigned gd_hioffset:16 ; /* gate offset (msb) */
+} ;
+
+/*
+ * Generic descriptor
+ */
+union descriptor {
+ struct segment_descriptor sd;
+ struct gate_descriptor gd;
+};
+
+ /* system segments and gate types */
+#define SDT_SYSNULL 0 /* system null */
+#define SDT_SYS286TSS 1 /* system 286 TSS available */
+#define SDT_SYSLDT 2 /* system local descriptor table */
+#define SDT_SYS286BSY 3 /* system 286 TSS busy */
+#define SDT_SYS286CGT 4 /* system 286 call gate */
+#define SDT_SYSTASKGT 5 /* system task gate */
+#define SDT_SYS286IGT 6 /* system 286 interrupt gate */
+#define SDT_SYS286TGT 7 /* system 286 trap gate */
+#define SDT_SYSNULL2 8 /* system null again */
+#define SDT_SYS386TSS 9 /* system 386 TSS available */
+#define SDT_SYSNULL3 10 /* system null again */
+#define SDT_SYS386BSY 11 /* system 386 TSS busy */
+#define SDT_SYS386CGT 12 /* system 386 call gate */
+#define SDT_SYSNULL4 13 /* system null again */
+#define SDT_SYS386IGT 14 /* system 386 interrupt gate */
+#define SDT_SYS386TGT 15 /* system 386 trap gate */
+
+ /* memory segment types */
+#define SDT_MEMRO 16 /* memory read only */
+#define SDT_MEMROA 17 /* memory read only accessed */
+#define SDT_MEMRW 18 /* memory read write */
+#define SDT_MEMRWA 19 /* memory read write accessed */
+#define SDT_MEMROD 20 /* memory read only expand dwn limit */
+#define SDT_MEMRODA 21 /* memory read only expand dwn limit accessed */
+#define SDT_MEMRWD 22 /* memory read write expand dwn limit */
+#define SDT_MEMRWDA 23 /* memory read write expand dwn limit accessed */
+#define SDT_MEME 24 /* memory execute only */
+#define SDT_MEMEA 25 /* memory execute only accessed */
+#define SDT_MEMER 26 /* memory execute read */
+#define SDT_MEMERA 27 /* memory execute read accessed */
+#define SDT_MEMEC 28 /* memory execute only conforming */
+#define SDT_MEMEAC 29 /* memory execute only accessed conforming */
+#define SDT_MEMERC 30 /* memory execute read conforming */
+#define SDT_MEMERAC 31 /* memory execute read accessed conforming */
+
+/* is memory segment descriptor pointer ? */
+#define ISMEMSDP(s) ((s->d_type) >= SDT_MEMRO && (s->d_type) <= SDT_MEMERAC)
+
+/* is 286 gate descriptor pointer ? */
+#define IS286GDP(s) (((s->d_type) >= SDT_SYS286CGT \
+ && (s->d_type) < SDT_SYS286TGT))
+
+/* is 386 gate descriptor pointer ? */
+#define IS386GDP(s) (((s->d_type) >= SDT_SYS386CGT \
+ && (s->d_type) < SDT_SYS386TGT))
+
+/* is gate descriptor pointer ? */
+#define ISGDP(s) (IS286GDP(s) || IS386GDP(s))
+
+/* is segment descriptor pointer ? */
+#define ISSDP(s) (ISMEMSDP(s) || !ISGDP(s))
+
+/* is system segment descriptor pointer ? */
+#define ISSYSSDP(s) (!ISMEMSDP(s) && !ISGDP(s))
+
+/*
+ * Software definitions are kept in this convenient format; they are
+ * translated into the less convenient segment descriptor format when
+ * the 386 hardware needs to use them.
+ */
+
+struct soft_segment_descriptor {
+ unsigned ssd_base ; /* segment base address */
+ unsigned ssd_limit ; /* segment extent */
+ unsigned ssd_type:5 ; /* segment type */
+ unsigned ssd_dpl:2 ; /* segment descriptor priority level */
+ unsigned ssd_p:1 ; /* segment descriptor present */
+ unsigned ssd_xx:4 ; /* unused */
+ unsigned ssd_xx1:2 ; /* unused */
+ unsigned ssd_def32:1 ; /* default 32 vs 16 bit size */
+ unsigned ssd_gran:1 ; /* limit granularity (byte/page units)*/
+};
+
+extern ssdtosd() ;	/* compile a soft sd into a hardware sd */
+extern sdtossd() ;	/* expand a hardware sd into a soft sd */
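+
+/*
+ * Illustrative sketch (not part of the original import): one plausible shape
+ * for ssdtosd(), packing the soft format above into the hardware
+ * segment_descriptor layout.  The field mapping follows directly from the two
+ * structure definitions in this file; the real routine may differ in detail.
+ */
+#ifdef EXAMPLE_ONLY
+static void
+example_ssdtosd(ssd, sd)
+	struct soft_segment_descriptor *ssd;
+	struct segment_descriptor *sd;
+{
+	sd->sd_lobase = ssd->ssd_base & 0xffffff;	/* low 24 bits of base */
+	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;	/* high 8 bits of base */
+	sd->sd_lolimit = ssd->ssd_limit & 0xffff;	/* low 16 bits of limit */
+	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;	/* high 4 bits of limit */
+	sd->sd_type = ssd->ssd_type;
+	sd->sd_dpl = ssd->ssd_dpl;
+	sd->sd_p = ssd->ssd_p;
+	sd->sd_def32 = ssd->ssd_def32;
+	sd->sd_gran = ssd->ssd_gran;
+	sd->sd_xx = 0;					/* unused bits */
+}
+#endif /* EXAMPLE_ONLY */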
+
+/*
+ * region descriptors, used to load gdt/idt tables before any segments exist.
+ */
+struct region_descriptor {
+ unsigned rd_limit:16; /* segment extent */
+ unsigned rd_base:32; /* base address */
+};
+
+/*
+ * Segment Protection Exception code bits
+ */
+
+#define SEGEX_EXT 0x01 /* recursive or externally induced */
+#define SEGEX_IDT 0x02 /* interrupt descriptor table */
+#define SEGEX_TI 0x04 /* local descriptor table */
+ /* other bits are affected descriptor index */
+#define SEGEX_IDX(s)	(((s)>>3)&0x1fff)
+
+/*
+ * Size of IDT table
+ */
+
+#define NIDT 256
+#define NRSVIDT 32 /* reserved entries for cpu exceptions */
diff --git a/sys/amd64/include/specialreg.h b/sys/amd64/include/specialreg.h
new file mode 100644
index 0000000..d1908c9
--- /dev/null
+++ b/sys/amd64/include/specialreg.h
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)specialreg.h 7.1 (Berkeley) 5/9/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00154
+ * -------------------- ----- ----------------------
+ *
+ * 20 Apr 93 Bruce Evans New npx-0.5 code
+ *
+ */
+
+/*
+ * Bits in 386 special registers:
+ */
+
+#define CR0_PE 0x00000001 /* Protected mode Enable */
+#define CR0_MP 0x00000002 /* "Math" Present (NPX or NPX emulator) */
+#ifdef notused
+#define CR0_EM 0x00000004 /* EMulate non-NPX coproc. (trap ESC only) */
+#endif
+#define CR0_TS 0x00000008 /* Task Switched (if MP, trap ESC and WAIT) */
+#ifdef notused
+#define CR0_ET 0x00000010 /* Extension Type (387 (if set) vs 287) */
+#endif
+#define CR0_PG 0x80000000 /* PaGing enable */
+
+/*
+ * Bits in 486 special registers:
+ */
+
+#define CR0_NE 0x00000020 /* Numeric Error enable (EX16 vs IRQ13) */
+#define CR0_WP 0x00010000 /* Write Protect (honor ~PG_W in all modes) */
+#ifdef notyet
+#define CR0_AM 0x00040000 /* Alignment Mask (set to enable AC flag) */
+#endif
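+
+/*
+ * Illustrative sketch (not part of the original import): the kernel changes
+ * these bits by moving %cr0 through a general register, e.g. setting CR0_TS
+ * so that the next ESC/WAIT traps (see the CR0_TS comment above).
+ */
+#ifdef EXAMPLE_ONLY
+static void
+example_set_cr0_ts()
+{
+	unsigned long cr0;
+
+	asm volatile("movl %%cr0,%0" : "=r" (cr0));
+	cr0 |= CR0_TS;			/* FPU context no longer loaded */
+	asm volatile("movl %0,%%cr0" : : "r" (cr0));
+}
+#endif /* EXAMPLE_ONLY */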
diff --git a/sys/amd64/include/trap.h b/sys/amd64/include/trap.h
new file mode 100644
index 0000000..6015605
--- /dev/null
+++ b/sys/amd64/include/trap.h
@@ -0,0 +1,96 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)trap.h 5.4 (Berkeley) 5/9/91
+ */
+
+/*
+ * Trap type values
+ * also known in trap.c for name strings
+ */
+
+#define T_RESADFLT 0 /* reserved addressing */
+#define T_PRIVINFLT 1 /* privileged instruction */
+#define T_RESOPFLT 2 /* reserved operand */
+#define T_BPTFLT 3 /* breakpoint instruction */
+#define T_SYSCALL 5 /* system call (kcall) */
+#define T_ARITHTRAP 6 /* arithmetic trap */
+#define T_ASTFLT 7 /* system forced exception */
+#define T_SEGFLT 8 /* segmentation (limit) fault */
+#define T_PROTFLT 9 /* protection fault */
+#define T_TRCTRAP 10 /* trace trap */
+#define T_PAGEFLT 12 /* page fault */
+#define T_TABLEFLT 13 /* page table fault */
+#define T_ALIGNFLT 14 /* alignment fault */
+#define T_KSPNOTVAL 15 /* kernel stack pointer not valid */
+#define T_BUSERR 16 /* bus error */
+#define T_KDBTRAP 17 /* kernel debugger trap */
+
+#define T_DIVIDE 18 /* integer divide fault */
+#define T_NMI 19 /* non-maskable trap */
+#define T_OFLOW 20 /* overflow trap */
+#define T_BOUND 21 /* bound instruction fault */
+#define T_DNA 22 /* device not available fault */
+#define T_DOUBLEFLT 23 /* double fault */
+#define T_FPOPFLT 24 /* fp coprocessor operand fetch fault */
+#define T_TSSFLT 25 /* invalid tss fault */
+#define T_SEGNPFLT 26 /* segment not present fault */
+#define T_STKFLT 27 /* stack fault */
+#define T_RESERVED 28 /* reserved fault base */
+
+/* definitions for <sys/signal.h> */
+#define ILL_RESAD_FAULT T_RESADFLT
+#define ILL_PRIVIN_FAULT T_PRIVINFLT
+#define ILL_RESOP_FAULT T_RESOPFLT
+#define ILL_ALIGN_FAULT T_ALIGNFLT
+#define ILL_FPOP_FAULT T_FPOPFLT /* coprocessor operand fault */
+
+/* codes for SIGFPE/ARITHTRAP */
+#define FPE_INTOVF_TRAP 0x1 /* integer overflow */
+#define FPE_INTDIV_TRAP 0x2 /* integer divide by zero */
+#define FPE_FLTDIV_TRAP 0x3 /* floating/decimal divide by zero */
+#define FPE_FLTOVF_TRAP 0x4 /* floating overflow */
+#define FPE_FLTUND_TRAP 0x5 /* floating underflow */
+#define FPE_FPU_NP_TRAP 0x6 /* floating point unit not present */
+#define FPE_SUBRNG_TRAP 0x7 /* subrange out of bounds */
+
+/* codes for SIGBUS */
+#define BUS_PAGE_FAULT T_PAGEFLT /* page fault protection base */
+#define BUS_SEGNP_FAULT T_SEGNPFLT /* segment not present */
+#define BUS_STK_FAULT T_STKFLT /* stack segment */
+#define BUS_SEGM_FAULT T_RESERVED /* segment protection base */
+
+/* Traps coming from user mode */
+#define T_USER 0x100
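+
+/*
+ * Illustrative example (not part of the original import): trap.c marks traps
+ * taken from user mode by or'ing T_USER into the trap type, so a user-mode
+ * page fault can be recognized like this.
+ */
+#ifdef EXAMPLE_ONLY
+static int
+example_is_user_page_fault(type)
+	int type;
+{
+	return (type == (T_PAGEFLT | T_USER));
+}
+#endif /* EXAMPLE_ONLY */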
diff --git a/sys/amd64/include/tss.h b/sys/amd64/include/tss.h
new file mode 100644
index 0000000..8ba140d
--- /dev/null
+++ b/sys/amd64/include/tss.h
@@ -0,0 +1,78 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tss.h 5.4 (Berkeley) 1/18/91
+ */
+
+/*
+ * Intel 386 Context Data Type
+ */
+
+struct i386tss {
+ int tss_link; /* actually 16 bits: top 16 bits must be zero */
+	int	tss_esp0; 	/* kernel stack pointer privilege level 0 */
+#define tss_ksp tss_esp0
+ int tss_ss0; /* actually 16 bits: top 16 bits must be zero */
+	int	tss_esp1; 	/* kernel stack pointer privilege level 1 */
+ int tss_ss1; /* actually 16 bits: top 16 bits must be zero */
+	int	tss_esp2; 	/* kernel stack pointer privilege level 2 */
+ int tss_ss2; /* actually 16 bits: top 16 bits must be zero */
+	int	tss_cr3; 	/* page table directory (formerly struct ptd *) */
+#define tss_ptd tss_cr3
+ int tss_eip; /* program counter */
+#define tss_pc tss_eip
+ int tss_eflags; /* program status longword */
+#define tss_psl tss_eflags
+ int tss_eax;
+ int tss_ecx;
+ int tss_edx;
+ int tss_ebx;
+ int tss_esp; /* user stack pointer */
+#define tss_usp tss_esp
+ int tss_ebp; /* user frame pointer */
+#define tss_fp tss_ebp
+ int tss_esi;
+ int tss_edi;
+ int tss_es; /* actually 16 bits: top 16 bits must be zero */
+ int tss_cs; /* actually 16 bits: top 16 bits must be zero */
+ int tss_ss; /* actually 16 bits: top 16 bits must be zero */
+ int tss_ds; /* actually 16 bits: top 16 bits must be zero */
+ int tss_fs; /* actually 16 bits: top 16 bits must be zero */
+ int tss_gs; /* actually 16 bits: top 16 bits must be zero */
+ int tss_ldt; /* actually 16 bits: top 16 bits must be zero */
+ int tss_ioopt; /* options & io offset bitmap: currently zero */
+ /* XXX unimplemented .. i/o permission bitmap */
+};
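+
+/*
+ * Illustrative note (not part of the original import): on a transition from
+ * user to kernel mode the 386 loads SS:ESP from tss_ss0/tss_esp0, which is
+ * why tss_esp0 is aliased as tss_ksp above.  A context switch would point it
+ * at the top of the incoming process's kernel stack, roughly:
+ *
+ *	tss.tss_esp0 = (int)kernel_stack_top;	(names here are hypothetical)
+ */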
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
new file mode 100644
index 0000000..39403d6
--- /dev/null
+++ b/sys/amd64/include/vmparam.h
@@ -0,0 +1,256 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)vmparam.h 5.9 (Berkeley) 5/12/91
+ */
+
+
+/*
+ * Machine dependent constants for 386.
+ */
+
+/*
+ * Virtual address space arrangement. On 386, both user and kernel
+ * share the address space, not unlike the vax.
+ * USRTEXT is the start of the user text/data space, while USRSTACK
+ * is the top (end) of the user stack. Immediately above the user stack
+ * resides the user structure, which is UPAGES long and contains the
+ * kernel stack.
+ *
+ * Immediately after the user structure is the page table map, and then
+ * kernel address space.
+ */
+#define USRTEXT 0
+#define USRSTACK 0xFDBFE000
+#define BTOPUSRSTACK (0xFDC00-(UPAGES)) /* btop(USRSTACK) */
+#define LOWPAGES 0
+#define HIGHPAGES UPAGES
+
+/*
+ * Virtual memory related constants, all in bytes
+ */
+#define MAXTSIZ (6*1024*1024) /* max text size */
+#ifndef DFLDSIZ
+#define DFLDSIZ (6*1024*1024) /* initial data size limit */
+#endif
+#ifndef MAXDSIZ
+#define MAXDSIZ (32*1024*1024) /* max data size */
+#endif
+#ifndef DFLSSIZ
+#define DFLSSIZ (512*1024) /* initial stack size limit */
+#endif
+#ifndef MAXSSIZ
+#define MAXSSIZ MAXDSIZ /* max stack size */
+#endif
+
+/*
+ * Default sizes of swap allocation chunks (see dmap.h).
+ * The actual values may be changed in vminit() based on MAXDSIZ.
+ * With MAXDSIZ of 16Mb and NDMAP of 38, dmmax will be 1024.
+ */
+#define DMMIN 32 /* smallest swap allocation */
+#define DMMAX 4096 /* largest potential swap allocation */
+#define DMTEXT 1024 /* swap allocation for text */
+
+/*
+ * Sizes of the system and user portions of the system page table.
+ */
+#define SYSPTSIZE (2*NPTEPG)
+#define USRPTSIZE (2*NPTEPG)
+
+/*
+ * Size of User Raw I/O map
+ */
+#define USRIOSIZE 300
+
+/*
+ * The size of the clock loop.
+ */
+#define LOOPPAGES (maxfree - firstfree)
+
+/*
+ * The time for a process to be blocked before being very swappable.
+ * This is a number of seconds which the system takes as being a non-trivial
+ * amount of real time. You probably shouldn't change this;
+ * it is used in subtle ways (fractions and multiples of it are, that is, like
+ * half of a ``long time'', almost a long time, etc.)
+ * It is related to human patience and other factors which don't really
+ * change over time.
+ */
+#define MAXSLP 20
+
+/*
+ * A swapped in process is given a small amount of core without being bothered
+ * by the page replacement algorithm. Basically this says that if you are
+ * swapped in you deserve some resources. We protect the last SAFERSS
+ * pages against paging and will just swap you out rather than paging you.
+ * Note that each process has at least UPAGES+CLSIZE pages which are not
+ * paged anyways (this is currently 8+2=10 pages or 5k bytes), so this
+ * number just means a swapped in process is given around 25k bytes.
+ * Just for fun: current memory prices are 4600$ a megabyte on VAX (4/22/81),
+ * so we loan each swapped in process memory worth 100$, or just admit
+ * that we don't consider it worthwhile and swap it out to disk which costs
+ * $30/mb or about $0.75.
+ * { wfj 6/16/89: Retail AT memory expansion $800/megabyte, loan of $17
+ * on disk costing $7/mb or $0.18 (in memory still 100:1 in cost!) }
+ */
+#define SAFERSS 8 /* nominal ``small'' resident set size
+ protected against replacement */
+
+/*
+ * DISKRPM is used to estimate the number of paging i/o operations
+ * which one can expect from a single disk controller.
+ */
+#define DISKRPM 60
+
+/*
+ * Klustering constants. Klustering is the gathering
+ * of pages together for pagein/pageout, while clustering
+ * is the treatment of hardware page size as though it were
+ * larger than it really is.
+ *
+ * KLMAX gives maximum cluster size in CLSIZE page (cluster-page)
+ * units. Note that KLMAX*CLSIZE must be <= DMMIN in dmap.h.
+ */
+
+#define KLMAX (4/CLSIZE)
+#define KLSEQL (2/CLSIZE) /* in klust if vadvise(VA_SEQL) */
+#define KLIN (4/CLSIZE) /* default data/stack in klust */
+#define KLTXT (4/CLSIZE) /* default text in klust */
+#define KLOUT (4/CLSIZE)
+
+/*
+ * KLSDIST is the advance or retard of the fifo reclaim for sequential
+ * processes data space.
+ */
+#define KLSDIST 3 /* klusters advance/retard for seq. fifo */
+
+/*
+ * Paging thresholds (see vm_sched.c).
+ * Strategy of 1/19/85:
+ * lotsfree is 512k bytes, but at most 1/4 of memory
+ * desfree is 200k bytes, but at most 1/8 of memory
+ * minfree is 64k bytes, but at most 1/2 of desfree
+ */
+#define LOTSFREE (512 * 1024)
+#define LOTSFREEFRACT 4
+#define DESFREE (200 * 1024)
+#define DESFREEFRACT 8
+#define MINFREE (64 * 1024)
+#define MINFREEFRACT 2
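+
+/*
+ * Illustrative sketch (not part of the original import): the strategy above
+ * amounts to clamping each fixed threshold against a fraction of physical
+ * memory.  The function and its physmem_bytes argument are hypothetical.
+ */
+#ifdef EXAMPLE_ONLY
+static int
+example_lotsfree(physmem_bytes)
+	int physmem_bytes;
+{
+	int lotsfree = LOTSFREE;
+
+	if (lotsfree > physmem_bytes / LOTSFREEFRACT)
+		lotsfree = physmem_bytes / LOTSFREEFRACT; /* at most 1/4 of memory */
+	return (lotsfree);
+}
+#endif /* EXAMPLE_ONLY */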
+
+/*
+ * There are two clock hands, initially separated by HANDSPREAD bytes
+ * (but at most all of user memory). The amount of time to reclaim
+ * a page once the pageout process examines it increases with this
+ * distance and decreases as the scan rate rises.
+ */
+#define HANDSPREAD (2 * 1024 * 1024)
+
+/*
+ * The number of times per second to recompute the desired paging rate
+ * and poke the pagedaemon.
+ */
+#define RATETOSCHEDPAGING 4
+
+/*
+ * Believed threshold (in megabytes) for which interleaved
+ * swapping area is desirable.
+ */
+#define LOTSOFMEM 2
+
+#define mapin(pte, v, pfnum, prot) \
+ {(*(int *)(pte) = ((pfnum)<<PGSHIFT) | (prot)) ; }
+
+/*
+ * Mach derived constants
+ */
+
+/* user/kernel map constants */
+#define VM_MIN_ADDRESS ((vm_offset_t)0)
+#define VM_MAXUSER_ADDRESS ((vm_offset_t)0xFDBFE000)
+#define UPT_MIN_ADDRESS ((vm_offset_t)0xFDC00000)
+#define UPT_MAX_ADDRESS ((vm_offset_t)0xFDFF7000)
+#define VM_MAX_ADDRESS UPT_MAX_ADDRESS
+#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)0xFDFF7000)
+#define UPDT VM_MIN_KERNEL_ADDRESS
+#define KPT_MIN_ADDRESS ((vm_offset_t)0xFDFF8000)
+#define KPT_MAX_ADDRESS ((vm_offset_t)0xFDFFF000)
+#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)0xFF7FF000)
+
+/* virtual sizes (bytes) for various kernel submaps */
+#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)
+#define VM_KMEM_SIZE (NKMEMCLUSTERS*CLBYTES)
+#define VM_PHYS_SIZE (USRIOSIZE*CLBYTES)
+
+/* # of kernel PT pages (initial only, can grow dynamically) */
+#define VM_KERNEL_PT_PAGES ((vm_size_t)2) /* XXX: SYSPTSIZE */
+
+/* pcb base */
+#define pcbb(p) ((u_int)(p)->p_addr)
+
+/*
+ * Flush MMU TLB
+ */
+
+#ifndef I386_CR3PAT
+#define I386_CR3PAT 0x0
+#endif
+
+#ifdef notyet
+#define _cr3() ({u_long rtn; \
+ asm (" movl %%cr3,%%eax; movl %%eax,%0 " \
+ : "=g" (rtn) \
+ : \
+ : "ax"); \
+ rtn; \
+})
+
+#define load_cr3(s) ({ u_long val; \
+ val = (s) | I386_CR3PAT; \
+ asm ("movl %0,%%eax; movl %%eax,%%cr3" \
+ : \
+ : "g" (val) \
+ : "ax"); \
+})
+
+#define tlbflush() ({ u_long val; \
+ val = u.u_pcb.pcb_ptd | I386_CR3PAT; \
+ asm ("movl %0,%%eax; movl %%eax,%%cr3" \
+ : \
+ : "g" (val) \
+ : "ax"); \
+})
+#endif
diff --git a/sys/amd64/isa/clock.c b/sys/amd64/isa/clock.c
new file mode 100644
index 0000000..0fb7701
--- /dev/null
+++ b/sys/amd64/isa/clock.c
@@ -0,0 +1,271 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz and Don Ahn.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)clock.c 7.2 (Berkeley) 5/12/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 5 00158
+ * -------------------- ----- ----------------------
+ *
+ * 14 Aug 92 Arne Henrik Juul Added code in the kernel to
+ * allow for DST in the BIOS.
+ * 17 Jan 93 Bruce Evans Fixed leap year and second
+ * calculations
+ * 01 Feb 93	Julian Elischer		Added code for the cpu
+ * speed independent spinwait()
+ * function, (used by scsi and others)
+ * 25 Mar 93 Sean Eric Fagan Add microtimer support using timer 1
+ * 08 Apr 93 Poul-Henning Kamp/P-HK Fixes, and support for dcfclock
+ * 26 Apr 93 Bruce Evans Eliminate findspeed, new spinwait
+ * 26 Apr 93	Rodney W. Grimes	I merged in Bruce's changes and hope I
+ * still kept the other fixes... Had to
+ * add back in findcpuspeed that Bruce
+ * had removed.
+ */
+
+/*
+ * Primitive clock interrupt routines.
+ */
+#include "param.h"
+#include "systm.h"
+#include "time.h"
+#include "kernel.h"
+#include "machine/segments.h"
+#include "i386/isa/icu.h"
+#include "i386/isa/isa.h"
+#include "i386/isa/rtc.h"
+#include "i386/isa/timerreg.h"
+
+#define DAYST 119
+#define DAYEN 303
+
+/* X-tals being what they are, it's nice to be able to fudge this one... */
+/* Note, the name changed here from XTALSPEED to TIMER_FREQ rgrimes 4/26/93 */
+#ifndef TIMER_FREQ
+#define TIMER_FREQ 1193182 /* XXX - should be in isa.h */
+#endif
+
+startrtclock() {
+ int s;
+
+ findcpuspeed(); /* use the clock (while it's free)
+ to find the cpu speed */
+ /* initialize 8253 clock */
+ outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
+
+ /* Correct rounding will buy us a better precision in timekeeping */
+ outb (IO_TIMER1, (TIMER_FREQ+hz/2)/hz);
+ outb (IO_TIMER1, ((TIMER_FREQ+hz/2)/hz)/256);
+
+ /* initialize brain-dead battery powered clock */
+ outb (IO_RTC, RTC_STATUSA);
+ outb (IO_RTC+1, 0x26);
+ outb (IO_RTC, RTC_STATUSB);
+ outb (IO_RTC+1, 2);
+
+ outb (IO_RTC, RTC_DIAG);
+ if (s = inb (IO_RTC+1))
+ printf("RTC BIOS diagnostic error %b\n", s, RTCDG_BITS);
+ outb (IO_RTC, RTC_DIAG);
+ outb (IO_RTC+1, 0);
+}
+
+unsigned int delaycount; /* calibrated loop variable (1 millisecond) */
+
+#define FIRST_GUESS 0x2000
+findcpuspeed()
+{
+ unsigned char low;
+ unsigned int remainder;
+
+ /* Put counter in count down mode */
+ outb(IO_TIMER1+3, 0x34);
+ outb(IO_TIMER1, 0xff);
+ outb(IO_TIMER1, 0xff);
+ delaycount = FIRST_GUESS;
+ spinwait(1);
+ /* Read the value left in the counter */
+	low = inb(IO_TIMER1);	/* least significant */
+ remainder = inb(IO_TIMER1); /* most significant */
+ remainder = (remainder<<8) + low ;
+ /* Formula for delaycount is :
+ * (loopcount * timer clock speed)/ (counter ticks * 1000)
+ */
+ delaycount = (FIRST_GUESS * (TIMER_FREQ/1000)) / (0xffff-remainder);
+}
+
+
+/* convert 2 digit BCD number */
+bcd(i)
+int i;
+{
+ return ((i/16)*10 + (i%16));
+}
+
+/* convert years to seconds (from 1970) */
+unsigned long
+ytos(y)
+int y;
+{
+ int i;
+ unsigned long ret;
+
+ ret = 0;
+ for(i = 1970; i < y; i++) {
+ if (i % 4) ret += 365*24*60*60;
+ else ret += 366*24*60*60;
+ }
+ return ret;
+}
+
+/* convert months to seconds */
+unsigned long
+mtos(m,leap)
+int m,leap;
+{
+ int i;
+ unsigned long ret;
+
+ ret = 0;
+ for(i=1;i<m;i++) {
+ switch(i){
+ case 1: case 3: case 5: case 7: case 8: case 10: case 12:
+ ret += 31*24*60*60; break;
+ case 4: case 6: case 9: case 11:
+ ret += 30*24*60*60; break;
+ case 2:
+ if (leap) ret += 29*24*60*60;
+ else ret += 28*24*60*60;
+ }
+ }
+ return ret;
+}
+
+
+/*
+ * Initialize the time of day register, based on the time base which is, e.g.
+ * from a filesystem.
+ */
+inittodr(base)
+ time_t base;
+{
+ unsigned long sec;
+ int leap,day_week,t,yd;
+ int sa,s;
+
+ /* do we have a realtime clock present? (otherwise we loop below) */
+ sa = rtcin(RTC_STATUSA);
+ if (sa == 0xff || sa == 0) return;
+
+ /* ready for a read? */
+ while ((sa&RTCSA_TUP) == RTCSA_TUP)
+ sa = rtcin(RTC_STATUSA);
+
+ sec = bcd(rtcin(RTC_YEAR)) + 1900;
+ if (sec < 1970)
+ sec += 100;
+ leap = !(sec % 4); sec = ytos(sec); /* year */
+ yd = mtos(bcd(rtcin(RTC_MONTH)),leap); sec += yd; /* month */
+ t = (bcd(rtcin(RTC_DAY))-1) * 24*60*60; sec += t; yd += t; /* date */
+ day_week = rtcin(RTC_WDAY); /* day */
+ sec += bcd(rtcin(RTC_HRS)) * 60*60; /* hour */
+ sec += bcd(rtcin(RTC_MIN)) * 60; /* minutes */
+ sec += bcd(rtcin(RTC_SEC)); /* seconds */
+
+ /* XXX off by one? Need to calculate DST on SUNDAY */
+ /* Perhaps we should have the RTC hold GMT time to save */
+ /* us the bother of converting. */
+ yd = yd / (24*60*60);
+ if ((yd >= DAYST) && ( yd <= DAYEN)) {
+ sec -= 60*60;
+ }
+ sec += tz.tz_minuteswest * 60;
+
+ time.tv_sec = sec;
+}
+
+#ifdef garbage
+/*
+ * Initialize the time of day register, based on the time base which is, e.g.
+ * from a filesystem.
+ */
+test_inittodr(base)
+ time_t base;
+{
+
+ outb(IO_RTC,9); /* year */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,8); /* month */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,7); /* day */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,4); /* hour */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,2); /* minutes */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,0); /* seconds */
+ printf("%d\n",bcd(inb(IO_RTC+1)));
+
+ time.tv_sec = base;
+}
+#endif
+
+/*
+ * Restart the clock.
+ */
+resettodr()
+{
+}
+
+/*
+ * Wire clock interrupt in.
+ */
+#define V(s) __CONCAT(V, s)
+extern V(clk)();
+enablertclock() {
+ setidt(ICU_OFFSET+0, &V(clk), SDT_SYS386IGT, SEL_KPL);
+ INTREN(IRQ0);
+}
+
+/*
+ * Delay for some number of milliseconds.
+ */
+void
+spinwait(millisecs)
+ int millisecs;
+{
+ DELAY(1000 * millisecs);
+}
diff --git a/sys/amd64/isa/icu.h b/sys/amd64/isa/icu.h
new file mode 100644
index 0000000..4866d8d
--- /dev/null
+++ b/sys/amd64/isa/icu.h
@@ -0,0 +1,109 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)icu.h 5.6 (Berkeley) 5/9/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00158
+ * -------------------- ----- ----------------------
+ *
+ * 25 Apr 93 Bruce Evans New fast interrupt code (intr-0.1)
+ */
+
+/*
+ * AT/386 Interrupt Control constants
+ * W. Jolitz 8/89
+ */
+
+#ifndef __ICU__
+#define __ICU__
+
+#ifndef LOCORE
+
+/*
+ * Interrupt "level" mechanism variables, masks, and macros
+ */
+extern unsigned imen; /* interrupt mask enable */
+extern unsigned cpl; /* current priority level mask */
+
+extern unsigned highmask; /* group of interrupts masked with splhigh() */
+extern unsigned ttymask; /* group of interrupts masked with spltty() */
+extern unsigned biomask; /* group of interrupts masked with splbio() */
+extern unsigned netmask; /* group of interrupts masked with splimp() */
+
+#define INTREN(s) (imen &= ~(s), SET_ICUS())
+#define INTRDIS(s) (imen |= (s), SET_ICUS())
+#define INTRMASK(msk,s) (msk |= (s))
+#if 0
+#define SET_ICUS() (outb(IO_ICU1 + 1, imen), outb(IO_ICU2 + 1, imen >> 8))
+#else
+/*
+ * XXX - IO_ICU* are defined in isa.h, not icu.h, and nothing much bothers to
+ * include isa.h, while too many things include icu.h.
+ */
+#define SET_ICUS() (outb(0x21, imen), outb(0xa1, imen >> 8))
+#endif
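+
+/*
+ * Illustrative example (not part of the original import): a driver attach
+ * routine typically adds its line to the appropriate spl group and then
+ * unmasks it, e.g. for a tty-class device on IRQ4:
+ *
+ *	INTRMASK(ttymask, IRQ4);
+ *	INTREN(IRQ4);
+ */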
+
+#endif
+
+/*
+ * Interrupt enable bits -- in order of priority
+ */
+#define IRQ0 0x0001 /* highest priority - timer */
+#define IRQ1 0x0002
+#define IRQ_SLAVE 0x0004
+#define IRQ8 0x0100
+#define IRQ9 0x0200
+#define IRQ2 IRQ9
+#define IRQ10 0x0400
+#define IRQ11 0x0800
+#define IRQ12 0x1000
+#define IRQ13 0x2000
+#define IRQ14 0x4000
+#define IRQ15 0x8000
+#define IRQ3 0x0008
+#define IRQ4 0x0010
+#define IRQ5 0x0020
+#define IRQ6 0x0040
+#define IRQ7 0x0080 /* lowest - parallel printer */
+
+/*
+ * Interrupt Control offset into Interrupt descriptor table (IDT)
+ */
+#define ICU_OFFSET 32 /* 0-31 are processor exceptions */
+#define ICU_LEN 16 /* 32-47 are ISA interrupts */
+
+#endif /* __ICU__ */
diff --git a/sys/amd64/isa/isa.c b/sys/amd64/isa/isa.c
new file mode 100644
index 0000000..8707b43
--- /dev/null
+++ b/sys/amd64/isa/isa.c
@@ -0,0 +1,766 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)isa.c 7.2 (Berkeley) 5/13/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 4 00163
+ * -------------------- ----- ----------------------
+ *
+ * 18 Aug 92 Frank Maclachlan *See comments below
+ * 25 Mar 93 Rodney W. Grimes Added counter for stray interrupt,
+ * turned on logging of stray interrupts,
+ * Now prints maddr, msize, and flags
+ * after finding a device.
+ * 26 Apr 93 Bruce Evans New intr-0.1 code
+ * Rodney W. Grimes Only print io address if id_alive != -1
+ * 17 May 93 Rodney W. Grimes renamed stray interrupt counters to
+ * work with new intr-0.1 code.
+ * Enabled printf for interrupt masks to
+ * aid in bug reports.
+ * 27 May 93 Guido van Rooij New routine add find_isa_dev
+ */
+static char rcsid[] = "$Header: /usr/src/sys.386bsd/i386/isa/RCS/isa.c,v 1.2 92/01/21 14:34:23 william Exp Locker: root $";
+
+/*
+ * code to manage AT bus
+ *
+ * 92/08/18 Frank P. MacLachlan (fpm@crash.cts.com):
+ * Fixed uninitialized variable problem and added code to deal
+ * with DMA page boundaries in isa_dmarangecheck(). Fixed word
+ * mode DMA count computation and reorganized DMA setup code in
+ * isa_dmastart()
+ */
+
+#include "param.h"
+#include "systm.h"
+#include "conf.h"
+#include "file.h"
+#include "buf.h"
+#include "uio.h"
+#include "syslog.h"
+#include "malloc.h"
+#include "rlist.h"
+#include "machine/segments.h"
+#include "vm/vm.h"
+#include "i386/isa/isa_device.h"
+#include "i386/isa/isa.h"
+#include "i386/isa/icu.h"
+#include "i386/isa/ic/i8237.h"
+#include "i386/isa/ic/i8042.h"
+
+/*
+** Register definitions for DMA controller 1 (channels 0..3):
+*/
+#define DMA1_CHN(c) (IO_DMA1 + 1*(2*(c))) /* addr reg for channel c */
+#define DMA1_SMSK (IO_DMA1 + 1*10) /* single mask register */
+#define DMA1_MODE (IO_DMA1 + 1*11) /* mode register */
+#define DMA1_FFC (IO_DMA1 + 1*12) /* clear first/last FF */
+
+/*
+** Register definitions for DMA controller 2 (channels 4..7):
+*/
+#define DMA2_CHN(c) (IO_DMA1 + 2*(2*(c))) /* addr reg for channel c */
+#define DMA2_SMSK (IO_DMA2 + 2*10) /* single mask register */
+#define DMA2_MODE (IO_DMA2 + 2*11) /* mode register */
+#define DMA2_FFC (IO_DMA2 + 2*12) /* clear first/last FF */
+
+int config_isadev __P((struct isa_device *, u_int *));
+
+#ifdef notyet
+struct rlist *isa_iomem;
+
+/*
+ * Configure all ISA devices
+ */
+isa_configure() {
+ struct isa_device *dvp;
+ struct isa_driver *dp;
+
+ splhigh();
+ INTREN(IRQ_SLAVE);
+ /*rlist_free(&isa_iomem, 0xa0000, 0xfffff);*/
+ for (dvp = isa_devtab_tty; dvp; dvp++)
+ (void) config_isadev(dvp, &ttymask);
+ for (dvp = isa_devtab_bio; dvp; dvp++)
+ (void) config_isadev(dvp, &biomask);
+ for (dvp = isa_devtab_net; dvp; dvp++)
+ (void) config_isadev(dvp, &netmask);
+ for (dvp = isa_devtab_null; dvp; dvp++)
+ (void) config_isadev(dvp, (u_int *) NULL);
+#include "sl.h"
+#if NSL > 0
+ netmask |= ttymask;
+ ttymask |= netmask;
+#endif
+/* printf("biomask %x ttymask %x netmask %x\n", biomask, ttymask, netmask); */
+ splnone();
+}
+
+/*
+ * Configure an ISA device.
+ */
+config_isadev(isdp, mp)
+ struct isa_device *isdp;
+ u_int *mp;
+{
+ struct isa_driver *dp;
+ static short drqseen, irqseen;
+
+ if (dp = isdp->id_driver) {
+ /* if a device with i/o memory, convert to virtual address */
+ if (isdp->id_maddr) {
+ extern unsigned int atdevbase;
+
+ isdp->id_maddr -= IOM_BEGIN;
+ isdp->id_maddr += atdevbase;
+ }
+ isdp->id_alive = (*dp->probe)(isdp);
+ if (isdp->id_alive) {
+
+ printf("%s%d at port 0x%x ", dp->name,
+ isdp->id_unit, isdp->id_iobase);
+
+ /* check for conflicts */
+ if (irqseen & isdp->id_irq) {
+ printf("INTERRUPT CONFLICT - irq%d\n",
+ ffs(isdp->id_irq) - 1);
+ return (0);
+ }
+ if (isdp->id_drq != -1
+ && (drqseen & (1<<isdp->id_drq))) {
+ printf("DMA CONFLICT - drq%d\n", isdp->id_drq);
+ return (0);
+ }
+ /* NEED TO CHECK IOMEM CONFLICT HERE */
+
+ /* allocate and wire in device */
+ if(isdp->id_irq) {
+ int intrno;
+
+ intrno = ffs(isdp->id_irq)-1;
+ printf("irq %d ", intrno);
+ INTREN(isdp->id_irq);
+ if(mp)INTRMASK(*mp,isdp->id_irq);
+ setidt(NRSVIDT + intrno, isdp->id_intr,
+ SDT_SYS386IGT, SEL_KPL);
+ irqseen |= isdp->id_irq;
+ }
+ if (isdp->id_drq != -1) {
+ printf("drq %d ", isdp->id_drq);
+ drqseen |= 1 << isdp->id_drq;
+ }
+
+ (*dp->attach)(isdp);
+
+ printf("on isa\n");
+ }
+ return (1);
+ } else return(0);
+}
+#else /* notyet */
+/*
+ * Configure all ISA devices
+ */
+isa_configure() {
+ struct isa_device *dvp;
+ struct isa_driver *dp;
+
+ enable_intr();
+ splhigh();
+ INTREN(IRQ_SLAVE);
+ for (dvp = isa_devtab_tty; config_isadev(dvp,&ttymask); dvp++);
+ for (dvp = isa_devtab_bio; config_isadev(dvp,&biomask); dvp++);
+ for (dvp = isa_devtab_net; config_isadev(dvp,&netmask); dvp++);
+ for (dvp = isa_devtab_null; config_isadev(dvp,(u_int *) NULL); dvp++);
+#include "sl.h"
+#if NSL > 0
+ netmask |= ttymask;
+ ttymask |= netmask;
+#endif
+ /* biomask |= ttymask ; can some tty devices use buffers? */
+ printf("biomask %x ttymask %x netmask %x\n", biomask, ttymask, netmask);
+ splnone();
+}
+
+/*
+ * Configure an ISA device.
+ */
+config_isadev(isdp, mp)
+ struct isa_device *isdp;
+ u_int *mp;
+{
+ struct isa_driver *dp;
+
+ if (dp = isdp->id_driver) {
+ if (isdp->id_maddr) {
+ extern u_int atdevbase;
+
+ isdp->id_maddr -= 0xa0000; /* XXX should be a define */
+ isdp->id_maddr += atdevbase;
+ }
+ isdp->id_alive = (*dp->probe)(isdp);
+ if (isdp->id_alive) {
+ printf("%s%d", dp->name, isdp->id_unit);
+ /*
+ * The attach should really be after all the printf's
+ * but until all the drivers are fixed do it here.
+ * There is a comment below that shows where this
+ * really belongs. Rod Grimes 04/10/93
+ */
+ (*dp->attach)(isdp);
+ /*
+ * Only print the I/O address range if id_alive != -1
+ * Right now this is a temporary fix just for the new
+ * NPX code so that if it finds a 486 that can use trap
+ * 16 it will not report I/O addresses.
+ * Rod Grimes 04/26/94
+ */
+ if (isdp->id_alive != -1) {
+ printf(" at 0x%x", isdp->id_iobase);
+ if ((isdp->id_iobase + isdp->id_alive - 1) !=
+ isdp->id_iobase)
+ printf("-0x%x",
+ isdp->id_iobase +
+ isdp->id_alive - 1);
+ }
+ if(isdp->id_irq)
+ printf(" irq %d", ffs(isdp->id_irq)-1);
+ if (isdp->id_drq != -1)
+ printf(" drq %d", isdp->id_drq);
+ if (isdp->id_maddr != 0)
+ printf(" maddr 0x%x", kvtop(isdp->id_maddr));
+ if (isdp->id_msize != 0)
+ printf(" msize %d", isdp->id_msize);
+ if (isdp->id_flags != 0)
+ printf(" flags 0x%x", isdp->id_flags);
+ printf(" on isa\n");
+
+ /* This is the place the attach should be done! */
+ if(isdp->id_irq) {
+ int intrno;
+
+ intrno = ffs(isdp->id_irq)-1;
+ setidt(ICU_OFFSET+intrno, isdp->id_intr,
+ SDT_SYS386IGT, SEL_KPL);
+ if(mp)
+ INTRMASK(*mp,isdp->id_irq);
+ INTREN(isdp->id_irq);
+ }
+ }
+ return (1);
+ } else return(0);
+}
+#endif /* (!) notyet */
+
+#define IDTVEC(name) __CONCAT(X,name)
+/* default interrupt vector table entries */
+extern IDTVEC(intr0), IDTVEC(intr1), IDTVEC(intr2), IDTVEC(intr3),
+ IDTVEC(intr4), IDTVEC(intr5), IDTVEC(intr6), IDTVEC(intr7),
+ IDTVEC(intr8), IDTVEC(intr9), IDTVEC(intr10), IDTVEC(intr11),
+ IDTVEC(intr12), IDTVEC(intr13), IDTVEC(intr14), IDTVEC(intr15);
+
+static *defvec[16] = {
+ &IDTVEC(intr0), &IDTVEC(intr1), &IDTVEC(intr2), &IDTVEC(intr3),
+ &IDTVEC(intr4), &IDTVEC(intr5), &IDTVEC(intr6), &IDTVEC(intr7),
+ &IDTVEC(intr8), &IDTVEC(intr9), &IDTVEC(intr10), &IDTVEC(intr11),
+ &IDTVEC(intr12), &IDTVEC(intr13), &IDTVEC(intr14), &IDTVEC(intr15) };
+
+/* out of range default interrupt vector gate entry */
+extern IDTVEC(intrdefault);
+
+/*
+ * Fill in the default interrupt table (in case of a spurious interrupt
+ * during configuration of the kernel) and set up the interrupt control unit.
+ */
+isa_defaultirq() {
+ int i;
+
+ /* icu vectors */
+ for (i = NRSVIDT ; i < NRSVIDT+ICU_LEN ; i++)
+ setidt(i, defvec[i], SDT_SYS386IGT, SEL_KPL);
+
+ /* out of range vectors */
+ for (i = NRSVIDT; i < NIDT; i++)
+ setidt(i, &IDTVEC(intrdefault), SDT_SYS386IGT, SEL_KPL);
+
+ /* initialize 8259's */
+ outb(IO_ICU1, 0x11); /* reset; program device, four bytes */
+ outb(IO_ICU1+1, NRSVIDT); /* starting at this vector index */
+ outb(IO_ICU1+1, 1<<2); /* slave on line 2 */
+#ifdef AUTO_EOI_1
+ outb(IO_ICU1+1, 2 | 1); /* auto EOI, 8086 mode */
+#else
+ outb(IO_ICU1+1, 1); /* 8086 mode */
+#endif
+ outb(IO_ICU1+1, 0xff); /* leave interrupts masked */
+ outb(IO_ICU1, 0x0a); /* default to IRR on read */
+ outb(IO_ICU1, 0xc0 | (3 - 1)); /* pri order 3-7, 0-2 (com2 first) */
+
+ outb(IO_ICU2, 0x11); /* reset; program device, four bytes */
+	outb(IO_ICU2+1, NRSVIDT+8);	/* starting at this vector index */
+ outb(IO_ICU2+1,2); /* my slave id is 2 */
+#ifdef AUTO_EOI_2
+ outb(IO_ICU2+1, 2 | 1); /* auto EOI, 8086 mode */
+#else
+ outb(IO_ICU2+1,1); /* 8086 mode */
+#endif
+ outb(IO_ICU2+1, 0xff); /* leave interrupts masked */
+ outb(IO_ICU2, 0x0a); /* default to IRR on read */
+}
+
+/* region of physical memory known to be contiguous */
+vm_offset_t isaphysmem;
+static caddr_t dma_bounce[8]; /* XXX */
+static char bounced[8]; /* XXX */
+#define MAXDMASZ 512 /* XXX */
+
+/* high byte of address is stored in this port for i-th dma channel */
+static short dmapageport[8] =
+ { 0x87, 0x83, 0x81, 0x82, 0x8f, 0x8b, 0x89, 0x8a };
+
+/*
+ * isa_dmacascade(): program 8237 DMA controller channel to accept
+ * external dma control by a board.
+ */
+void isa_dmacascade(unsigned chan)
+{
+ if (chan > 7)
+ panic("isa_dmacascade: impossible request");
+
+	/* set dma channel mode, and unmask the channel */
+ if ((chan & 4) == 0) {
+ outb(DMA1_MODE, DMA37MD_CASCADE | chan);
+ outb(DMA1_SMSK, chan);
+ } else {
+ outb(DMA2_MODE, DMA37MD_CASCADE | (chan & 3));
+ outb(DMA2_SMSK, chan & 3);
+ }
+}
+
+/*
+ * isa_dmastart(): program 8237 DMA controller channel, avoid page alignment
+ * problems by using a bounce buffer.
+ */
+void isa_dmastart(int flags, caddr_t addr, unsigned nbytes, unsigned chan)
+{ vm_offset_t phys;
+ int waport;
+ caddr_t newaddr;
+
+ if ( chan > 7
+ || (chan < 4 && nbytes > (1<<16))
+ || (chan >= 4 && (nbytes > (1<<17) || (u_int)addr & 1)))
+ panic("isa_dmastart: impossible request");
+
+ if (isa_dmarangecheck(addr, nbytes, chan)) {
+ if (dma_bounce[chan] == 0)
+ dma_bounce[chan] =
+ /*(caddr_t)malloc(MAXDMASZ, M_TEMP, M_WAITOK);*/
+ (caddr_t) isaphysmem + NBPG*chan;
+ bounced[chan] = 1;
+ newaddr = dma_bounce[chan];
+ *(int *) newaddr = 0; /* XXX */
+
+ /* copy bounce buffer on write */
+ if (!(flags & B_READ))
+ bcopy(addr, newaddr, nbytes);
+ addr = newaddr;
+ }
+
+ /* translate to physical */
+ phys = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
+
+ if ((chan & 4) == 0) {
+ /*
+ * Program one of DMA channels 0..3. These are
+ * byte mode channels.
+ */
+ /* set dma channel mode, and reset address ff */
+ if (flags & B_READ)
+ outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|chan);
+ else
+ outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_READ|chan);
+ outb(DMA1_FFC, 0);
+
+ /* send start address */
+ waport = DMA1_CHN(chan);
+ outb(waport, phys);
+ outb(waport, phys>>8);
+ outb(dmapageport[chan], phys>>16);
+
+ /* send count */
+ outb(waport + 1, --nbytes);
+ outb(waport + 1, nbytes>>8);
+
+ /* unmask channel */
+ outb(DMA1_SMSK, chan);
+ } else {
+ /*
+ * Program one of DMA channels 4..7. These are
+ * word mode channels.
+ */
+ /* set dma channel mode, and reset address ff */
+ if (flags & B_READ)
+ outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|(chan&3));
+ else
+ outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_READ|(chan&3));
+ outb(DMA2_FFC, 0);
+
+ /* send start address */
+ waport = DMA2_CHN(chan - 4);
+ outb(waport, phys>>1);
+ outb(waport, phys>>9);
+ outb(dmapageport[chan], phys>>16);
+
+ /* send count */
+ nbytes >>= 1;
+ outb(waport + 2, --nbytes);
+ outb(waport + 2, nbytes>>8);
+
+ /* unmask channel */
+ outb(DMA2_SMSK, chan & 3);
+ }
+}
+
+void isa_dmadone(int flags, caddr_t addr, int nbytes, int chan)
+{
+
+ /* copy bounce buffer on read */
+ /*if ((flags & (B_PHYS|B_READ)) == (B_PHYS|B_READ))*/
+ if (bounced[chan]) {
+ bcopy(dma_bounce[chan], addr, nbytes);
+ bounced[chan] = 0;
+ }
+}
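+
+/*
+ * Illustrative sketch (not part of the original import): the intended calling
+ * pattern for the bounce-buffered helpers above, as a hypothetical driver
+ * might use them for a device-to-memory transfer into `buf'.
+ */
+#ifdef EXAMPLE_ONLY
+static void
+example_dma_read(buf, nbytes, chan)
+	caddr_t buf;
+	unsigned nbytes, chan;
+{
+	isa_dmastart(B_READ, buf, nbytes, chan);
+	/* ... program the device and wait for its completion interrupt ... */
+	isa_dmadone(B_READ, buf, nbytes, chan);	/* copies bounce buffer back */
+}
+#endif /* EXAMPLE_ONLY */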
+
+/*
+ * Check for problems with the address range of a DMA transfer
+ * (non-contiguous physical pages, outside of bus address space,
+ * crossing DMA page boundaries).
+ * Return true if special handling needed.
+ */
+
+isa_dmarangecheck(caddr_t va, unsigned length, unsigned chan) {
+ vm_offset_t phys, priorpage = 0, endva;
+ u_int dma_pgmsk = (chan & 4) ? ~(128*1024-1) : ~(64*1024-1);
+
+ endva = (vm_offset_t)round_page(va + length);
+ for (; va < (caddr_t) endva ; va += NBPG) {
+ phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
+#define ISARAM_END RAM_END
+ if (phys == 0)
+			panic("isa_dmarangecheck: no physical page present");
+ if (phys > ISARAM_END)
+ return (1);
+ if (priorpage) {
+ if (priorpage + NBPG != phys)
+ return (1);
+ /* check if crossing a DMA page boundary */
+ if (((u_int)priorpage ^ (u_int)phys) & dma_pgmsk)
+ return (1);
+ }
+ priorpage = phys;
+ }
+ return (0);
+}
+
+/* head of queue waiting for physmem to become available */
+struct buf isa_physmemq;
+
+/* blocked waiting for resource to become free for exclusive use */
+static isaphysmemflag;
+/* if waited for and call requested when free (B_CALL) */
+static void (*isaphysmemunblock)(); /* needs to be a list */
+
+/*
+ * Allocate contiguous physical memory for transfer, returning
+ * a *virtual* address to region. May block waiting for resource.
+ * (assumed to be called at splbio())
+ */
+caddr_t
+isa_allocphysmem(caddr_t va, unsigned length, void (*func)()) {
+
+ isaphysmemunblock = func;
+ while (isaphysmemflag & B_BUSY) {
+ isaphysmemflag |= B_WANTED;
+ sleep(&isaphysmemflag, PRIBIO);
+ }
+ isaphysmemflag |= B_BUSY;
+
+ return((caddr_t)isaphysmem);
+}
+
+/*
+ * Free contiguous physical memory used for transfer.
+ * (assumed to be called at splbio())
+ */
+void
+isa_freephysmem(caddr_t va, unsigned length) {
+
+ isaphysmemflag &= ~B_BUSY;
+ if (isaphysmemflag & B_WANTED) {
+ isaphysmemflag &= B_WANTED;
+ wakeup(&isaphysmemflag);
+ if (isaphysmemunblock)
+ (*isaphysmemunblock)();
+ }
+}
+
+/*
+ * Handle a NMI, possibly a machine check.
+ * return true to panic system, false to ignore.
+ */
+isa_nmi(cd) {
+
+ log(LOG_CRIT, "\nNMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70));
+ return(0);
+}
+
+/*
+ * Caught a stray interrupt, notify
+ */
+isa_strayintr(d) {
+
+ /* DON'T BOTHER FOR NOW! */
+ /* for some reason, we get bursts of intr #7, even if not enabled! */
+ /*
+ * Well the reason you got bursts of intr #7 is because someone
+ * raised an interrupt line and dropped it before the 8259 could
+ * prioritize it. This is documented in the intel data book. This
+ * means you have BAD hardware! I have changed this so that only
+ * the first 5 get logged, then it quits logging them, and puts
+ * out a special message. rgrimes 3/25/1993
+ */
+ extern u_long intrcnt_stray;
+
+ intrcnt_stray++;
+ if (intrcnt_stray <= 5)
+ log(LOG_ERR,"ISA strayintr %x\n", d);
+ if (intrcnt_stray == 5)
+ log(LOG_CRIT,"Too many ISA strayintr not logging any more\n");
+}
+
+/*
+ * Wait "n" microseconds.
+ * Relies on timer 1 counting down from (TIMER_FREQ / hz) at
+ * (2 * TIMER_FREQ) Hz.
+ * Note: timer had better have been programmed before this is first used!
+ * (The standard programming causes the timer to generate a square wave and
+ * the counter is decremented twice every cycle.)
+ */
+#define CF (2 * TIMER_FREQ)
+#define TIMER_FREQ 1193182 /* XXX - should be elsewhere */
+
+extern int hz; /* XXX - should be elsewhere */
+
+int DELAY(n)
+ int n;
+{
+ int counter_limit;
+ int prev_tick;
+ int tick;
+ int ticks_left;
+ int sec;
+ int usec;
+
+#ifdef DELAYDEBUG
+ int getit_calls = 1;
+ int n1;
+ static int state = 0;
+
+ if (state == 0) {
+ state = 1;
+ for (n1 = 1; n1 <= 10000000; n1 *= 10)
+ DELAY(n1);
+ state = 2;
+ }
+ if (state == 1)
+ printf("DELAY(%d)...", n);
+#endif
+
+ /*
+ * Read the counter first, so that the rest of the setup overhead is
+ * counted. Guess the initial overhead is 20 usec (on most systems it
+ * takes about 1.5 usec for each of the i/o's in getit(). The loop
+ * takes about 6 usec on a 486/33 and 13 usec on a 386/20. The
+ * multiplications and divisions to scale the count take a while).
+ */
+ prev_tick = getit(0, 0);
+ n -= 20;
+
+ /*
+ * Calculate (n * (CF / 1e6)) without using floating point and without
+ * any avoidable overflows.
+ */
+ sec = n / 1000000;
+ usec = n - sec * 1000000;
+ ticks_left = sec * CF
+ + usec * (CF / 1000000)
+ + usec * ((CF % 1000000) / 1000) / 1000
+ + usec * (CF % 1000) / 1000000;
+
+ counter_limit = TIMER_FREQ / hz;
+ while (ticks_left > 0) {
+ tick = getit(0, 0);
+#ifdef DELAYDEBUG
+ ++getit_calls;
+#endif
+ if (tick > prev_tick)
+ ticks_left -= prev_tick - (tick - counter_limit);
+ else
+ ticks_left -= prev_tick - tick;
+ prev_tick = tick;
+ }
+#ifdef DELAYDEBUG
+ if (state == 1)
+ printf(" %d calls to getit() at %d usec each\n",
+ getit_calls, (n + 5) / getit_calls);
+#endif
+}
+
+getit(unit, timer) {
+ int high;
+ int low;
+
+ /*
+ * XXX - isa.h defines bogus timers. There's no such timer as
+ * IO_TIMER_2 = 0x48. There's a timer in the CMOS RAM chip but
+ * its interface is quite different. Neither timer is an 8252.
+ * We actually only call this with unit = 0 and timer = 0. It
+ * could be static...
+ */
+ /*
+ * Protect ourself against interrupts.
+ * XXX - sysbeep() and sysbeepstop() need protection.
+ */
+ disable_intr();
+ /*
+ * Latch the count for 'timer' (cc00xxxx, c = counter, x = any).
+ */
+ outb(IO_TIMER1 + 3, timer << 6);
+
+ low = inb(IO_TIMER1 + timer);
+ high = inb(IO_TIMER1 + timer);
+ enable_intr();
+ return ((high << 8) | low);
+}
+
+static beeping;
+static
+sysbeepstop(f)
+{
+ /* disable counter 2 */
+ outb(0x61, inb(0x61) & 0xFC);
+ if (f)
+ timeout(sysbeepstop, 0, f);
+ else
+ beeping = 0;
+}
+
+void sysbeep(int pitch, int period)
+{
+
+ outb(0x61, inb(0x61) | 3); /* enable counter 2 */
+ /*
+ * XXX - move timer stuff to clock.c.
+ * Program counter 2:
+ * ccaammmb, c counter, a = access, m = mode, b = BCD
+ * 1011x110, 11 for aa = LSB then MSB, x11 for mmm = square wave.
+ */
+ outb(0x43, 0xb6); /* set command for counter 2, 2 byte write */
+
+ outb(0x42, pitch);
+ outb(0x42, (pitch>>8));
+
+ if (!beeping) {
+ beeping = period;
+ timeout(sysbeepstop, period/2, period);
+ }
+}
+
+/*
+ * Pass command to keyboard controller (8042)
+ */
+unsigned kbc_8042cmd(val) {
+
+ while (inb(KBSTATP)&KBS_IBF);
+ if (val) outb(KBCMDP, val);
+ while (inb(KBSTATP)&KBS_IBF);
+ return (inb(KBDATAP));
+}
+
+/*
+ * find an ISA device in a given isa_devtab_* table, given
+ * the table to search, the expected id_driver entry, and the unit number.
+ *
+ * this function is defined in isa_device.h, and this location is debatable;
+ * I put it there because it's useless without it, and it directly operates on
+ * the other stuff in that file.
+ *
+ */
+
+struct isa_device *find_isadev(table, driverp, unit)
+ struct isa_device *table;
+ struct isa_driver *driverp;
+ int unit;
+{
+ if (driverp == NULL) /* sanity check */
+ return NULL;
+
+ while ((table->id_driver != driverp) || (table->id_unit != unit)) {
+ if (table->id_driver == 0)
+ return NULL;
+
+ table++;
+ }
+
+ return table;
+}
+
+/*
+ * Return nonzero if a (masked) irq is pending for a given device.
+ */
+int
+isa_irq_pending(dvp)
+ struct isa_device *dvp;
+{
+ unsigned id_irq;
+
+ id_irq = (unsigned short) dvp->id_irq; /* XXX silly type in struct */
+ if (id_irq & 0xff)
+ return (inb(IO_ICU1) & id_irq);
+ return (inb(IO_ICU2) & (id_irq >> 8));
+}
diff --git a/sys/amd64/isa/isa.h b/sys/amd64/isa/isa.h
new file mode 100644
index 0000000..a9c042d
--- /dev/null
+++ b/sys/amd64/isa/isa.h
@@ -0,0 +1,188 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)isa.h 5.7 (Berkeley) 5/9/91
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 3 00158
+ * -------------------- ----- ----------------------
+ *
+ * 15 Feb 93 Julian Elischer Added entries for some scsi adapters
+ * 06 Apr 93 Rodney W. Grimes Added com3 and com4, added IO_ISASIZES
+ * section
+ * 26 Apr 93 Bruce Evans Support for intr-0.1
+ */
+
+/*
+ * ISA Bus conventions
+ */
+
+#ifndef LOCORE
+#include <sys/cdefs.h>
+
+unsigned char rtcin __P((int));
+extern unsigned int atdevbase; /* offset in virtual memory of ISA io mem */
+void sysbeep __P((int, int));
+unsigned kbc_8042cmd __P((int));
+struct isa_device;
+int isa_irq_pending __P((struct isa_device *dvp));
+#endif
+
+
+/*
+ * Input / Output Port Assignments
+ */
+
+#ifndef IO_BEGIN
+#define IO_ISABEGIN 0x000 /* 0x000 - Beginning of I/O Registers */
+
+ /* CPU Board */
+#define IO_DMA1 0x000 /* 8237A DMA Controller #1 */
+#define IO_ICU1 0x020 /* 8259A Interrupt Controller #1 */
+#define IO_TIMER1 0x040 /* 8253 Timer #1 */
+#define IO_TIMER2 0x048 /* 8253 Timer #2 */
+#define IO_KBD 0x060 /* 8042 Keyboard */
+#define IO_RTC 0x070 /* RTC */
+#define IO_NMI IO_RTC /* NMI Control */
+#define IO_DMAPG 0x080 /* DMA Page Registers */
+#define IO_ICU2 0x0A0 /* 8259A Interrupt Controller #2 */
+#define IO_DMA2 0x0C0 /* 8237A DMA Controller #2 */
+#define IO_NPX 0x0F0 /* Numeric Coprocessor */
+
+ /* Cards */
+ /* 0x100 - 0x16F Open */
+
+#define IO_WD2 0x170 /* Secondary Fixed Disk Controller */
+
+ /* 0x178 - 0x1EF Open */
+
+#define IO_WD1 0x1f0 /* Primary Fixed Disk Controller */
+#define IO_GAME 0x200 /* Game Controller */
+
+ /* 0x208 - 0x277 Open */
+
+#define IO_LPT2 0x278 /* Parallel Port #2 */
+
+ /* 0x280 - 0x2E7 Open */
+
+#define IO_COM4 0x2e8 /* COM4 i/o address */
+
+ /* 0x2F0 - 0x2F7 Open */
+
+#define IO_COM2 0x2f8 /* COM2 i/o address */
+ /* 0x300 - 0x32F Open */
+
+#define IO_BT0 0x330 /* bustek 742a default addr. */
+#define IO_AHA0 0x330 /* adaptec 1542 default addr. */
+#define IO_UHA0 0x330 /* ultrastore 14f default addr. */
+#define IO_BT1 0x334 /* bustek 742a default addr. */
+#define IO_AHA1 0x334 /* adaptec 1542 default addr. */
+ /* 0x338 - 0x36F Open */
+
+#define IO_FD2 0x370 /* secondary base i/o address */
+#define IO_LPT1 0x378 /* Parallel Port #1 */
+
+ /* 0x380 - 0x3AF Open */
+
+#define IO_MDA 0x3B0 /* Monochrome Adapter */
+#define IO_LPT3 0x3BC /* Monochrome Adapter Printer Port */
+#define IO_VGA 0x3C0 /* E/VGA Ports */
+#define IO_CGA 0x3D0 /* CGA Ports */
+
+ /* 0x3E0 - 0x3E7 Open */
+
+#define IO_COM3 0x3e8 /* COM3 i/o address */
+#define IO_FD1 0x3f0 /* primary base i/o address */
+#define IO_COM1 0x3f8 /* COM1 i/o address */
+
+#define IO_ISAEND 0x3FF /* - 0x3FF End of I/O Registers */
+#endif /* IO_ISABEGIN */
+
+/*
+ * Input / Output Port Sizes - these are from several sources, and tend
+ * to be the larger of what was found, ie COM ports can be 4, but some
+ * boards do not fully decode the address, thus 8 ports are used.
+ */
+
+#ifndef IO_ISASIZES
+#define IO_ISASIZES
+
+#define IO_COMSIZE 8 /* 8250, 16X50 com controllers (4?) */
+#define IO_CGASIZE 16 /* CGA controllers */
+#define IO_DMASIZE 16 /* 8237 DMA controllers */
+#define IO_DPGSIZE 32 /* 74LS612 DMA page registers */
+#define IO_FDCSIZE 8 /* Nec765 floppy controllers */
+#define IO_WDCSIZE 8 /* WD compatible disk controllers */
+#define IO_GAMSIZE 16 /* AT compatible game controllers */
+#define IO_ICUSIZE 16 /* 8259A interrupt controllers */
+#define IO_KBDSIZE 16 /* 8042 Keyboard controllers */
+#define IO_LPTSIZE 8 /* LPT controllers, some use only 4 */
+#define IO_MDASIZE 16 /* Monochrome display controllers */
+#define IO_RTCSIZE 16 /* CMOS real time clock, NMI control */
+#define IO_TMRSIZE 16 /* 8253 programmable timers */
+#define IO_NPXSIZE 16 /* 80387/80487 NPX registers */
+#define IO_VGASIZE 16 /* VGA controllers */
+
+#endif /* IO_ISASIZES */
+
+/*
+ * Input / Output Memory Physical Addresses
+ */
+
+#ifndef IOM_BEGIN
+#define IOM_BEGIN 0x0a0000 /* Start of I/O Memory "hole" */
+#define IOM_END 0x100000 /* End of I/O Memory "hole" */
+#define IOM_SIZE (IOM_END - IOM_BEGIN)
+#endif /* IOM_BEGIN */
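
A hedged sketch (assumption: atdevbase, declared above, maps the IOM_BEGIN..IOM_END physical "hole" linearly into kernel virtual memory): converting a physical address inside the hole, e.g. a display buffer, to a usable pointer.

	static char *
	example_iomem_ptr(unsigned physaddr)
	{
		if (physaddr < IOM_BEGIN || physaddr >= IOM_END)
			return ((char *)0);	/* not inside the I/O memory hole */
		/* the linear mapping of the hole at atdevbase is an assumption */
		return ((char *)(atdevbase + (physaddr - IOM_BEGIN)));
	}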
+
+/*
+ * RAM Physical Address Space (ignoring the above mentioned "hole")
+ */
+
+#ifndef RAM_BEGIN
+#define RAM_BEGIN 0x0000000 /* Start of RAM Memory */
+#define RAM_END 0x1000000 /* End of RAM Memory */
+#define RAM_SIZE (RAM_END - RAM_BEGIN)
+#endif /* RAM_BEGIN */
+
+/*
+ * Oddball Physical Memory Addresses
+ */
+#ifndef COMPAQ_RAMRELOC
+#define COMPAQ_RAMRELOC 0x80c00000 /* Compaq RAM relocation/diag */
+#define COMPAQ_RAMSETUP 0x80c00002 /* Compaq RAM setup */
+#define WEITEK_FPU 0xC0000000 /* WTL 2167 */
+#define CYRIX_EMC 0xC0000000 /* Cyrix EMC */
+#endif /* COMPAQ_RAMRELOC */
diff --git a/sys/amd64/isa/npx.c b/sys/amd64/isa/npx.c
new file mode 100644
index 0000000..73392fa
--- /dev/null
+++ b/sys/amd64/isa/npx.c
@@ -0,0 +1,564 @@
+/*-
+ * Copyright (c) 1990 William Jolitz.
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)npx.c 7.2 (Berkeley) 5/12/91
+ *
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00154
+ * -------------------- ----- ----------------------
+ *
+ * 20 Apr 93 Bruce Evans New npx-0.5 code
+ * 23 May 93 Rodney W. Grimes Return a special value of -1 from
+ * the probe code to keep isa_config from
+ * printing out the I/O address when we
+ * are using trap 16 handling.
+ *
+ */
+static char rcsid[] = "$Header: /usr/bill/working/sys/i386/isa/RCS/npx.c,v 1.2 92/01/21 14:34:27 william Exp $";
+
+#include "npx.h"
+#if NNPX > 0
+
+#include "param.h"
+#include "systm.h"
+#include "conf.h"
+#include "file.h"
+#include "proc.h"
+#include "machine/cpu.h"
+#include "machine/pcb.h"
+#include "machine/trap.h"
+#include "ioctl.h"
+#include "machine/specialreg.h"
+#include "i386/isa/icu.h"
+#include "i386/isa/isa_device.h"
+#include "i386/isa/isa.h"
+
+/*
+ * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
+ */
+
+#ifdef __GNUC__
+
+#define disable_intr() __asm("cli")
+#define enable_intr() __asm("sti")
+#define fldcw(addr) __asm("fldcw %0" : : "m" (*addr))
+#define fnclex() __asm("fnclex")
+#define fninit() __asm("fninit")
+#define fnsave(addr) __asm("fnsave %0" : "=m" (*addr) : "0" (*addr))
+#define fnstcw(addr) __asm("fnstcw %0" : "=m" (*addr) : "0" (*addr))
+#define fnstsw(addr) __asm("fnstsw %0" : "=m" (*addr) : "0" (*addr))
+#define fp_divide_by_0() __asm("fldz; fld1; fdiv %st,%st(1); fwait")
+#define frstor(addr) __asm("frstor %0" : : "m" (*addr))
+#define fwait() __asm("fwait")
+#define read_eflags() ({u_long ef; \
+ __asm("pushf; popl %0" : "=a" (ef)); \
+ ef; })
+#define start_emulating() __asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
+ : : "n" (CR0_TS) : "ax")
+#define stop_emulating() __asm("clts")
+#define write_eflags(ef) __asm("pushl %0; popf" : : "a" ((u_long) ef))
+
+#else /* not __GNUC__ */
+
+void disable_intr __P((void));
+void enable_intr __P((void));
+void fldcw __P((caddr_t addr));
+void fnclex __P((void));
+void fninit __P((void));
+void fnsave __P((caddr_t addr));
+void fnstcw __P((caddr_t addr));
+void fnstsw __P((caddr_t addr));
+void fp_divide_by_0 __P((void));
+void frstor __P((caddr_t addr));
+void fwait __P((void));
+u_long read_eflags __P((void));
+void start_emulating __P((void));
+void stop_emulating __P((void));
+void write_eflags __P((u_long ef));
+
+#endif /* __GNUC__ */
+
+typedef u_char bool_t;
+
+extern struct gate_descriptor idt[];
+
+int npxdna __P((void));
+void npxexit __P((struct proc *p));
+void npxinit __P((u_int control));
+void npxintr __P((struct intrframe frame));
+void npxsave __P((struct save87 *addr));
+static int npxattach __P((struct isa_device *dvp));
+static int npxprobe __P((struct isa_device *dvp));
+static int npxprobe1 __P((struct isa_device *dvp));
+
+struct isa_driver npxdriver = {
+ npxprobe, npxattach, "npx",
+};
+
+u_int npx0mask;
+struct proc *npxproc;
+
+static bool_t npx_ex16;
+static bool_t npx_exists;
+static struct gate_descriptor npx_idt_probeintr;
+static int npx_intrno;
+static volatile u_int npx_intrs_while_probing;
+static bool_t npx_irq13;
+static volatile u_int npx_traps_while_probing;
+
+/*
+ * Special interrupt handlers. Someday intr0-intr15 will be used to count
+ * interrupts. We'll still need a special exception 16 handler. The busy
+ * latch stuff in probintr() can be moved to npxprobe().
+ */
+void probeintr(void);
+asm
+("
+ .text
+_probeintr:
+ ss
+ incl _npx_intrs_while_probing
+ pushl %eax
+ movb $0x20,%al /* EOI (asm in strings loses cpp features) */
+ outb %al,$0xa0 /* IO_ICU2 */
+ outb %al,$0x20 /* IO_ICU1 */
+ movb $0,%al
+ outb %al,$0xf0 /* clear BUSY# latch */
+ popl %eax
+ iret
+");
+
+void probetrap(void);
+asm
+("
+ .text
+_probetrap:
+ ss
+ incl _npx_traps_while_probing
+ fnclex
+ iret
+");
+
+/*
+ * Probe routine. Initialize cr0 to give correct behaviour for [f]wait
+ * whether the device exists or not (XXX should be elsewhere). Set flags
+ * to tell npxattach() what to do. Modify device struct if npx doesn't
+ * need to use interrupts. Return -1 (trap 16, no i/o registers used) or
+ * IO_NPXSIZE; the probe never returns 0, so npxattach() always runs.
+ */
+static int
+npxprobe(dvp)
+ struct isa_device *dvp;
+{
+ int result;
+ u_long save_eflags;
+ u_char save_icu1_mask;
+ u_char save_icu2_mask;
+ struct gate_descriptor save_idt_npxintr;
+ struct gate_descriptor save_idt_npxtrap;
+ /*
+ * This routine is now just a wrapper for npxprobe1(), to install
+ * special npx interrupt and trap handlers, to enable npx interrupts
+ * and to disable other interrupts. Someday isa_configure() will
+ * install suitable handlers and run with interrupts enabled so we
+ * won't need to do so much here.
+ */
+ npx_intrno = NRSVIDT + ffs(dvp->id_irq) - 1;
+ save_eflags = read_eflags();
+ disable_intr();
+ save_icu1_mask = inb(IO_ICU1 + 1);
+ save_icu2_mask = inb(IO_ICU2 + 1);
+ save_idt_npxintr = idt[npx_intrno];
+ save_idt_npxtrap = idt[16];
+ outb(IO_ICU1 + 1, ~(IRQ_SLAVE | dvp->id_irq));
+ outb(IO_ICU2 + 1, ~(dvp->id_irq >> 8));
+ setidt(16, probetrap, SDT_SYS386TGT, SEL_KPL);
+ setidt(npx_intrno, probeintr, SDT_SYS386IGT, SEL_KPL);
+ npx_idt_probeintr = idt[npx_intrno];
+ enable_intr();
+ result = npxprobe1(dvp);
+ disable_intr();
+ outb(IO_ICU1 + 1, save_icu1_mask);
+ outb(IO_ICU2 + 1, save_icu2_mask);
+ idt[npx_intrno] = save_idt_npxintr;
+ idt[16] = save_idt_npxtrap;
+ write_eflags(save_eflags);
+ return (result);
+}
+
+static int
+npxprobe1(dvp)
+ struct isa_device *dvp;
+{
+ int control;
+ int status;
+#ifdef lint
+ npxintr();
+#endif
+ /*
+ * Partially reset the coprocessor, if any. Some BIOS's don't reset
+ * it after a warm boot.
+ */
+ outb(0xf1, 0); /* full reset on some systems, NOP on others */
+ outb(0xf0, 0); /* clear BUSY# latch */
+ /*
+ * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
+ * instructions. We must set the CR0_MP bit and use the CR0_TS
+ * bit to control the trap, because setting the CR0_EM bit does
+ * not cause WAIT instructions to trap. It's important to trap
+ * WAIT instructions - otherwise the "wait" variants of no-wait
+ * control instructions would degenerate to the "no-wait" variants
+ * after FP context switches but work correctly otherwise. It's
+ * particularly important to trap WAITs when there is no NPX -
+ * otherwise the "wait" variants would always degenerate.
+ *
+ * Try setting CR0_NE to get correct error reporting on 486DX's.
+ * Setting it should fail or do nothing on lesser processors.
+ */
+ load_cr0(rcr0() | CR0_MP | CR0_NE);
+ /*
+ * But don't trap while we're probing.
+ */
+ stop_emulating();
+ /*
+ * Finish resetting the coprocessor, if any. If there is an error
+ * pending, then we may get a bogus IRQ13, but probeintr() will handle
+ * it OK. Bogus halts have never been observed, but we enabled
+ * IRQ13 and cleared the BUSY# latch early to handle them anyway.
+ */
+ fninit();
+ DELAY(1000); /* wait for any IRQ13 (fwait might hang) */
+#ifdef DIAGNOSTIC
+ if (npx_intrs_while_probing != 0)
+ printf("fninit caused %u bogus npx interrupt(s)\n",
+ npx_intrs_while_probing);
+ if (npx_traps_while_probing != 0)
+ printf("fninit caused %u bogus npx trap(s)\n",
+ npx_traps_while_probing);
+#endif
+ /*
+ * Check for a status of mostly zero.
+ */
+ status = 0x5a5a;
+ fnstsw(&status);
+ if ((status & 0xb8ff) == 0) {
+ /*
+ * Good, now check for a proper control word.
+ */
+ control = 0x5a5a;
+ fnstcw(&control);
+ if ((control & 0x1f3f) == 0x033f) {
+ npx_exists = 1;
+ /*
+ * We have an npx, now divide by 0 to see if exception
+ * 16 works.
+ */
+ control &= ~(1 << 2); /* enable divide by 0 trap */
+ fldcw(&control);
+ npx_traps_while_probing = npx_intrs_while_probing = 0;
+ fp_divide_by_0();
+ if (npx_traps_while_probing != 0) {
+ /*
+ * Good, exception 16 works.
+ */
+ npx_ex16 = 1;
+ dvp->id_irq = 0; /* zap the interrupt */
+ /*
+ * special return value to flag that we do not
+ * actually use any I/O registers
+ */
+ return (-1);
+ }
+ if (npx_intrs_while_probing != 0) {
+ /*
+ * Bad, we are stuck with IRQ13.
+ */
+ npx_irq13 = 1;
+ npx0mask = dvp->id_irq; /* npxattach too late */
+ return (IO_NPXSIZE);
+ }
+ /*
+ * Worse, even IRQ13 is broken. Use emulator.
+ */
+ }
+ }
+ /*
+ * Probe failed, but we want to get to npxattach to initialize the
+ * emulator and say that it has been installed. XXX handle devices
+ * that aren't really devices better.
+ */
+ dvp->id_irq = 0;
+ return (IO_NPXSIZE);
+}
+
+/*
+ * Attach routine - announce which it is, and wire into system
+ */
+int
+npxattach(dvp)
+ struct isa_device *dvp;
+{
+ if (npx_ex16)
+ printf(" <Errors reported via Exception 16>");
+ else if (npx_irq13)
+ printf(" <Errors reported via IRQ 13>");
+ else if (npx_exists)
+ printf(" <Error reporting broken, using 387 emulator>");
+ else
+ printf(" <387 Emulator>");
+ npxinit(__INITIAL_NPXCW__);
+ return (1); /* XXX unused */
+}
+
+/*
+ * Initialize floating point unit.
+ */
+void
+npxinit(control)
+ u_int control;
+{
+ struct save87 dummy;
+
+ if (!npx_exists)
+ return;
+ /*
+ * fninit has the same h/w bugs as fnsave. Use the detoxified
+ * fnsave to throw away any junk in the fpu. fnsave initializes
+ * the fpu and sets npxproc = NULL as important side effects.
+ */
+ npxsave(&dummy);
+ stop_emulating();
+ fldcw(&control);
+ if (curpcb != NULL)
+ fnsave(&curpcb->pcb_savefpu);
+ start_emulating();
+}
+
+/*
+ * Free coprocessor (if we have it).
+ */
+void
+npxexit(p)
+ struct proc *p;
+{
+
+ if (p == npxproc) {
+ start_emulating();
+ npxproc = NULL;
+ }
+}
+
+/*
+ * Record the FPU state and reinitialize it all except for the control word.
+ * Then generate a SIGFPE.
+ *
+ * Reinitializing the state allows naive SIGFPE handlers to longjmp without
+ * doing any fixups.
+ *
+ * XXX there is currently no way to pass the full error state to signal
+ * handlers, and if this is a nested interrupt there is no way to pass even
+ * a status code! So there is no way to have a non-naive SIGFPE handler. At
+ * best a handler could do an fninit followed by an fldcw of a static value.
+ * fnclex would be of little use because it would leave junk on the FPU stack.
+ * Returning from the handler would be even less safe than usual because
+ * IRQ13 exception handling makes exceptions even less precise than usual.
+ */
+void
+npxintr(frame)
+ struct intrframe frame;
+{
+ int code;
+
+ if (npxproc == NULL || !npx_exists) {
+ /* XXX no %p in stand/printf.c. Cast to quiet gcc -Wall. */
+ printf("npxintr: npxproc = %lx, curproc = %lx, npx_exists = %d\n",
+ (u_long) npxproc, (u_long) curproc, npx_exists);
+ panic("npxintr from nowhere");
+ }
+ if (npxproc != curproc) {
+ printf("npxintr: npxproc = %lx, curproc = %lx, npx_exists = %d\n",
+ (u_long) npxproc, (u_long) curproc, npx_exists);
+ panic("npxintr from non-current process");
+ }
+ /*
+ * Save state. This does an implied fninit. It had better not halt
+ * the cpu or we'll hang.
+ */
+ outb(0xf0, 0);
+ fnsave(&curpcb->pcb_savefpu);
+ fwait();
+ /*
+ * Restore control word (was clobbered by fnsave).
+ */
+ fldcw(&curpcb->pcb_savefpu.sv_env.en_cw);
+ fwait();
+ /*
+ * Remember the exception status word and tag word. The current
+ * (almost fninit'ed) fpu state is in the fpu and the exception
+ * state just saved will soon be junk. However, the implied fninit
+ * doesn't change the error pointers or register contents, and we
+ * preserved the control word and will copy the status and tag
+ * words, so the complete exception state can be recovered.
+ */
+ curpcb->pcb_savefpu.sv_ex_sw = curpcb->pcb_savefpu.sv_env.en_sw;
+ curpcb->pcb_savefpu.sv_ex_tw = curpcb->pcb_savefpu.sv_env.en_tw;
+
+ /*
+ * Pass exception to process.
+ */
+ if (ISPL(frame.if_cs) == SEL_UPL) {
+ /*
+ * Interrupt is essentially a trap, so we can afford to call
+ * the SIGFPE handler (if any) as soon as the interrupt
+ * returns.
+ *
+ * XXX little or nothing is gained from this, and plenty is
+ * lost - the interrupt frame has to contain the trap frame
+ * (this is otherwise only necessary for the rescheduling trap
+ * in doreti, and the frame for that could easily be set up
+ * just before it is used).
+ */
+ curproc->p_regs = (int *)&frame.if_es;
+ curpcb->pcb_flags |= FM_TRAP; /* used by sendsig */
+#ifdef notyet
+ /*
+ * Encode the appropriate code for detailed information on
+ * this exception.
+ */
+ code = XXX_ENCODE(curpcb->pcb_savefpu.sv_ex_sw);
+#else
+ code = 0; /* XXX */
+#endif
+ trapsignal(curproc, SIGFPE, code);
+ curpcb->pcb_flags &= ~FM_TRAP;
+ } else {
+ /*
+ * Nested interrupt. These losers occur when:
+ * o an IRQ13 is bogusly generated at a bogus time, e.g.:
+ * o immediately after an fnsave or frstor of an
+ * error state.
+ * o a couple of 386 instructions after
+ * "fstpl _memvar" causes a stack overflow.
+ * These are especially nasty when combined with a
+ * trace trap.
+ * o an IRQ13 occurs at the same time as another higher-
+ * priority interrupt.
+ *
+ * Treat them like a true async interrupt.
+ */
+ psignal(npxproc, SIGFPE);
+ }
+}
+
+/*
+ * Implement device not available (DNA) exception
+ *
+ * It would be better to switch FP context here (only). This would require
+ * saving the state in the proc table instead of in the pcb.
+ */
+int
+npxdna()
+{
+ if (!npx_exists)
+ return (0);
+ if (npxproc != NULL) {
+ printf("npxdna: npxproc = %lx, curproc = %lx\n",
+ (u_long) npxproc, (u_long) curproc);
+ panic("npxdna");
+ }
+ stop_emulating();
+ /*
+ * Record new context early in case frstor causes an IRQ13.
+ */
+ npxproc = curproc;
+ /*
+ * The following frstor may cause an IRQ13 when the state being
+ * restored has a pending error. The error will appear to have been
+ * triggered by the current (npx) user instruction even when that
+ * instruction is a no-wait instruction that should not trigger an
+ * error (e.g., fnclex). On at least one 486 system all of the
+ * no-wait instructions are broken the same as frstor, so our
+ * treatment does not amplify the breakage. On at least one
+ * 386/Cyrix 387 system, fnclex works correctly while frstor and
+ * fnsave are broken, so our treatment breaks fnclex if it is the
+ * first FPU instruction after a context switch.
+ */
+ frstor(&curpcb->pcb_savefpu);
+
+ return (1);
+}
+
+/*
+ * Wrapper for fnsave instruction to handle h/w bugs. If there is an error
+ * pending, then fnsave generates a bogus IRQ13 on some systems. Force
+ * any IRQ13 to be handled immediately, and then ignore it. This routine is
+ * often called at splhigh so it must not use many system services. In
+ * particular, it's much easier to install a special handler than to
+ * guarantee that it's safe to use npxintr() and its supporting code.
+ */
+void
+npxsave(addr)
+ struct save87 *addr;
+{
+ u_char icu1_mask;
+ u_char icu2_mask;
+ u_char old_icu1_mask;
+ u_char old_icu2_mask;
+ struct gate_descriptor save_idt_npxintr;
+
+ disable_intr();
+ old_icu1_mask = inb(IO_ICU1 + 1);
+ old_icu2_mask = inb(IO_ICU2 + 1);
+ save_idt_npxintr = idt[npx_intrno];
+ outb(IO_ICU1 + 1, old_icu1_mask & ~(IRQ_SLAVE | npx0mask));
+ outb(IO_ICU2 + 1, old_icu2_mask & ~(npx0mask >> 8));
+ idt[npx_intrno] = npx_idt_probeintr;
+ enable_intr();
+ stop_emulating();
+ fnsave(addr);
+ fwait();
+ start_emulating();
+ npxproc = NULL;
+ disable_intr();
+ icu1_mask = inb(IO_ICU1 + 1); /* masks may have changed */
+ icu2_mask = inb(IO_ICU2 + 1);
+ outb(IO_ICU1 + 1,
+ (icu1_mask & ~npx0mask) | (old_icu1_mask & npx0mask));
+ outb(IO_ICU2 + 1,
+ (icu2_mask & ~(npx0mask >> 8))
+ | (old_icu2_mask & (npx0mask >> 8)));
+ idt[npx_intrno] = save_idt_npxintr;
+ enable_intr(); /* back to usual state */
+}
+
+#endif /* NNPX > 0 */
diff --git a/sys/amd64/isa/timerreg.h b/sys/amd64/isa/timerreg.h
new file mode 100644
index 0000000..72c7022
--- /dev/null
+++ b/sys/amd64/isa/timerreg.h
@@ -0,0 +1,89 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Header: timerreg.h,v 1.2 93/02/28 15:08:58 mccanne Exp $
+ *
+ * Register definitions for the Intel 8253 Programmable Interval Timer.
+ *
+ * This chip has three independent 16-bit down counters that can be
+ * read on the fly. There are three mode registers and three countdown
+ * registers. The countdown registers are addressed directly, via the
+ * first three I/O ports. The three mode registers are accessed via
+ * the fourth I/O port, with two bits in the mode byte indicating the
+ * register. (Why are hardware interfaces always so braindead?).
+ *
+ * To write a value into the countdown register, the mode register
+ * is first programmed with a command indicating which byte of
+ * the two byte register is to be modified. The three possibilities
+ * are load msb (TIMER_MSB), load lsb (TIMER_LSB), or load lsb then
+ * msb (TIMER_16BIT).
+ *
+ * To read the current value ("on the fly") from the countdown register,
+ * you write a "latch" command (TIMER_LATCH) into the mode register and
+ * then read the stable value from the corresponding I/O port. Presumably,
+ * after doing this, a write operation to the I/O port would result
+ * in undefined behavior (but hopefully not fry the chip).
+ * Reading in this manner has no side effects.
+ *
+ * The outputs of the three timers are connected as follows:
+ *
+ * timer 0 -> irq 0
+ * timer 1 -> dma chan 0 (for dram refresh)
+ * timer 2 -> speaker (via keyboard controller)
+ *
+ * Timer 0 is used to call hardclock.
+ * Timer 2 is used to generate console beeps.
+ */
+
+/*
+ * Macros for specifying values to be written into a mode register.
+ */
+#define TIMER_CNTR0 (IO_TIMER1 + 0) /* timer 0 counter port */
+#define TIMER_CNTR1 (IO_TIMER1 + 1) /* timer 1 counter port */
+#define TIMER_CNTR2 (IO_TIMER1 + 2) /* timer 2 counter port */
+#define TIMER_MODE (IO_TIMER1 + 3) /* timer mode port */
+#define TIMER_SEL0 0x00 /* select counter 0 */
+#define TIMER_SEL1 0x40 /* select counter 1 */
+#define TIMER_SEL2 0x80 /* select counter 2 */
+#define TIMER_INTTC 0x00 /* mode 0, intr on terminal cnt */
+#define TIMER_ONESHOT 0x02 /* mode 1, one shot */
+#define TIMER_RATEGEN 0x04 /* mode 2, rate generator */
+#define TIMER_SQWAVE 0x06 /* mode 3, square wave */
+#define TIMER_SWSTROBE 0x08 /* mode 4, s/w triggered strobe */
+#define TIMER_HWSTROBE 0x0a /* mode 5, h/w triggered strobe */
+#define TIMER_LATCH 0x00 /* latch counter for reading */
+#define TIMER_LSB 0x10 /* r/w counter LSB */
+#define TIMER_MSB 0x20 /* r/w counter MSB */
+#define TIMER_16BIT 0x30 /* r/w counter 16 bits, LSB first */
+#define TIMER_BCD 0x01 /* count in BCD */
+
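
A hedged sketch tying the macros above to the access sequences described in the comment: program counter 2 as a square-wave generator, then latch-read counter 0 on the fly. outb()/inb() are the usual i386 port primitives and the divisor is arbitrary; none of this code is part of the header.

	static void
	example_timer_use(void)
	{
		unsigned div = 1193;	/* roughly 1 kHz from the 1193182 Hz input */
		unsigned lo, hi;

		/* counter 2: write LSB then MSB, mode 3 (square wave), binary */
		outb(TIMER_MODE, TIMER_SEL2 | TIMER_16BIT | TIMER_SQWAVE);
		outb(TIMER_CNTR2, div & 0xff);
		outb(TIMER_CNTR2, div >> 8);

		/* latch counter 0, then read the stable value, LSB then MSB */
		outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
		lo = inb(TIMER_CNTR0);
		hi = inb(TIMER_CNTR0);
		printf("timer 0 count is now %u\n", (hi << 8) | lo);
	}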
diff --git a/sys/amd64/isa/vector.S b/sys/amd64/isa/vector.S
new file mode 100644
index 0000000..38ac79c
--- /dev/null
+++ b/sys/amd64/isa/vector.S
@@ -0,0 +1,376 @@
+/* vector.s */
+/*
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00167
+ * -------------------- ----- ----------------------
+ *
+ * 04 Jun 93 Bruce Evans Fixed irq_num vs id_num for multiple
+ * devices configed on the same irq with
+ * respect to ipending.
+ *
+ */
+
+#include "i386/isa/icu.h"
+#include "i386/isa/isa.h"
+#include "vector.h"
+
+#define ICU_EOI 0x20 /* XXX - define elsewhere */
+
+#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
+#define IRQ_BYTE(irq_num) ((irq_num) / 8)
+
+#define ENABLE_ICU1 \
+ movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
+ FASTER_NOP ; /* ... ASAP ... */ \
+ outb %al,$IO_ICU1 /* ... to clear in service bit */
+#ifdef AUTO_EOI_1
+#undef ENABLE_ICU1 /* we now use auto-EOI to reduce i/o */
+#define ENABLE_ICU1
+#endif
+
+#define ENABLE_ICU1_AND_2 \
+ movb $ICU_EOI,%al ; /* as above */ \
+ FASTER_NOP ; \
+ outb %al,$IO_ICU2 ; /* but do second icu first */ \
+ FASTER_NOP ; \
+ outb %al,$IO_ICU1 /* then first icu */
+#ifdef AUTO_EOI_2
+#undef ENABLE_ICU1_AND_2 /* data sheet says no auto-EOI on slave ... */
+#define ENABLE_ICU1_AND_2 /* ... but it works */
+#endif
+
+/*
+ * Macros for interrupt entry, call to handler, and exit.
+ *
+ * XXX - the interrupt frame is set up to look like a trap frame. This is
+ * usually a waste of time. The only interrupt handlers that want a frame
+ * are the clock handler (it wants a clock frame) and the npx handler
+ * (it's easier to do it right all in assembler). The interrupt return
+ * routine needs a trap frame for rare AST's (it could easily convert the
+ * frame). The direct costs of setting up a trap frame are two pushl's
+ * (error code and trap number), an addl to get rid of these, and pushing
+ * and popping the call-saved regs %esi, %edi and %ebp twice. The indirect
+ * costs are making the driver interface nonuniform so unpending of
+ * interrupts is more complicated and slower (call_driver(unit) would
+ * be easier than ensuring an interrupt frame for all handlers). Finally,
+ * there are some struct copies in the npx handler and maybe in the clock
+ * handler that could be avoided by working more with pointers to frames
+ * instead of frames.
+ *
+ * XXX - should we do a cld on every system entry to avoid the requirement
+ * for scattered cld's?
+ *
+ * Coding notes for *.s:
+ *
+ * If possible, avoid operations that involve an operand size override.
+ * Word-sized operations might be smaller, but the operand size override
+ * makes them slower on 486's and no faster on 386's unless perhaps
+ * the instruction pipeline is depleted. E.g.,
+ *
+ * Use movl to seg regs instead of the equivalent but more descriptive
+ * movw - gas generates an irrelevant (slower) operand size override.
+ *
+ * Use movl to ordinary regs in preference to movw and especially
+ * in preference to movz[bw]l. Use unsigned (long) variables with the
+ * top bits clear instead of unsigned short variables to provide more
+ * opportunities for movl.
+ *
+ * If possible, use byte-sized operations. They are smaller and no slower.
+ *
+ * Use (%reg) instead of 0(%reg) - gas generates larger code for the latter.
+ *
+ * If the interrupt frame is made more flexible, INTR can push %eax first
+ * and decide the ipending case with less overhead, e.g., by avoiding
+ * loading segregs.
+ */
+
+#define FAST_INTR(unit, irq_num, id_num, handler, enable_icus) \
+ pushl %eax ; /* save only call-used registers */ \
+ pushl %ecx ; \
+ pushl %edx ; \
+ pushl %ds ; \
+ /* pushl %es ; know compiler doesn't do string insns */ \
+ movl $KDSEL,%eax ; \
+ movl %ax,%ds ; \
+ /* movl %ax,%es ; */ \
+ SHOW_CLI ; /* although it interferes with "ASAP" */ \
+ pushl $unit ; \
+ call handler ; /* do the work ASAP */ \
+ enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
+ addl $4,%esp ; \
+ incl _cnt+V_INTR ; /* book-keeping can wait */ \
+ COUNT_EVENT(_intrcnt_actv, id_num) ; \
+ SHOW_STI ; \
+ /* popl %es ; */ \
+ popl %ds ; \
+ popl %edx; \
+ popl %ecx; \
+ popl %eax; \
+ iret
+
+#define INTR(unit, irq_num, id_num, mask, handler, icu, enable_icus, reg, stray) \
+ pushl $0 ; /* dummy error code */ \
+ pushl $T_ASTFLT ; \
+ pushal ; \
+ pushl %ds ; /* save our data and extra segments ... */ \
+ pushl %es ; \
+ movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \
+ movl %ax,%ds ; /* ... early in case SHOW_A_LOT is on */ \
+ movl %ax,%es ; \
+ SHOW_CLI ; /* interrupt did an implicit cli */ \
+ movb _imen + IRQ_BYTE(irq_num),%al ; \
+ orb $IRQ_BIT(irq_num),%al ; \
+ movb %al,_imen + IRQ_BYTE(irq_num) ; \
+ SHOW_IMEN ; \
+ FASTER_NOP ; \
+ outb %al,$icu+1 ; \
+ enable_icus ; \
+ incl _cnt+V_INTR ; /* tally interrupts */ \
+ movl _cpl,%eax ; \
+ testb $IRQ_BIT(irq_num),%reg ; \
+ jne 2f ; \
+1: ; \
+ COUNT_EVENT(_intrcnt_actv, id_num) ; \
+ movl _cpl,%eax ; \
+ pushl %eax ; \
+ pushl $unit ; \
+ orl mask,%eax ; \
+ movl %eax,_cpl ; \
+ SHOW_CPL ; \
+ SHOW_STI ; \
+ sti ; \
+ call handler ; \
+ movb _imen + IRQ_BYTE(irq_num),%al ; \
+ andb $~IRQ_BIT(irq_num),%al ; \
+ movb %al,_imen + IRQ_BYTE(irq_num) ; \
+ SHOW_IMEN ; \
+ FASTER_NOP ; \
+ outb %al,$icu+1 ; \
+ jmp doreti ; \
+; \
+ ALIGN_TEXT ; \
+2: ; \
+ COUNT_EVENT(_intrcnt_pend, id_num) ; \
+ movl $1b,%eax ; /* register resume address */ \
+ /* XXX - someday do it at attach time */ \
+ movl %eax,Vresume + (irq_num) * 4 ; \
+ orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \
+ SHOW_IPENDING ; \
+ popl %es ; \
+ popl %ds ; \
+ popal ; \
+ addl $4+4,%esp ; \
+ iret
+
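For readability only, here is a hedged C rendering of what the INTR() macro above does; imen, cpl and ipending are the masks the assembly manipulates, the ICU i/o and EOI steps are reduced to comments, and the argument names stand in for the macro parameters. This is an illustration, not code that is assembled.

	void
	intr_sketch(int unit, unsigned irq_bit, unsigned priority_mask,
		    void (*handler)(int))
	{
		extern unsigned imen, cpl, ipending;
		unsigned old_cpl;

		imen |= irq_bit;	/* mask this irq at the ICU ... */
		/* ... outb the new mask, send EOI (enable_icus) ... */
		if (cpl & irq_bit) {
			ipending |= irq_bit;	/* blocked: remember for unpend */
			return;			/* iret; resumed later at 1: */
		}
		old_cpl = cpl;
		cpl |= priority_mask;	/* block lower-priority interrupts */
		/* sti */
		handler(unit);
		imen &= ~irq_bit;	/* unmask the irq again */
		cpl = old_cpl;		/* done by doreti, which also runs ASTs */
	}
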
+/*
+ * vector.h has defined a macro 'BUILD_VECTORS' containing a big list of info
+ * about vectors, including a submacro 'BUILD_VECTOR' that operates on the
+ * info about each vector. We redefine 'BUILD_VECTOR' to expand the info
+ * in different ways. Here we expand it to a list of interrupt handlers.
+ * This order is of course unimportant. Elsewhere we expand it to inline
+ * linear search code for which the order is a little more important and
+ * concatenating the code with no holes is very important.
+ *
+ * XXX - now there is BUILD_FAST_VECTOR as well as BUILD_VECTOR.
+ *
+ * The info consists of the following items for each vector:
+ *
+ * name (identifier): name of the vector; used to build labels
+ * unit (expression): unit number to call the device driver with
+ * irq_num (number): number of the IRQ to be handled (0-15)
+ * id_num (number): unique numeric id for handler (assigned by config)
+ * mask (blank-ident): priority mask used
+ * handler (blank-ident): interrupt handler to call
+ * icu_num (number): (1 + irq_num / 8) converted for label building
+ * icu_enables (number): 1 for icu_num == 1, 1_AND_2 for icu_num == 2
+ * reg (blank-ident): al for icu_num == 1, ah for icu_num == 2
+ *
+ * 'irq_num' is converted in several ways at config time to get around
+ * limitations in cpp. The macros have blanks after commas iff they would
+ * not mess up identifiers and numbers.
+ */
+
+#undef BUILD_FAST_VECTOR
+#define BUILD_FAST_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .globl handler ; \
+ .text ; \
+ .globl _V/**/name ; \
+ SUPERALIGN_TEXT ; \
+_V/**/name: ; \
+ FAST_INTR(unit, irq_num, id_num, handler, ENABLE_ICU/**/icu_enables)
+
+#undef BUILD_VECTOR
+#define BUILD_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .globl handler ; \
+ .text ; \
+ .globl _V/**/name ; \
+ SUPERALIGN_TEXT ; \
+_V/**/name: ; \
+ INTR(unit,irq_num,id_num, mask, handler, IO_ICU/**/icu_num, \
+ ENABLE_ICU/**/icu_enables, reg,)
+
+ BUILD_VECTORS
+
+ /* hardware interrupt catcher (IDT 32 - 47) */
+ .globl _isa_strayintr
+
+#define STRAYINTR(irq_num, icu_num, icu_enables, reg) \
+IDTVEC(intr/**/irq_num) ; \
+ INTR(irq_num,irq_num,irq_num, _highmask, _isa_strayintr, \
+ IO_ICU/**/icu_num, ENABLE_ICU/**/icu_enables, reg,stray)
+
+/*
+ * XXX - the mask (1 << 2) == IRQ_SLAVE will be generated for IRQ 2, instead
+ * of the mask IRQ2 (defined as IRQ9 == (1 << 9)). But IRQ 2 "can't happen".
+ * In fact, all stray interrupts "can't happen" except for bugs. The
+ * "stray" IRQ 7 is documented behaviour of the 8259. It happens when there
+ * is a glitch on any of its interrupt inputs. Does it really interrupt when
+ * IRQ 7 is masked?
+ *
+ * XXX - unpend doesn't work for these, it sends them to the real handler.
+ *
+ * XXX - the race bug during initialization may be because I changed the
+ * order of switching from the stray to the real interrupt handler to before
+ * enabling interrupts. The old order looked unsafe but maybe it is OK with
+ * the stray interrupt handler installed. But these handlers only reduce
+ * the window of vulnerability - it is still open at the end of
+ * isa_configure().
+ *
+ * XXX - many comments are stale.
+ */
+
+ STRAYINTR(0,1,1, al)
+ STRAYINTR(1,1,1, al)
+ STRAYINTR(2,1,1, al)
+ STRAYINTR(3,1,1, al)
+ STRAYINTR(4,1,1, al)
+ STRAYINTR(5,1,1, al)
+ STRAYINTR(6,1,1, al)
+ STRAYINTR(8,2,1_AND_2, ah)
+ STRAYINTR(9,2,1_AND_2, ah)
+ STRAYINTR(10,2,1_AND_2, ah)
+ STRAYINTR(11,2,1_AND_2, ah)
+ STRAYINTR(12,2,1_AND_2, ah)
+ STRAYINTR(13,2,1_AND_2, ah)
+ STRAYINTR(14,2,1_AND_2, ah)
+ STRAYINTR(15,2,1_AND_2, ah)
+IDTVEC(intrdefault)
+ STRAYINTR(7,1,1, al) /* XXX */
+#if 0
+ INTRSTRAY(255, _highmask, 255) ; call _isa_strayintr ; INTREXIT2
+#endif
+/*
+ * These are the interrupt counters; I moved them here from icu.s so that
+ * they are with the name table. rgrimes
+ *
+ * There are now lots of counters; this has been redone to work with
+ * Bruce Evans' intr-0.1 code, which I modified some more to make it all
+ * work with vmstat.
+ */
+ .data
+Vresume: .space 16 * 4 /* where to resume intr handler after unpend */
+ .globl _intrcnt
+_intrcnt: /* used by vmstat to calc size of table */
+ .globl _intrcnt_bad7
+_intrcnt_bad7: .space 4 /* glitches on irq 7 */
+ .globl _intrcnt_bad15
+_intrcnt_bad15: .space 4 /* glitches on irq 15 */
+ .globl _intrcnt_stray
+_intrcnt_stray: .space 4 /* total count of stray interrupts */
+ .globl _intrcnt_actv
+_intrcnt_actv: .space NR_REAL_INT_HANDLERS * 4 /* active interrupts */
+ .globl _intrcnt_pend
+_intrcnt_pend: .space NR_REAL_INT_HANDLERS * 4 /* pending interrupts */
+ .globl _intrcnt_spl
+_intrcnt_spl: .space 32 * 4 /* XXX 32 should not be hard coded ? */
+ .globl _intrcnt_show
+_intrcnt_show: .space 8 * 4 /* XXX 8 should not be hard coded ? */
+ .globl _eintrcnt
+_eintrcnt: /* used by vmstat to calc size of table */
+
+/*
+ * Build the interrupt name table for vmstat
+ */
+
+#undef BUILD_FAST_VECTOR
+#define BUILD_FAST_VECTOR BUILD_VECTOR
+
+#undef BUILD_VECTOR
+#define BUILD_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .ascii "name irq" ; \
+ .asciz "irq_num"
+/*
+ * XXX - use the STRING and CONCAT macros from <sys/cdefs.h> to stringize
+ * and concatenate names above and elsewhere.
+ */
+
+ .text
+ .globl _intrnames, _eintrnames
+_intrnames:
+ BUILD_VECTOR(bad,,7,,,,,,)
+ BUILD_VECTOR(bad,,15,,,,,,)
+ BUILD_VECTOR(stray,,,,,,,,)
+ BUILD_VECTORS
+
+#undef BUILD_FAST_VECTOR
+#define BUILD_FAST_VECTOR BUILD_VECTOR
+
+#undef BUILD_VECTOR
+#define BUILD_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .asciz "name pend"
+
+ BUILD_VECTORS
+
+/*
+ * now the spl names
+ */
+ .asciz "unpend_v"
+ .asciz "doreti"
+ .asciz "p0!ni"
+ .asciz "!p0!ni"
+ .asciz "p0ni"
+ .asciz "netisr_raw"
+ .asciz "netisr_ip"
+ .asciz "netisr_imp"
+ .asciz "netisr_ns"
+ .asciz "softclock"
+ .asciz "trap"
+ .asciz "doreti_exit2"
+ .asciz "splbio"
+ .asciz "splclock"
+ .asciz "splhigh"
+ .asciz "splimp"
+ .asciz "splnet"
+ .asciz "splsoftclock"
+ .asciz "spltty"
+ .asciz "spl0"
+ .asciz "netisr_raw2"
+ .asciz "netisr_ip2"
+ .asciz "splx"
+ .asciz "splx!0"
+ .asciz "unpend_V"
+ .asciz "spl25" /* spl25-spl31 are spares */
+ .asciz "spl26"
+ .asciz "spl27"
+ .asciz "spl28"
+ .asciz "spl29"
+ .asciz "spl30"
+ .asciz "spl31"
+/*
+ * now the mask names
+ */
+ .asciz "cli"
+ .asciz "cpl"
+ .asciz "imen"
+ .asciz "ipending"
+ .asciz "sti"
+ .asciz "mask5" /* mask5-mask7 are spares */
+ .asciz "mask6"
+ .asciz "mask7"
+
+_eintrnames:
diff --git a/sys/amd64/isa/vector.s b/sys/amd64/isa/vector.s
new file mode 100644
index 0000000..38ac79c
--- /dev/null
+++ b/sys/amd64/isa/vector.s
@@ -0,0 +1,376 @@
+/* vector.s */
+/*
+ * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
+ * -------------------- ----- ----------------------
+ * CURRENT PATCH LEVEL: 1 00167
+ * -------------------- ----- ----------------------
+ *
+ * 04 Jun 93 Bruce Evans Fixed irq_num vs id_num for multiple
+ * devices configed on the same irq with
+ * respect to ipending.
+ *
+ */
+
+#include "i386/isa/icu.h"
+#include "i386/isa/isa.h"
+#include "vector.h"
+
+#define ICU_EOI 0x20 /* XXX - define elsewhere */
+
+#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
+#define IRQ_BYTE(irq_num) ((irq_num) / 8)
+
+#define ENABLE_ICU1 \
+ movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
+ FASTER_NOP ; /* ... ASAP ... */ \
+ outb %al,$IO_ICU1 /* ... to clear in service bit */
+#ifdef AUTO_EOI_1
+#undef ENABLE_ICU1 /* we now use auto-EOI to reduce i/o */
+#define ENABLE_ICU1
+#endif
+
+#define ENABLE_ICU1_AND_2 \
+ movb $ICU_EOI,%al ; /* as above */ \
+ FASTER_NOP ; \
+ outb %al,$IO_ICU2 ; /* but do second icu first */ \
+ FASTER_NOP ; \
+ outb %al,$IO_ICU1 /* then first icu */
+#ifdef AUTO_EOI_2
+#undef ENABLE_ICU1_AND_2 /* data sheet says no auto-EOI on slave ... */
+#define ENABLE_ICU1_AND_2 /* ... but it works */
+#endif
+
+/*
+ * Macros for interrupt entry, call to handler, and exit.
+ *
+ * XXX - the interrupt frame is set up to look like a trap frame. This is
+ * usually a waste of time. The only interrupt handlers that want a frame
+ * are the clock handler (it wants a clock frame) and the npx handler
+ * (it's easier to do it right all in assembler). The interrupt return
+ * routine needs a trap frame for rare AST's (it could easily convert the
+ * frame). The direct costs of setting up a trap frame are two pushl's
+ * (error code and trap number), an addl to get rid of these, and pushing
+ * and popping the call-saved regs %esi, %edi and %ebp twice. The indirect
+ * costs are making the driver interface nonuniform so unpending of
+ * interrupts is more complicated and slower (call_driver(unit) would
+ * be easier than ensuring an interrupt frame for all handlers). Finally,
+ * there are some struct copies in the npx handler and maybe in the clock
+ * handler that could be avoided by working more with pointers to frames
+ * instead of frames.
+ *
+ * XXX - should we do a cld on every system entry to avoid the requirement
+ * for scattered cld's?
+ *
+ * Coding notes for *.s:
+ *
+ * If possible, avoid operations that involve an operand size override.
+ * Word-sized operations might be smaller, but the operand size override
+ * makes them slower on 486's and no faster on 386's unless perhaps
+ * the instruction pipeline is depleted. E.g.,
+ *
+ * Use movl to seg regs instead of the equivalent but more descriptive
+ * movw - gas generates an irrelevant (slower) operand size override.
+ *
+ * Use movl to ordinary regs in preference to movw and especially
+ * in preference to movz[bw]l. Use unsigned (long) variables with the
+ * top bits clear instead of unsigned short variables to provide more
+ * opportunities for movl.
+ *
+ * If possible, use byte-sized operations. They are smaller and no slower.
+ *
+ * Use (%reg) instead of 0(%reg) - gas generates larger code for the latter.
+ *
+ * If the interrupt frame is made more flexible, INTR can push %eax first
+ * and decide the ipending case with less overhead, e.g., by avoiding
+ * loading segregs.
+ */
+
+#define FAST_INTR(unit, irq_num, id_num, handler, enable_icus) \
+ pushl %eax ; /* save only call-used registers */ \
+ pushl %ecx ; \
+ pushl %edx ; \
+ pushl %ds ; \
+ /* pushl %es ; know compiler doesn't do string insns */ \
+ movl $KDSEL,%eax ; \
+ movl %ax,%ds ; \
+ /* movl %ax,%es ; */ \
+ SHOW_CLI ; /* although it interferes with "ASAP" */ \
+ pushl $unit ; \
+ call handler ; /* do the work ASAP */ \
+ enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
+ addl $4,%esp ; \
+ incl _cnt+V_INTR ; /* book-keeping can wait */ \
+ COUNT_EVENT(_intrcnt_actv, id_num) ; \
+ SHOW_STI ; \
+ /* popl %es ; */ \
+ popl %ds ; \
+ popl %edx; \
+ popl %ecx; \
+ popl %eax; \
+ iret
+
+#define INTR(unit, irq_num, id_num, mask, handler, icu, enable_icus, reg, stray) \
+ pushl $0 ; /* dummy error code */ \
+ pushl $T_ASTFLT ; \
+ pushal ; \
+ pushl %ds ; /* save our data and extra segments ... */ \
+ pushl %es ; \
+ movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \
+ movl %ax,%ds ; /* ... early in case SHOW_A_LOT is on */ \
+ movl %ax,%es ; \
+ SHOW_CLI ; /* interrupt did an implicit cli */ \
+ movb _imen + IRQ_BYTE(irq_num),%al ; \
+ orb $IRQ_BIT(irq_num),%al ; \
+ movb %al,_imen + IRQ_BYTE(irq_num) ; \
+ SHOW_IMEN ; \
+ FASTER_NOP ; \
+ outb %al,$icu+1 ; \
+ enable_icus ; \
+ incl _cnt+V_INTR ; /* tally interrupts */ \
+ movl _cpl,%eax ; \
+ testb $IRQ_BIT(irq_num),%reg ; \
+ jne 2f ; \
+1: ; \
+ COUNT_EVENT(_intrcnt_actv, id_num) ; \
+ movl _cpl,%eax ; \
+ pushl %eax ; \
+ pushl $unit ; \
+ orl mask,%eax ; \
+ movl %eax,_cpl ; \
+ SHOW_CPL ; \
+ SHOW_STI ; \
+ sti ; \
+ call handler ; \
+ movb _imen + IRQ_BYTE(irq_num),%al ; \
+ andb $~IRQ_BIT(irq_num),%al ; \
+ movb %al,_imen + IRQ_BYTE(irq_num) ; \
+ SHOW_IMEN ; \
+ FASTER_NOP ; \
+ outb %al,$icu+1 ; \
+ jmp doreti ; \
+; \
+ ALIGN_TEXT ; \
+2: ; \
+ COUNT_EVENT(_intrcnt_pend, id_num) ; \
+ movl $1b,%eax ; /* register resume address */ \
+ /* XXX - someday do it at attach time */ \
+ movl %eax,Vresume + (irq_num) * 4 ; \
+ orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \
+ SHOW_IPENDING ; \
+ popl %es ; \
+ popl %ds ; \
+ popal ; \
+ addl $4+4,%esp ; \
+ iret
+
+/*
+ * vector.h has defined a macro 'BUILD_VECTORS' containing a big list of info
+ * about vectors, including a submacro 'BUILD_VECTOR' that operates on the
+ * info about each vector. We redefine 'BUILD_VECTOR' to expand the info
+ * in different ways. Here we expand it to a list of interrupt handlers.
+ * This order is of course unimportant. Elsewhere we expand it to inline
+ * linear search code for which the order is a little more important and
+ * concatenating the code with no holes is very important.
+ *
+ * XXX - now there is BUILD_FAST_VECTOR as well as BUILD_VECTOR.
+ *
+ * The info consists of the following items for each vector:
+ *
+ * name (identifier): name of the vector; used to build labels
+ * unit (expression): unit number to call the device driver with
+ * irq_num (number): number of the IRQ to be handled (0-15)
+ * id_num (number): unique numeric id for handler (assigned by config)
+ * mask (blank-ident): priority mask used
+ * handler (blank-ident): interrupt handler to call
+ * icu_num (number): (1 + irq_num / 8) converted for label building
+ * icu_enables (number): 1 for icu_num == 1, 1_AND_2 for icu_num == 2
+ * reg (blank-ident): al for icu_num == 1, ah for icu_num == 2
+ *
+ * 'irq_num' is converted in several ways at config time to get around
+ * limitations in cpp. The macros have blanks after commas iff they would
+ * not mess up identifiers and numbers.
+ */
+
+#undef BUILD_FAST_VECTOR
+#define BUILD_FAST_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .globl handler ; \
+ .text ; \
+ .globl _V/**/name ; \
+ SUPERALIGN_TEXT ; \
+_V/**/name: ; \
+ FAST_INTR(unit, irq_num, id_num, handler, ENABLE_ICU/**/icu_enables)
+
+#undef BUILD_VECTOR
+#define BUILD_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .globl handler ; \
+ .text ; \
+ .globl _V/**/name ; \
+ SUPERALIGN_TEXT ; \
+_V/**/name: ; \
+ INTR(unit,irq_num,id_num, mask, handler, IO_ICU/**/icu_num, \
+ ENABLE_ICU/**/icu_enables, reg,)
+
+ BUILD_VECTORS
+
+ /* hardware interrupt catcher (IDT 32 - 47) */
+ .globl _isa_strayintr
+
+#define STRAYINTR(irq_num, icu_num, icu_enables, reg) \
+IDTVEC(intr/**/irq_num) ; \
+ INTR(irq_num,irq_num,irq_num, _highmask, _isa_strayintr, \
+ IO_ICU/**/icu_num, ENABLE_ICU/**/icu_enables, reg,stray)
+
+/*
+ * XXX - the mask (1 << 2) == IRQ_SLAVE will be generated for IRQ 2, instead
+ * of the mask IRQ2 (defined as IRQ9 == (1 << 9)). But IRQ 2 "can't happen".
+ * In fact, all stray interrupts "can't happen" except for bugs. The
+ * "stray" IRQ 7 is documented behaviour of the 8259. It happens when there
+ * is a glitch on any of its interrupt inputs. Does it really interrupt when
+ * IRQ 7 is masked?
+ *
+ * XXX - unpend doesn't work for these, it sends them to the real handler.
+ *
+ * XXX - the race bug during initialization may be because I changed the
+ * order of switching from the stray to the real interrupt handler to before
+ * enabling interrupts. The old order looked unsafe but maybe it is OK with
+ * the stray interrupt handler installed. But these handlers only reduce
+ * the window of vulnerability - it is still open at the end of
+ * isa_configure().
+ *
+ * XXX - many comments are stale.
+ */
+
+ STRAYINTR(0,1,1, al)
+ STRAYINTR(1,1,1, al)
+ STRAYINTR(2,1,1, al)
+ STRAYINTR(3,1,1, al)
+ STRAYINTR(4,1,1, al)
+ STRAYINTR(5,1,1, al)
+ STRAYINTR(6,1,1, al)
+ STRAYINTR(8,2,1_AND_2, ah)
+ STRAYINTR(9,2,1_AND_2, ah)
+ STRAYINTR(10,2,1_AND_2, ah)
+ STRAYINTR(11,2,1_AND_2, ah)
+ STRAYINTR(12,2,1_AND_2, ah)
+ STRAYINTR(13,2,1_AND_2, ah)
+ STRAYINTR(14,2,1_AND_2, ah)
+ STRAYINTR(15,2,1_AND_2, ah)
+IDTVEC(intrdefault)
+ STRAYINTR(7,1,1, al) /* XXX */
+#if 0
+ INTRSTRAY(255, _highmask, 255) ; call _isa_strayintr ; INTREXIT2
+#endif
+/*
+ * These are the interrupt counters; I moved them here from icu.s so that
+ * they are with the name table. rgrimes
+ *
+ * There are now lots of counters; this has been redone to work with
+ * Bruce Evans' intr-0.1 code, which I modified some more to make it all
+ * work with vmstat.
+ */
+ .data
+Vresume: .space 16 * 4 /* where to resume intr handler after unpend */
+ .globl _intrcnt
+_intrcnt: /* used by vmstat to calc size of table */
+ .globl _intrcnt_bad7
+_intrcnt_bad7: .space 4 /* glitches on irq 7 */
+ .globl _intrcnt_bad15
+_intrcnt_bad15: .space 4 /* glitches on irq 15 */
+ .globl _intrcnt_stray
+_intrcnt_stray: .space 4 /* total count of stray interrupts */
+ .globl _intrcnt_actv
+_intrcnt_actv: .space NR_REAL_INT_HANDLERS * 4 /* active interrupts */
+ .globl _intrcnt_pend
+_intrcnt_pend: .space NR_REAL_INT_HANDLERS * 4 /* pending interrupts */
+ .globl _intrcnt_spl
+_intrcnt_spl: .space 32 * 4 /* XXX 32 should not be hard coded ? */
+ .globl _intrcnt_show
+_intrcnt_show: .space 8 * 4 /* XXX 8 should not be hard coded ? */
+ .globl _eintrcnt
+_eintrcnt: /* used by vmstat to calc size of table */
+
+/*
+ * Build the interrupt name table for vmstat
+ */
+
+#undef BUILD_FAST_VECTOR
+#define BUILD_FAST_VECTOR BUILD_VECTOR
+
+#undef BUILD_VECTOR
+#define BUILD_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .ascii "name irq" ; \
+ .asciz "irq_num"
+/*
+ * XXX - use the STRING and CONCAT macros from <sys/cdefs.h> to stringize
+ * and concatenate names above and elsewhere.
+ */
+
+ .text
+ .globl _intrnames, _eintrnames
+_intrnames:
+ BUILD_VECTOR(bad,,7,,,,,,)
+ BUILD_VECTOR(bad,,15,,,,,,)
+ BUILD_VECTOR(stray,,,,,,,,)
+ BUILD_VECTORS
+
+#undef BUILD_FAST_VECTOR
+#define BUILD_FAST_VECTOR BUILD_VECTOR
+
+#undef BUILD_VECTOR
+#define BUILD_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .asciz "name pend"
+
+ BUILD_VECTORS
+
+/*
+ * now the spl names
+ */
+ .asciz "unpend_v"
+ .asciz "doreti"
+ .asciz "p0!ni"
+ .asciz "!p0!ni"
+ .asciz "p0ni"
+ .asciz "netisr_raw"
+ .asciz "netisr_ip"
+ .asciz "netisr_imp"
+ .asciz "netisr_ns"
+ .asciz "softclock"
+ .asciz "trap"
+ .asciz "doreti_exit2"
+ .asciz "splbio"
+ .asciz "splclock"
+ .asciz "splhigh"
+ .asciz "splimp"
+ .asciz "splnet"
+ .asciz "splsoftclock"
+ .asciz "spltty"
+ .asciz "spl0"
+ .asciz "netisr_raw2"
+ .asciz "netisr_ip2"
+ .asciz "splx"
+ .asciz "splx!0"
+ .asciz "unpend_V"
+ .asciz "spl25" /* spl25-spl31 are spares */
+ .asciz "spl26"
+ .asciz "spl27"
+ .asciz "spl28"
+ .asciz "spl29"
+ .asciz "spl30"
+ .asciz "spl31"
+/*
+ * now the mask names
+ */
+ .asciz "cli"
+ .asciz "cpl"
+ .asciz "imen"
+ .asciz "ipending"
+ .asciz "sti"
+ .asciz "mask5" /* mask5-mask7 are spares */
+ .asciz "mask6"
+ .asciz "mask7"
+
+_eintrnames: