summary refs log tree commit diff stats
path: root/sys/amd64
diff options
context:
space:
mode:
Diffstat (limited to 'sys/amd64')
-rw-r--r--sys/amd64/Makefile25
-rw-r--r--sys/amd64/amd64/autoconf.c209
-rw-r--r--sys/amd64/amd64/cpu_switch.S458
-rw-r--r--sys/amd64/amd64/db_disasm.c1375
-rw-r--r--sys/amd64/amd64/db_interface.c240
-rw-r--r--sys/amd64/amd64/db_trace.c340
-rw-r--r--sys/amd64/amd64/exception.S275
-rw-r--r--sys/amd64/amd64/exception.s275
-rw-r--r--sys/amd64/amd64/fpu.c554
-rw-r--r--sys/amd64/amd64/genassym.c192
-rw-r--r--sys/amd64/amd64/locore.S518
-rw-r--r--sys/amd64/amd64/locore.s518
-rw-r--r--sys/amd64/amd64/machdep.c1556
-rw-r--r--sys/amd64/amd64/mem.c259
-rw-r--r--sys/amd64/amd64/pmap.c1944
-rw-r--r--sys/amd64/amd64/support.S1221
-rw-r--r--sys/amd64/amd64/support.s1221
-rw-r--r--sys/amd64/amd64/swtch.s458
-rw-r--r--sys/amd64/amd64/sys_machdep.c328
-rw-r--r--sys/amd64/amd64/trap.c692
-rw-r--r--sys/amd64/amd64/tsc.c442
-rw-r--r--sys/amd64/amd64/vm_machdep.c1246
-rw-r--r--sys/amd64/include/asmacros.h49
-rw-r--r--sys/amd64/include/cpu.h118
-rw-r--r--sys/amd64/include/cpufunc.h108
-rw-r--r--sys/amd64/include/cputypes.h53
-rw-r--r--sys/amd64/include/db_machdep.h120
-rw-r--r--sys/amd64/include/exec.h128
-rw-r--r--sys/amd64/include/float.h72
-rw-r--r--sys/amd64/include/floatingpoint.h109
-rw-r--r--sys/amd64/include/fpu.h141
-rw-r--r--sys/amd64/include/frame.h140
-rw-r--r--sys/amd64/include/npx.h141
-rw-r--r--sys/amd64/include/pc/display.h45
-rw-r--r--sys/amd64/include/pcb.h93
-rw-r--r--sys/amd64/include/pmap.h217
-rw-r--r--sys/amd64/include/proc.h50
-rw-r--r--sys/amd64/include/profile.h56
-rw-r--r--sys/amd64/include/psl.h65
-rw-r--r--sys/amd64/include/ptrace.h40
-rw-r--r--sys/amd64/include/reg.h106
-rw-r--r--sys/amd64/include/reloc.h44
-rw-r--r--sys/amd64/include/segments.h235
-rw-r--r--sys/amd64/include/signal.h75
-rw-r--r--sys/amd64/include/specialreg.h64
-rw-r--r--sys/amd64/include/sysarch.h24
-rw-r--r--sys/amd64/include/trap.h101
-rw-r--r--sys/amd64/include/tss.h82
-rw-r--r--sys/amd64/include/varargs.h62
-rw-r--r--sys/amd64/include/vmparam.h263
-rw-r--r--sys/amd64/isa/clock.c442
-rw-r--r--sys/amd64/isa/icu.h97
-rw-r--r--sys/amd64/isa/isa.c671
-rw-r--r--sys/amd64/isa/isa.h181
-rw-r--r--sys/amd64/isa/npx.c554
-rw-r--r--sys/amd64/isa/timerreg.h93
-rw-r--r--sys/amd64/isa/vector.S360
-rw-r--r--sys/amd64/isa/vector.s360
58 files changed, 19905 insertions, 0 deletions
diff --git a/sys/amd64/Makefile b/sys/amd64/Makefile
new file mode 100644
index 0000000..4ad5a34
--- /dev/null
+++ b/sys/amd64/Makefile
@@ -0,0 +1,25 @@
+# from: @(#)Makefile 7.3 (Berkeley) 6/9/91
+# $Id$
+
+# Makefile for i386 tags file
+
+all:
+ @echo "make tags or links only"
+
+TI386= ../i386/tags
+SI386= ../i386/i386/*.[ch] ../i386/include/*.h ../i386/isa/*.[ch]
+AI386= ../i386/i386/*.s
+
+# Directories in which to place i386 tags links
+DI386= eisa isa mca include
+
+tags:
+ -ctags -dtf ${TI386} ${COMM} ${SI386}
+ egrep "^ENTRY(.*)|^ALTENTRY(.*)" ${AI386} | \
+ sed "s;\([^:]*\):\([^(]*\)(\([^, )]*\)\(.*\);\3 \1 /^\2(\3\4$$/;" \
+ >> ${TI386}
+ sort -o ${TI386} ${TI386}
+
+links:
+ -for i in ${DI386}; do \
+ cd $$i && rm -f tags; ln -s ../tags tags; done
diff --git a/sys/amd64/amd64/autoconf.c b/sys/amd64/amd64/autoconf.c
new file mode 100644
index 0000000..3575d1c
--- /dev/null
+++ b/sys/amd64/amd64/autoconf.c
@@ -0,0 +1,209 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)autoconf.c 7.1 (Berkeley) 5/9/91
+ * $Id: autoconf.c,v 1.10 1994/03/21 14:53:08 ache Exp $
+ */
+
+/*
+ * Setup the system to run on the current machine.
+ *
+ * Configure() is called at boot time and initializes the vba
+ * device tables and the memory controller monitoring. Available
+ * devices are determined (from possibilities mentioned in ioconf.c),
+ * and the drivers are initialized.
+ */
+#include "param.h"
+#include "systm.h"
+#include "buf.h"
+#include "dkstat.h"
+#include "conf.h"
+#include "dmap.h"
+#include "reboot.h"
+#include "kernel.h"
+
+#include "machine/pte.h"
+
+static void swapconf(void);
+static void setroot(void);
+
+/*
+ * The following several variables are related to
+ * the configuration process, and are used in initializing
+ * the machine.
+ */
+int dkn; /* number of iostat dk numbers assigned so far */
+extern int cold; /* cold start flag initialized in locore.s */
+
+/*
+ * Determine i/o configuration for a machine.
+ */
+void
+configure()
+{
+
+#include "isa.h"
+#if NISA > 0
+ isa_configure();
+#endif
+
+#if GENERICxxx && !defined(DISKLESS)
+ if ((boothowto & RB_ASKNAME) == 0)
+ setroot();
+ setconf();
+#else
+#ifndef DISKLESS
+ setroot();
+#endif
+#endif
+ /*
+ * Configure swap area and related system
+ * parameter based on device(s) used.
+ */
+ swapconf();
+ cold = 0;
+}
+
+/*
+ * Configure swap space and related parameters.
+ */
+static void
+swapconf()
+{
+ register struct swdevt *swp;
+ register int nblks;
+ extern int Maxmem;
+
+ for (swp = swdevt; swp->sw_dev > 0; swp++)
+ {
+ unsigned d = major(swp->sw_dev);
+
+ if (d > nblkdev) break;
+ if (bdevsw[d].d_psize) {
+ nblks = (*bdevsw[d].d_psize)(swp->sw_dev);
+ if (nblks > 0 &&
+ (swp->sw_nblks == 0 || swp->sw_nblks > nblks))
+ swp->sw_nblks = nblks;
+ else
+ swp->sw_nblks = 0;
+ }
+ swp->sw_nblks = ctod(dtoc(swp->sw_nblks));
+ }
+ if (dumplo == 0 && bdevsw[major(dumpdev)].d_psize)
+ dumplo = (*bdevsw[major(dumpdev)].d_psize)(dumpdev) -
+ Maxmem*NBPG/512;
+ if (dumplo < 0)
+ dumplo = 0;
+}
+
+#define DOSWAP /* change swdevt and dumpdev */
+u_long bootdev = 0; /* should be dev_t, but not until 32 bits */
+
+static char devname[][2] = {
+ 'w','d', /* 0 = wd */
+ 's','w', /* 1 = sw */
+#define FDMAJOR 2
+ 'f','d', /* 2 = fd */
+ 'w','t', /* 3 = wt */
+ 's','d', /* 4 = sd -- new SCSI system */
+};
+
+#define PARTITIONMASK 0x7
+#define PARTITIONSHIFT 3
+#define FDUNITSHIFT 6
+
+/*
+ * Attempt to find the device from which we were booted.
+ * If we can do so, and not instructed not to do so,
+ * change rootdev to correspond to the load device.
+ */
+static void
+setroot()
+{
+ int majdev, mindev, unit, part, adaptor;
+ dev_t temp = 0, orootdev;
+ struct swdevt *swp;
+
+/*printf("howto %x bootdev %x ", boothowto, bootdev);*/
+ if (boothowto & RB_DFLTROOT ||
+ (bootdev & B_MAGICMASK) != (u_long)B_DEVMAGIC)
+ return;
+ majdev = (bootdev >> B_TYPESHIFT) & B_TYPEMASK;
+ if (majdev > sizeof(devname) / sizeof(devname[0]))
+ return;
+ adaptor = (bootdev >> B_ADAPTORSHIFT) & B_ADAPTORMASK;
+ unit = (bootdev >> B_UNITSHIFT) & B_UNITMASK;
+ if (majdev == FDMAJOR) {
+ part = 3; /* raw */
+ mindev = unit << FDUNITSHIFT;
+ }
+ else {
+ part = (bootdev >> B_PARTITIONSHIFT) & B_PARTITIONMASK;
+ mindev = (unit << PARTITIONSHIFT) + part;
+ }
+ orootdev = rootdev;
+ rootdev = makedev(majdev, mindev);
+ /*
+ * If the original rootdev is the same as the one
+ * just calculated, don't need to adjust the swap configuration.
+ */
+ if (rootdev == orootdev)
+ return;
+ printf("changing root device to %c%c%d%c\n",
+ devname[majdev][0], devname[majdev][1],
+ mindev >> (majdev == FDMAJOR ? FDUNITSHIFT : PARTITIONSHIFT),
+ part + 'a');
+#ifdef DOSWAP
+ mindev &= ~PARTITIONMASK;
+ for (swp = swdevt; swp->sw_dev; swp++) {
+ if (majdev == major(swp->sw_dev) &&
+ mindev == (minor(swp->sw_dev) & ~PARTITIONMASK)) {
+
+ temp = swdevt[0].sw_dev;
+ swdevt[0].sw_dev = swp->sw_dev;
+ swp->sw_dev = temp;
+ break;
+ }
+ }
+ if (swp->sw_dev == 0)
+ return;
+ /*
+ * If dumpdev was the same as the old primary swap
+ * device, move it to the new primary swap device.
+ */
+ if (temp == dumpdev)
+ dumpdev = swdevt[0].sw_dev;
+#endif
+}
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
new file mode 100644
index 0000000..aa8b5ba
--- /dev/null
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -0,0 +1,458 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: swtch.s,v 1.5 1994/04/02 07:00:30 davidg Exp $
+ */
+
+#include "npx.h" /* for NNPX */
+#include "assym.s" /* for preprocessor defines */
+#include "errno.h" /* for error codes */
+
+#include "machine/asmacros.h" /* for miscellaneous assembly macros */
+#define LOCORE /* XXX inhibit C declarations */
+#include "machine/spl.h" /* for SWI_AST_MASK ... */
+
+
+/*****************************************************************************/
+/* Scheduling */
+/*****************************************************************************/
+
+/*
+ * The following primitives manipulate the run queues.
+ * _whichqs tells which of the 32 queues _qs
+ * have processes in them. setrunqueue puts processes into queues, Remrq
+ * removes them from queues. The running process is on no queue,
+ * other processes are on a queue related to p->p_priority, divided by 4
+ * actually to shrink the 0-127 range of priorities into the 32 available
+ * queues.
+ */
+ .data
+ .globl _curpcb, _whichqs
+_curpcb: .long 0 /* pointer to curproc's PCB area */
+_whichqs: .long 0 /* which run queues have data */
+
+ .globl _qs,_cnt,_panic
+ .comm _noproc,4
+ .comm _runrun,4
+
+ .globl _want_resched
+_want_resched: .long 0 /* we need to re-run the scheduler */
+
+ .text
+/*
+ * setrunqueue(p)
+ *
+ * Call should be made at spl6(), and p->p_stat should be SRUN
+ */
+ENTRY(setrunqueue)
+ movl 4(%esp),%eax
+ cmpl $0,P_RLINK(%eax) /* should not be on q already */
+ je set1
+ pushl $set2
+ call _panic
+set1:
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btsl %edx,_whichqs /* set q full bit */
+ shll $3,%edx
+ addl $_qs,%edx /* locate q hdr */
+ movl %edx,P_LINK(%eax) /* link process on tail of q */
+ movl P_RLINK(%edx),%ecx
+ movl %ecx,P_RLINK(%eax)
+ movl %eax,P_RLINK(%edx)
+ movl %eax,P_LINK(%ecx)
+ ret
+
+set2: .asciz "setrunqueue"
+
+/*
+ * Remrq(p)
+ *
+ * Call should be made at spl6().
+ */
+ENTRY(remrq)
+ movl 4(%esp),%eax
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btrl %edx,_whichqs /* clear full bit, panic if clear already */
+ jb rem1
+ pushl $rem3
+ call _panic
+rem1:
+ pushl %edx
+ movl P_LINK(%eax),%ecx /* unlink process */
+ movl P_RLINK(%eax),%edx
+ movl %edx,P_RLINK(%ecx)
+ movl P_RLINK(%eax),%ecx
+ movl P_LINK(%eax),%edx
+ movl %edx,P_LINK(%ecx)
+ popl %edx
+ movl $_qs,%ecx
+ shll $3,%edx
+ addl %edx,%ecx
+ cmpl P_LINK(%ecx),%ecx /* q still has something? */
+ je rem2
+ shrl $3,%edx /* yes, set bit as still full */
+ btsl %edx,_whichqs
+rem2:
+ movl $0,P_RLINK(%eax) /* zap reverse link to indicate off list */
+ ret
+
+rem3: .asciz "remrq"
+sw0: .asciz "cpu_switch"
+
+/*
+ * When no processes are on the runq, cpu_switch() branches to _idle
+ * to wait for something to come ready.
+ */
+ ALIGN_TEXT
+_idle:
+ MCOUNT
+ movl _IdlePTD,%ecx
+ movl %ecx,%cr3
+ movl $tmpstk-4,%esp
+ sti
+
+ /*
+ * XXX callers of cpu_switch() do a bogus splclock(). Locking should
+ * be left to cpu_switch().
+ */
+ movl $SWI_AST_MASK,_cpl
+ testl $~SWI_AST_MASK,_ipending
+ je idle_loop
+ call _splz
+
+ ALIGN_TEXT
+idle_loop:
+ cli
+ cmpl $0,_whichqs
+ jne sw1a
+ sti
+ hlt /* wait for interrupt */
+ jmp idle_loop
+
+badsw:
+ pushl $sw0
+ call _panic
+ /*NOTREACHED*/
+
+/*
+ * cpu_switch()
+ */
+ENTRY(cpu_switch)
+ incl _cnt+V_SWTCH
+
+ /* switch to new process. first, save context as needed */
+
+ movl _curproc,%ecx
+
+ /* if no process to save, don't bother */
+ testl %ecx,%ecx
+ je sw1
+
+ movl P_ADDR(%ecx),%ecx
+
+ movl (%esp),%eax /* Hardware registers */
+ movl %eax,PCB_EIP(%ecx)
+ movl %ebx,PCB_EBX(%ecx)
+ movl %esp,PCB_ESP(%ecx)
+ movl %ebp,PCB_EBP(%ecx)
+ movl %esi,PCB_ESI(%ecx)
+ movl %edi,PCB_EDI(%ecx)
+
+#if NNPX > 0
+ /* have we used fp, and need a save? */
+ mov _curproc,%eax
+ cmp %eax,_npxproc
+ jne 1f
+ pushl %ecx /* h/w bugs make saving complicated */
+ leal PCB_SAVEFPU(%ecx),%eax
+ pushl %eax
+ call _npxsave /* do it in a big C function */
+ popl %eax
+ popl %ecx
+1:
+#endif /* NNPX > 0 */
+
+ movl _CMAP2,%eax /* save temporary map PTE */
+ movl %eax,PCB_CMAP2(%ecx) /* in our context */
+ movl $0,_curproc /* out of process */
+
+# movw _cpl,%ax
+# movw %ax,PCB_IML(%ecx) /* save ipl */
+
+ /* save is done, now choose a new process or idle */
+sw1:
+ cli
+sw1a:
+ movl _whichqs,%edi
+2:
+ /* XXX - bsf is sloow */
+ bsfl %edi,%eax /* find a full q */
+ je _idle /* if none, idle */
+
+ /* XX update whichqs? */
+ btrl %eax,%edi /* clear q full status */
+ jnb 2b /* if it was clear, look for another */
+ movl %eax,%ebx /* save which one we are using */
+
+ shll $3,%eax
+ addl $_qs,%eax /* select q */
+ movl %eax,%esi
+
+#ifdef DIAGNOSTIC
+ cmpl P_LINK(%eax),%eax /* linked to self? (e.g. not on list) */
+ je badsw /* not possible */
+#endif
+
+ movl P_LINK(%eax),%ecx /* unlink from front of process q */
+ movl P_LINK(%ecx),%edx
+ movl %edx,P_LINK(%eax)
+ movl P_RLINK(%ecx),%eax
+ movl %eax,P_RLINK(%edx)
+
+ cmpl P_LINK(%ecx),%esi /* q empty */
+ je 3f
+ btsl %ebx,%edi /* nope, set to indicate full */
+3:
+ movl %edi,_whichqs /* update q status */
+
+ movl $0,%eax
+ movl %eax,_want_resched
+
+#ifdef DIAGNOSTIC
+ cmpl %eax,P_WCHAN(%ecx)
+ jne badsw
+ cmpb $SRUN,P_STAT(%ecx)
+ jne badsw
+#endif
+
+ movl %eax,P_RLINK(%ecx) /* isolate process to run */
+ movl P_ADDR(%ecx),%edx
+ movl PCB_CR3(%edx),%ebx
+
+ /* switch address space */
+ movl %ebx,%cr3
+
+ /* restore context */
+ movl PCB_EBX(%edx),%ebx
+ movl PCB_ESP(%edx),%esp
+ movl PCB_EBP(%edx),%ebp
+ movl PCB_ESI(%edx),%esi
+ movl PCB_EDI(%edx),%edi
+ movl PCB_EIP(%edx),%eax
+ movl %eax,(%esp)
+
+ movl PCB_CMAP2(%edx),%eax /* get temporary map */
+ movl %eax,_CMAP2 /* reload temporary map PTE */
+
+ movl %ecx,_curproc /* into next process */
+ movl %edx,_curpcb
+
+#ifdef USER_LDT
+ cmpl $0, PCB_USERLDT(%edx)
+ jnz 1f
+ movl __default_ldt,%eax
+ cmpl _currentldt,%eax
+ je 2f
+ lldt __default_ldt
+ movl %eax,_currentldt
+ jmp 2f
+1: pushl %edx
+ call _set_user_ldt
+ popl %edx
+2:
+#endif
+
+ pushl %edx /* save p to return */
+/*
+ * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
+ * I think restoring the cpl is unnecessary, but we must turn off the cli
+ * now that spl*() don't do it as a side effect.
+ */
+ pushl PCB_IML(%edx)
+ sti
+#if 0
+ call _splx
+#endif
+ addl $4,%esp
+/*
+ * XXX - 0.0 gets here via swtch_to_inactive(). I think 0.1 gets here in the
+ * same way. Better return a value.
+ */
+ popl %eax /* return(p); */
+ ret
+
+ENTRY(mvesp)
+ movl %esp,%eax
+ ret
+/*
+ * struct proc *swtch_to_inactive(struct proc *p);
+ *
+ * At exit of a process, move off the address space of the
+ * process and onto a "safe" one. Then, on a temporary stack
+ * return and run code that disposes of the old state.
+ * Since this code requires a parameter from the "old" stack,
+ * pass it back as a return value.
+ */
+ENTRY(swtch_to_inactive)
+ popl %edx /* old pc */
+ popl %eax /* arg, our return value */
+ movl _IdlePTD,%ecx
+ movl %ecx,%cr3 /* good bye address space */
+ #write buffer?
+ movl $tmpstk-4,%esp /* temporary stack, compensated for call */
+ MEXITCOUNT
+ jmp %edx /* return, execute remainder of cleanup */
+
+/*
+ * savectx(pcb, altreturn)
+ * Update pcb, saving current processor state and arranging
+ * for alternate return ala longjmp in cpu_switch if altreturn is true.
+ */
+ENTRY(savectx)
+ movl 4(%esp),%ecx
+ movw _cpl,%ax
+ movw %ax,PCB_IML(%ecx)
+ movl (%esp),%eax
+ movl %eax,PCB_EIP(%ecx)
+ movl %ebx,PCB_EBX(%ecx)
+ movl %esp,PCB_ESP(%ecx)
+ movl %ebp,PCB_EBP(%ecx)
+ movl %esi,PCB_ESI(%ecx)
+ movl %edi,PCB_EDI(%ecx)
+
+#if NNPX > 0
+ /*
+ * If npxproc == NULL, then the npx h/w state is irrelevant and the
+ * state had better already be in the pcb. This is true for forks
+ * but not for dumps (the old book-keeping with FP flags in the pcb
+ * always lost for dumps because the dump pcb has 0 flags).
+ *
+ * If npxproc != NULL, then we have to save the npx h/w state to
+ * npxproc's pcb and copy it to the requested pcb, or save to the
+ * requested pcb and reload. Copying is easier because we would
+ * have to handle h/w bugs for reloading. We used to lose the
+ * parent's npx state for forks by forgetting to reload.
+ */
+ mov _npxproc,%eax
+ testl %eax,%eax
+ je 1f
+
+ pushl %ecx
+ movl P_ADDR(%eax),%eax
+ leal PCB_SAVEFPU(%eax),%eax
+ pushl %eax
+ pushl %eax
+ call _npxsave
+ popl %eax
+ popl %eax
+ popl %ecx
+
+ pushl %ecx
+ pushl $108+8*2 /* XXX h/w state size + padding */
+ leal PCB_SAVEFPU(%ecx),%ecx
+ pushl %ecx
+ pushl %eax
+ call _bcopy
+ addl $12,%esp
+ popl %ecx
+1:
+#endif /* NNPX > 0 */
+
+ movl _CMAP2,%edx /* save temporary map PTE */
+ movl %edx,PCB_CMAP2(%ecx) /* in our context */
+
+ cmpl $0,8(%esp)
+ je 1f
+ movl %esp,%edx /* relocate current sp relative to pcb */
+ subl $_kstack,%edx /* (sp is relative to kstack): */
+ addl %edx,%ecx /* pcb += sp - kstack; */
+ movl %eax,(%ecx) /* write return pc at (relocated) sp@ */
+
+/* this mess deals with replicating register state gcc hides */
+ movl 12(%esp),%eax
+ movl %eax,12(%ecx)
+ movl 16(%esp),%eax
+ movl %eax,16(%ecx)
+ movl 20(%esp),%eax
+ movl %eax,20(%ecx)
+ movl 24(%esp),%eax
+ movl %eax,24(%ecx)
+1:
+ xorl %eax,%eax /* return 0 */
+ ret
+
+/*
+ * addupc(int pc, struct uprof *up, int ticks):
+ * update profiling information for the user process.
+ */
+ENTRY(addupc)
+ pushl %ebp
+ movl %esp,%ebp
+ movl 12(%ebp),%edx /* up */
+ movl 8(%ebp),%eax /* pc */
+
+ subl PR_OFF(%edx),%eax /* pc -= up->pr_off */
+ jb L1 /* if (pc was < off) return */
+
+ shrl $1,%eax /* praddr = pc >> 1 */
+ imull PR_SCALE(%edx),%eax /* praddr *= up->pr_scale */
+ shrl $15,%eax /* praddr = praddr >> 15 */
+ andl $-2,%eax /* praddr &= ~1 */
+
+ cmpl PR_SIZE(%edx),%eax /* if (praddr > up->pr_size) return */
+ ja L1
+
+/* addl %eax,%eax -- praddr -> word offset */
+ addl PR_BASE(%edx),%eax /* praddr += up-> pr_base */
+ movl 16(%ebp),%ecx /* ticks */
+
+ movl _curpcb,%edx
+ movl $proffault,PCB_ONFAULT(%edx)
+ addl %ecx,(%eax) /* storage location += ticks */
+ movl $0,PCB_ONFAULT(%edx)
+L1:
+ leave
+ ret
+
+ ALIGN_TEXT
+proffault:
+ /* if we get a fault, then kill profiling all together */
+ movl $0,PCB_ONFAULT(%edx) /* squish the fault handler */
+ movl 12(%ebp),%ecx
+ movl $0,PR_SCALE(%ecx) /* up->pr_scale = 0 */
+ leave
+ ret
diff --git a/sys/amd64/amd64/db_disasm.c b/sys/amd64/amd64/db_disasm.c
new file mode 100644
index 0000000..98e251b
--- /dev/null
+++ b/sys/amd64/amd64/db_disasm.c
@@ -0,0 +1,1375 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * $Id: db_disasm.c,v 1.4 1993/11/25 01:30:51 wollman Exp $
+ */
+
+/*
+ * Instruction disassembler.
+ */
+#include "param.h"
+#include "systm.h"
+#include "proc.h"
+#include "ddb/ddb.h"
+
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+
+/*
+ * Size attributes
+ */
+#define BYTE 0
+#define WORD 1
+#define LONG 2
+#define QUAD 3
+#define SNGL 4
+#define DBLR 5
+#define EXTR 6
+#define SDEP 7
+#define NONE 8
+
+/*
+ * Addressing modes
+ */
+#define E 1 /* general effective address */
+#define Eind 2 /* indirect address (jump, call) */
+#define Ew 3 /* address, word size */
+#define Eb 4 /* address, byte size */
+#define R 5 /* register, in 'reg' field */
+#define Rw 6 /* word register, in 'reg' field */
+#define Ri 7 /* register in instruction */
+#define S 8 /* segment reg, in 'reg' field */
+#define Si 9 /* segment reg, in instruction */
+#define A 10 /* accumulator */
+#define BX 11 /* (bx) */
+#define CL 12 /* cl, for shifts */
+#define DX 13 /* dx, for IO */
+#define SI 14 /* si */
+#define DI 15 /* di */
+#define CR 16 /* control register */
+#define DR 17 /* debug register */
+#define TR 18 /* test register */
+#define I 19 /* immediate, unsigned */
+#define Is 20 /* immediate, signed */
+#define Ib 21 /* byte immediate, unsigned */
+#define Ibs 22 /* byte immediate, signed */
+#define Iw 23 /* word immediate, unsigned */
+#define Il 24 /* long immediate */
+#define O 25 /* direct address */
+#define Db 26 /* byte displacement from EIP */
+#define Dl 27 /* long displacement from EIP */
+#define o1 28 /* constant 1 */
+#define o3 29 /* constant 3 */
+#define OS 30 /* immediate offset/segment */
+#define ST 31 /* FP stack top */
+#define STI 32 /* FP stack */
+#define X 33 /* extended FP op */
+#define XA 34 /* for 'fstcw %ax' */
+
+struct inst {
+ char * i_name; /* name */
+ short i_has_modrm; /* has regmodrm byte */
+ short i_size; /* operand size */
+ int i_mode; /* addressing modes */
+ char * i_extra; /* pointer to extra opcode table */
+};
+
+#define op1(x) (x)
+#define op2(x,y) ((x)|((y)<<8))
+#define op3(x,y,z) ((x)|((y)<<8)|((z)<<16))
+
+struct finst {
+ char * f_name; /* name for memory instruction */
+ int f_size; /* size for memory instruction */
+ int f_rrmode; /* mode for rr instruction */
+ char * f_rrname; /* name for rr instruction
+ (or pointer to table) */
+};
+
+char * db_Grp6[] = {
+ "sldt",
+ "str",
+ "lldt",
+ "ltr",
+ "verr",
+ "verw",
+ "",
+ ""
+};
+
+char * db_Grp7[] = {
+ "sgdt",
+ "sidt",
+ "lgdt",
+ "lidt",
+ "smsw",
+ "",
+ "lmsw",
+ "invlpg"
+};
+
+char * db_Grp8[] = {
+ "",
+ "",
+ "",
+ "",
+ "bt",
+ "bts",
+ "btr",
+ "btc"
+};
+
+struct inst db_inst_0f0x[] = {
+/*00*/ { "", TRUE, NONE, op1(Ew), (char *)db_Grp6 },
+/*01*/ { "", TRUE, NONE, op1(Ew), (char *)db_Grp7 },
+/*02*/ { "lar", TRUE, LONG, op2(E,R), 0 },
+/*03*/ { "lsl", TRUE, LONG, op2(E,R), 0 },
+/*04*/ { "", FALSE, NONE, 0, 0 },
+/*05*/ { "", FALSE, NONE, 0, 0 },
+/*06*/ { "clts", FALSE, NONE, 0, 0 },
+/*07*/ { "", FALSE, NONE, 0, 0 },
+
+/*08*/ { "invd", FALSE, NONE, 0, 0 },
+/*09*/ { "wbinvd",FALSE, NONE, 0, 0 },
+/*0a*/ { "", FALSE, NONE, 0, 0 },
+/*0b*/ { "", FALSE, NONE, 0, 0 },
+/*0c*/ { "", FALSE, NONE, 0, 0 },
+/*0d*/ { "", FALSE, NONE, 0, 0 },
+/*0e*/ { "", FALSE, NONE, 0, 0 },
+/*0f*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst db_inst_0f2x[] = {
+/*20*/ { "mov", TRUE, LONG, op2(CR,E), 0 }, /* use E for reg */
+/*21*/ { "mov", TRUE, LONG, op2(DR,E), 0 }, /* since mod == 11 */
+/*22*/ { "mov", TRUE, LONG, op2(E,CR), 0 },
+/*23*/ { "mov", TRUE, LONG, op2(E,DR), 0 },
+/*24*/ { "mov", TRUE, LONG, op2(TR,E), 0 },
+/*25*/ { "", FALSE, NONE, 0, 0 },
+/*26*/ { "mov", TRUE, LONG, op2(E,TR), 0 },
+/*27*/ { "", FALSE, NONE, 0, 0 },
+
+/*28*/ { "", FALSE, NONE, 0, 0 },
+/*29*/ { "", FALSE, NONE, 0, 0 },
+/*2a*/ { "", FALSE, NONE, 0, 0 },
+/*2b*/ { "", FALSE, NONE, 0, 0 },
+/*2c*/ { "", FALSE, NONE, 0, 0 },
+/*2d*/ { "", FALSE, NONE, 0, 0 },
+/*2e*/ { "", FALSE, NONE, 0, 0 },
+/*2f*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst db_inst_0f8x[] = {
+/*80*/ { "jo", FALSE, NONE, op1(Dl), 0 },
+/*81*/ { "jno", FALSE, NONE, op1(Dl), 0 },
+/*82*/ { "jb", FALSE, NONE, op1(Dl), 0 },
+/*83*/ { "jnb", FALSE, NONE, op1(Dl), 0 },
+/*84*/ { "jz", FALSE, NONE, op1(Dl), 0 },
+/*85*/ { "jnz", FALSE, NONE, op1(Dl), 0 },
+/*86*/ { "jbe", FALSE, NONE, op1(Dl), 0 },
+/*87*/ { "jnbe", FALSE, NONE, op1(Dl), 0 },
+
+/*88*/ { "js", FALSE, NONE, op1(Dl), 0 },
+/*89*/ { "jns", FALSE, NONE, op1(Dl), 0 },
+/*8a*/ { "jp", FALSE, NONE, op1(Dl), 0 },
+/*8b*/ { "jnp", FALSE, NONE, op1(Dl), 0 },
+/*8c*/ { "jl", FALSE, NONE, op1(Dl), 0 },
+/*8d*/ { "jnl", FALSE, NONE, op1(Dl), 0 },
+/*8e*/ { "jle", FALSE, NONE, op1(Dl), 0 },
+/*8f*/ { "jnle", FALSE, NONE, op1(Dl), 0 },
+};
+
+struct inst db_inst_0f9x[] = {
+/*90*/ { "seto", TRUE, NONE, op1(Eb), 0 },
+/*91*/ { "setno", TRUE, NONE, op1(Eb), 0 },
+/*92*/ { "setb", TRUE, NONE, op1(Eb), 0 },
+/*93*/ { "setnb", TRUE, NONE, op1(Eb), 0 },
+/*94*/ { "setz", TRUE, NONE, op1(Eb), 0 },
+/*95*/ { "setnz", TRUE, NONE, op1(Eb), 0 },
+/*96*/ { "setbe", TRUE, NONE, op1(Eb), 0 },
+/*97*/ { "setnbe",TRUE, NONE, op1(Eb), 0 },
+
+/*98*/ { "sets", TRUE, NONE, op1(Eb), 0 },
+/*99*/ { "setns", TRUE, NONE, op1(Eb), 0 },
+/*9a*/ { "setp", TRUE, NONE, op1(Eb), 0 },
+/*9b*/ { "setnp", TRUE, NONE, op1(Eb), 0 },
+/*9c*/ { "setl", TRUE, NONE, op1(Eb), 0 },
+/*9d*/ { "setnl", TRUE, NONE, op1(Eb), 0 },
+/*9e*/ { "setle", TRUE, NONE, op1(Eb), 0 },
+/*9f*/ { "setnle",TRUE, NONE, op1(Eb), 0 },
+};
+
+struct inst db_inst_0fax[] = {
+/*a0*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*a1*/ { "pop", FALSE, NONE, op1(Si), 0 },
+/*a2*/ { "", FALSE, NONE, 0, 0 },
+/*a3*/ { "bt", TRUE, LONG, op2(E,R), 0 },
+/*a4*/ { "shld", TRUE, LONG, op3(Ib,E,R), 0 },
+/*a5*/ { "shld", TRUE, LONG, op3(CL,E,R), 0 },
+/*a6*/ { "", FALSE, NONE, 0, 0 },
+/*a7*/ { "", FALSE, NONE, 0, 0 },
+
+/*a8*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*a9*/ { "pop", FALSE, NONE, op1(Si), 0 },
+/*aa*/ { "", FALSE, NONE, 0, 0 },
+/*ab*/ { "bts", TRUE, LONG, op2(E,R), 0 },
+/*ac*/ { "shrd", TRUE, LONG, op3(Ib,E,R), 0 },
+/*ad*/ { "shrd", TRUE, LONG, op3(CL,E,R), 0 },
+/*ae*/ { "", FALSE, NONE, 0, 0 },
+/*af*/ { "imul", TRUE, LONG, op2(E,R), 0 },
+};
+
+struct inst db_inst_0fbx[] = {
+/*b0*/ { "", FALSE, NONE, 0, 0 },
+/*b1*/ { "", FALSE, NONE, 0, 0 },
+/*b2*/ { "lss", TRUE, LONG, op2(E, R), 0 },
+/*b3*/ { "bts", TRUE, LONG, op2(R, E), 0 },
+/*b4*/ { "lfs", TRUE, LONG, op2(E, R), 0 },
+/*b5*/ { "lgs", TRUE, LONG, op2(E, R), 0 },
+/*b6*/ { "movzb", TRUE, LONG, op2(E, R), 0 },
+/*b7*/ { "movzw", TRUE, LONG, op2(E, R), 0 },
+
+/*b8*/ { "", FALSE, NONE, 0, 0 },
+/*b9*/ { "", FALSE, NONE, 0, 0 },
+/*ba*/ { "", TRUE, LONG, op2(Is, E), (char *)db_Grp8 },
+/*bb*/ { "btc", TRUE, LONG, op2(R, E), 0 },
+/*bc*/ { "bsf", TRUE, LONG, op2(E, R), 0 },
+/*bd*/ { "bsr", TRUE, LONG, op2(E, R), 0 },
+/*be*/ { "movsb", TRUE, LONG, op2(E, R), 0 },
+/*bf*/ { "movsw", TRUE, LONG, op2(E, R), 0 },
+};
+
+struct inst db_inst_0fcx[] = {
+/*c0*/ { "xadd", TRUE, BYTE, op2(R, E), 0 },
+/*c1*/ { "xadd", TRUE, LONG, op2(R, E), 0 },
+/*c2*/ { "", FALSE, NONE, 0, 0 },
+/*c3*/ { "", FALSE, NONE, 0, 0 },
+/*c4*/ { "", FALSE, NONE, 0, 0 },
+/*c5*/ { "", FALSE, NONE, 0, 0 },
+/*c6*/ { "", FALSE, NONE, 0, 0 },
+/*c7*/ { "", FALSE, NONE, 0, 0 },
+/*c8*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*c9*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*ca*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cb*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cc*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cd*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*ce*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cf*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+};
+
+struct inst db_inst_0fdx[] = {
+/*c0*/ { "cmpxchg",TRUE, BYTE, op2(R, E), 0 },
+/*c1*/ { "cmpxchg",TRUE, LONG, op2(R, E), 0 },
+/*c2*/ { "", FALSE, NONE, 0, 0 },
+/*c3*/ { "", FALSE, NONE, 0, 0 },
+/*c4*/ { "", FALSE, NONE, 0, 0 },
+/*c5*/ { "", FALSE, NONE, 0, 0 },
+/*c6*/ { "", FALSE, NONE, 0, 0 },
+/*c7*/ { "", FALSE, NONE, 0, 0 },
+/*c8*/ { "", FALSE, NONE, 0, 0 },
+/*c9*/ { "", FALSE, NONE, 0, 0 },
+/*ca*/ { "", FALSE, NONE, 0, 0 },
+/*cb*/ { "", FALSE, NONE, 0, 0 },
+/*cc*/ { "", FALSE, NONE, 0, 0 },
+/*cd*/ { "", FALSE, NONE, 0, 0 },
+/*ce*/ { "", FALSE, NONE, 0, 0 },
+/*cf*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst *db_inst_0f[] = {
+ db_inst_0f0x,
+ 0,
+ db_inst_0f2x,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ db_inst_0f8x,
+ db_inst_0f9x,
+ db_inst_0fax,
+ db_inst_0fbx,
+ db_inst_0fcx,
+ db_inst_0fdx,
+ 0,
+ 0
+};
+
+/*
+ * Mnemonic tables for x87 register-form (mod==3) encodings that select
+ * the instruction by the r/m field of the mod-r/m byte.  db_EscNM is used
+ * for ESC opcode byte N when the reg field is M; empty strings mark
+ * undefined encodings.  Referenced via the op1(X)/op1(XA) f_rrname slots
+ * in the db_Esc8..db_Escf tables below.
+ */
+char * db_Esc92[] = {
+	"fnop", "", "", "", "", "", "", ""
+};
+char * db_Esc93[] = {
+	"", "", "", "", "", "", "", ""
+};
+char * db_Esc94[] = {
+	"fchs", "fabs", "", "", "ftst", "fxam", "", ""
+};
+char * db_Esc95[] = {
+	"fld1", "fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz",""
+};
+char * db_Esc96[] = {
+	"f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp",
+	"fincstp"
+};
+char * db_Esc97[] = {
+	"fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos"
+};
+
+char * db_Esca4[] = {
+	"", "fucompp","", "", "", "", "", ""
+};
+
+char * db_Escb4[] = {
+	"", "", "fnclex","fninit","", "", "", ""
+};
+
+char * db_Esce3[] = {
+	"", "fcompp","", "", "", "", "", ""
+};
+
+char * db_Escf4[] = {
+	"fnstsw","", "", "", "", "", "", ""
+};
+
+struct finst db_Esc8[] = {
+/*0*/ { "fadd", SNGL, op2(STI,ST), 0 },
+/*1*/ { "fmul", SNGL, op2(STI,ST), 0 },
+/*2*/ { "fcom", SNGL, op2(STI,ST), 0 },
+/*3*/ { "fcomp", SNGL, op2(STI,ST), 0 },
+/*4*/ { "fsub", SNGL, op2(STI,ST), 0 },
+/*5*/ { "fsubr", SNGL, op2(STI,ST), 0 },
+/*6*/ { "fdiv", SNGL, op2(STI,ST), 0 },
+/*7*/ { "fdivr", SNGL, op2(STI,ST), 0 },
+};
+
+struct finst db_Esc9[] = {
+/*0*/ { "fld", SNGL, op1(STI), 0 },
+/*1*/ { "", NONE, op1(STI), "fxch" },
+/*2*/ { "fst", SNGL, op1(X), (char *)db_Esc92 },
+/*3*/ { "fstp", SNGL, op1(X), (char *)db_Esc93 },
+/*4*/ { "fldenv", NONE, op1(X), (char *)db_Esc94 },
+/*5*/ { "fldcw", NONE, op1(X), (char *)db_Esc95 },
+/*6*/ { "fnstenv",NONE, op1(X), (char *)db_Esc96 },
+/*7*/ { "fnstcw", NONE, op1(X), (char *)db_Esc97 },
+};
+
+struct finst db_Esca[] = {
+/*0*/ { "fiadd", WORD, 0, 0 },
+/*1*/ { "fimul", WORD, 0, 0 },
+/*2*/ { "ficom", WORD, 0, 0 },
+/*3*/ { "ficomp", WORD, 0, 0 },
+/*4*/ { "fisub", WORD, op1(X), (char *)db_Esca4 },
+/*5*/ { "fisubr", WORD, 0, 0 },
+/*6*/ { "fidiv", WORD, 0, 0 },
+/*7*/ { "fidivr", WORD, 0, 0 }
+};
+
+struct finst db_Escb[] = {
+/*0*/ { "fild", WORD, 0, 0 },
+/*1*/ { "", NONE, 0, 0 },
+/*2*/ { "fist", WORD, 0, 0 },
+/*3*/ { "fistp", WORD, 0, 0 },
+/*4*/ { "", WORD, op1(X), (char *)db_Escb4 },
+/*5*/ { "fld", EXTR, 0, 0 },
+/*6*/ { "", WORD, 0, 0 },
+/*7*/ { "fstp", EXTR, 0, 0 },
+};
+
+struct finst db_Escc[] = {
+/*0*/ { "fadd", DBLR, op2(ST,STI), 0 },
+/*1*/ { "fmul", DBLR, op2(ST,STI), 0 },
+/*2*/ { "fcom", DBLR, op2(ST,STI), 0 },
+/*3*/ { "fcomp", DBLR, op2(ST,STI), 0 },
+/*4*/ { "fsub", DBLR, op2(ST,STI), "fsubr" },
+/*5*/ { "fsubr", DBLR, op2(ST,STI), "fsub" },
+/*6*/ { "fdiv", DBLR, op2(ST,STI), "fdivr" },
+/*7*/ { "fdivr", DBLR, op2(ST,STI), "fdiv" },
+};
+
+struct finst db_Escd[] = {
+/*0*/ { "fld", DBLR, op1(STI), "ffree" },
+/*1*/ { "", NONE, 0, 0 },
+/*2*/ { "fst", DBLR, op1(STI), 0 },
+/*3*/ { "fstp", DBLR, op1(STI), 0 },
+/*4*/ { "frstor", NONE, op1(STI), "fucom" },
+/*5*/ { "", NONE, op1(STI), "fucomp" },
+/*6*/ { "fnsave", NONE, 0, 0 },
+/*7*/ { "fnstsw", NONE, 0, 0 },
+};
+
+struct finst db_Esce[] = {
+/*0*/ { "fiadd", LONG, op2(ST,STI), "faddp" },
+/*1*/ { "fimul", LONG, op2(ST,STI), "fmulp" },
+/*2*/ { "ficom", LONG, 0, 0 },
+/*3*/ { "ficomp", LONG, op1(X), (char *)db_Esce3 },
+/*4*/ { "fisub", LONG, op2(ST,STI), "fsubrp" },
+/*5*/ { "fisubr", LONG, op2(ST,STI), "fsubp" },
+/*6*/ { "fidiv", LONG, op2(ST,STI), "fdivrp" },
+/*7*/ { "fidivr", LONG, op2(ST,STI), "fdivp" },
+};
+
+struct finst db_Escf[] = {
+/*0*/ { "fild", LONG, 0, 0 },
+/*1*/ { "", LONG, 0, 0 },
+/*2*/ { "fist", LONG, 0, 0 },
+/*3*/ { "fistp", LONG, 0, 0 },
+/*4*/ { "fbld", NONE, op1(XA), (char *)db_Escf4 },
+/*5*/ { "fld", QUAD, 0, 0 },
+/*6*/ { "fbstp", NONE, 0, 0 },
+/*7*/ { "fstp", QUAD, 0, 0 },
+};
+
+struct finst *db_Esc_inst[] = {
+ db_Esc8, db_Esc9, db_Esca, db_Escb,
+ db_Escc, db_Escd, db_Esce, db_Escf
+};
+
+/* Group 1 (opcodes 80-83): ALU operation selected by the reg field. */
+char * db_Grp1[] = {
+	"add",
+	"or",
+	"adc",
+	"sbb",
+	"and",
+	"sub",
+	"xor",
+	"cmp"
+};
+
+/*
+ * Group 2 (opcodes c0/c1, d0-d3): shifts/rotates selected by reg.
+ * NOTE(review): "shl" appears at both /4 and /6; on x86 the /6 encoding
+ * is an undocumented alias of shl (sal), so the duplicate is presumably
+ * deliberate -- confirm before "fixing".
+ */
+char * db_Grp2[] = {
+	"rol",
+	"ror",
+	"rcl",
+	"rcr",
+	"shl",
+	"shr",
+	"shl",
+	"sar"
+};
+
+/* Group 3 (opcodes f6/f7): unary test/not/neg/mul/div, selected by reg. */
+struct inst db_Grp3[] = {
+	{ "test", TRUE, NONE, op2(I,E), 0 },
+	{ "test", TRUE, NONE, op2(I,E), 0 },
+	{ "not", TRUE, NONE, op1(E), 0 },
+	{ "neg", TRUE, NONE, op1(E), 0 },
+	{ "mul", TRUE, NONE, op2(E,A), 0 },
+	{ "imul", TRUE, NONE, op2(E,A), 0 },
+	{ "div", TRUE, NONE, op2(E,A), 0 },
+	{ "idiv", TRUE, NONE, op2(E,A), 0 },
+};
+
+/* Group 4 (opcode fe): byte inc/dec; other reg values are undefined. */
+struct inst db_Grp4[] = {
+	{ "inc", TRUE, BYTE, op1(E), 0 },
+	{ "dec", TRUE, BYTE, op1(E), 0 },
+	{ "", TRUE, NONE, 0, 0 },
+	{ "", TRUE, NONE, 0, 0 },
+	{ "", TRUE, NONE, 0, 0 },
+	{ "", TRUE, NONE, 0, 0 },
+	{ "", TRUE, NONE, 0, 0 },
+	{ "", TRUE, NONE, 0, 0 }
+};
+
+/* Group 5 (opcode ff): inc/dec/call/jmp/push, selected by reg. */
+struct inst db_Grp5[] = {
+	{ "inc", TRUE, LONG, op1(E), 0 },
+	{ "dec", TRUE, LONG, op1(E), 0 },
+	{ "call", TRUE, NONE, op1(Eind),0 },
+	{ "lcall", TRUE, NONE, op1(Eind),0 },
+	{ "jmp", TRUE, NONE, op1(Eind),0 },
+	{ "ljmp", TRUE, NONE, op1(Eind),0 },
+	{ "push", TRUE, LONG, op1(E), 0 },
+	{ "", TRUE, NONE, 0, 0 }
+};
+
+struct inst db_inst_table[256] = {
+/*00*/ { "add", TRUE, BYTE, op2(R, E), 0 },
+/*01*/ { "add", TRUE, LONG, op2(R, E), 0 },
+/*02*/ { "add", TRUE, BYTE, op2(E, R), 0 },
+/*03*/ { "add", TRUE, LONG, op2(E, R), 0 },
+/*04*/ { "add", FALSE, BYTE, op2(Is, A), 0 },
+/*05*/ { "add", FALSE, LONG, op2(Is, A), 0 },
+/*06*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*07*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*08*/ { "or", TRUE, BYTE, op2(R, E), 0 },
+/*09*/ { "or", TRUE, LONG, op2(R, E), 0 },
+/*0a*/ { "or", TRUE, BYTE, op2(E, R), 0 },
+/*0b*/ { "or", TRUE, LONG, op2(E, R), 0 },
+/*0c*/ { "or", FALSE, BYTE, op2(I, A), 0 },
+/*0d*/ { "or", FALSE, LONG, op2(I, A), 0 },
+/*0e*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*0f*/ { "", FALSE, NONE, 0, 0 },
+
+/*10*/ { "adc", TRUE, BYTE, op2(R, E), 0 },
+/*11*/ { "adc", TRUE, LONG, op2(R, E), 0 },
+/*12*/ { "adc", TRUE, BYTE, op2(E, R), 0 },
+/*13*/ { "adc", TRUE, LONG, op2(E, R), 0 },
+/*14*/ { "adc", FALSE, BYTE, op2(Is, A), 0 },
+/*15*/ { "adc", FALSE, LONG, op2(Is, A), 0 },
+/*16*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*17*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*18*/ { "sbb", TRUE, BYTE, op2(R, E), 0 },
+/*19*/ { "sbb", TRUE, LONG, op2(R, E), 0 },
+/*1a*/ { "sbb", TRUE, BYTE, op2(E, R), 0 },
+/*1b*/ { "sbb", TRUE, LONG, op2(E, R), 0 },
+/*1c*/ { "sbb", FALSE, BYTE, op2(Is, A), 0 },
+/*1d*/ { "sbb", FALSE, LONG, op2(Is, A), 0 },
+/*1e*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*1f*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*20*/ { "and", TRUE, BYTE, op2(R, E), 0 },
+/*21*/ { "and", TRUE, LONG, op2(R, E), 0 },
+/*22*/ { "and", TRUE, BYTE, op2(E, R), 0 },
+/*23*/ { "and", TRUE, LONG, op2(E, R), 0 },
+/*24*/ { "and", FALSE, BYTE, op2(I, A), 0 },
+/*25*/ { "and", FALSE, LONG, op2(I, A), 0 },
+/*26*/ { "", FALSE, NONE, 0, 0 },
+/*27*/ { "aaa", FALSE, NONE, 0, 0 },
+
+/*28*/ { "sub", TRUE, BYTE, op2(R, E), 0 },
+/*29*/ { "sub", TRUE, LONG, op2(R, E), 0 },
+/*2a*/ { "sub", TRUE, BYTE, op2(E, R), 0 },
+/*2b*/ { "sub", TRUE, LONG, op2(E, R), 0 },
+/*2c*/ { "sub", FALSE, BYTE, op2(Is, A), 0 },
+/*2d*/ { "sub", FALSE, LONG, op2(Is, A), 0 },
+/*2e*/ { "", FALSE, NONE, 0, 0 },
+/*2f*/ { "das", FALSE, NONE, 0, 0 },
+
+/*30*/ { "xor", TRUE, BYTE, op2(R, E), 0 },
+/*31*/ { "xor", TRUE, LONG, op2(R, E), 0 },
+/*32*/ { "xor", TRUE, BYTE, op2(E, R), 0 },
+/*33*/ { "xor", TRUE, LONG, op2(E, R), 0 },
+/*34*/ { "xor", FALSE, BYTE, op2(I, A), 0 },
+/*35*/ { "xor", FALSE, LONG, op2(I, A), 0 },
+/*36*/ { "", FALSE, NONE, 0, 0 },
+/*37*/ { "daa", FALSE, NONE, 0, 0 },
+
+/*38*/ { "cmp", TRUE, BYTE, op2(R, E), 0 },
+/*39*/ { "cmp", TRUE, LONG, op2(R, E), 0 },
+/*3a*/ { "cmp", TRUE, BYTE, op2(E, R), 0 },
+/*3b*/ { "cmp", TRUE, LONG, op2(E, R), 0 },
+/*3c*/ { "cmp", FALSE, BYTE, op2(Is, A), 0 },
+/*3d*/ { "cmp", FALSE, LONG, op2(Is, A), 0 },
+/*3e*/ { "", FALSE, NONE, 0, 0 },
+/*3f*/ { "aas", FALSE, NONE, 0, 0 },
+
+/*40*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*41*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*42*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*43*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*44*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*45*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*46*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*47*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+
+/*48*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*49*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4a*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4b*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4c*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4d*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4e*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4f*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+
+/*50*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*51*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*52*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*53*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*54*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*55*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*56*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*57*/ { "push", FALSE, LONG, op1(Ri), 0 },
+
+/*58*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*59*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5a*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5b*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5c*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5d*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5e*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5f*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+
+/*60*/ { "pusha", FALSE, LONG, 0, 0 },
+/*61*/ { "popa", FALSE, LONG, 0, 0 },
+/*62*/ { "bound", TRUE, LONG, op2(E, R), 0 },
+/*63*/ { "arpl", TRUE, NONE, op2(Ew,Rw), 0 },
+
+/*64*/ { "", FALSE, NONE, 0, 0 },
+/*65*/ { "", FALSE, NONE, 0, 0 },
+/*66*/ { "", FALSE, NONE, 0, 0 },
+/*67*/ { "", FALSE, NONE, 0, 0 },
+
+/*68*/ { "push", FALSE, LONG, op1(I), 0 },
+/*69*/ { "imul", TRUE, LONG, op3(I,E,R), 0 },
+/*6a*/ { "push", FALSE, LONG, op1(Ib), 0 },
+/*6b*/ { "imul", TRUE, LONG, op3(Ibs,E,R),0 },
+/*6c*/ { "ins", FALSE, BYTE, op2(DX, DI), 0 },
+/*6d*/ { "ins", FALSE, LONG, op2(DX, DI), 0 },
+/*6e*/ { "outs", FALSE, BYTE, op2(SI, DX), 0 },
+/*6f*/ { "outs", FALSE, LONG, op2(SI, DX), 0 },
+
+/*70*/ { "jo", FALSE, NONE, op1(Db), 0 },
+/*71*/ { "jno", FALSE, NONE, op1(Db), 0 },
+/*72*/ { "jb", FALSE, NONE, op1(Db), 0 },
+/*73*/ { "jnb", FALSE, NONE, op1(Db), 0 },
+/*74*/ { "jz", FALSE, NONE, op1(Db), 0 },
+/*75*/ { "jnz", FALSE, NONE, op1(Db), 0 },
+/*76*/ { "jbe", FALSE, NONE, op1(Db), 0 },
+/*77*/ { "jnbe", FALSE, NONE, op1(Db), 0 },
+
+/*78*/ { "js", FALSE, NONE, op1(Db), 0 },
+/*79*/ { "jns", FALSE, NONE, op1(Db), 0 },
+/*7a*/ { "jp", FALSE, NONE, op1(Db), 0 },
+/*7b*/ { "jnp", FALSE, NONE, op1(Db), 0 },
+/*7c*/ { "jl", FALSE, NONE, op1(Db), 0 },
+/*7d*/ { "jnl", FALSE, NONE, op1(Db), 0 },
+/*7e*/ { "jle", FALSE, NONE, op1(Db), 0 },
+/*7f*/ { "jnle", FALSE, NONE, op1(Db), 0 },
+
+/*80*/ { "", TRUE, BYTE, op2(I, E), (char *)db_Grp1 },
+/*81*/ { "", TRUE, LONG, op2(I, E), (char *)db_Grp1 },
+/*82*/ { "", TRUE, BYTE, op2(Is,E), (char *)db_Grp1 },
+/*83*/ { "", TRUE, LONG, op2(Ibs,E), (char *)db_Grp1 },
+/*84*/ { "test", TRUE, BYTE, op2(R, E), 0 },
+/*85*/ { "test", TRUE, LONG, op2(R, E), 0 },
+/*86*/ { "xchg", TRUE, BYTE, op2(R, E), 0 },
+/*87*/ { "xchg", TRUE, LONG, op2(R, E), 0 },
+
+/*88*/ { "mov", TRUE, BYTE, op2(R, E), 0 },
+/*89*/ { "mov", TRUE, LONG, op2(R, E), 0 },
+/*8a*/ { "mov", TRUE, BYTE, op2(E, R), 0 },
+/*8b*/ { "mov", TRUE, LONG, op2(E, R), 0 },
+/*8c*/ { "mov", TRUE, NONE, op2(S, Ew), 0 },
+/*8d*/ { "lea", TRUE, LONG, op2(E, R), 0 },
+/*8e*/ { "mov", TRUE, NONE, op2(Ew, S), 0 },
+/*8f*/ { "pop", TRUE, LONG, op1(E), 0 },
+
+/*90*/ { "nop", FALSE, NONE, 0, 0 },
+/*91*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*92*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*93*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*94*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*95*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*96*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*97*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+
+/*98*/ { "cbw", FALSE, SDEP, 0, "cwde" }, /* cbw/cwde */
+/*99*/ { "cwd", FALSE, SDEP, 0, "cdq" }, /* cwd/cdq */
+/*9a*/ { "lcall", FALSE, NONE, op1(OS), 0 },
+/*9b*/ { "wait", FALSE, NONE, 0, 0 },
+/*9c*/ { "pushf", FALSE, LONG, 0, 0 },
+/*9d*/ { "popf", FALSE, LONG, 0, 0 },
+/*9e*/ { "sahf", FALSE, NONE, 0, 0 },
+/*9f*/ { "lahf", FALSE, NONE, 0, 0 },
+
+/*a0*/ { "mov", FALSE, BYTE, op2(O, A), 0 },
+/*a1*/ { "mov", FALSE, LONG, op2(O, A), 0 },
+/*a2*/ { "mov", FALSE, BYTE, op2(A, O), 0 },
+/*a3*/ { "mov", FALSE, LONG, op2(A, O), 0 },
+/*a4*/ { "movs", FALSE, BYTE, op2(SI,DI), 0 },
+/*a5*/ { "movs", FALSE, LONG, op2(SI,DI), 0 },
+/*a6*/ { "cmps", FALSE, BYTE, op2(SI,DI), 0 },
+/*a7*/ { "cmps", FALSE, LONG, op2(SI,DI), 0 },
+
+/*a8*/ { "test", FALSE, BYTE, op2(I, A), 0 },
+/*a9*/ { "test", FALSE, LONG, op2(I, A), 0 },
+/*aa*/ { "stos", FALSE, BYTE, op1(DI), 0 },
+/*ab*/ { "stos", FALSE, LONG, op1(DI), 0 },
+/*ac*/ { "lods", FALSE, BYTE, op1(SI), 0 },
+/*ad*/ { "lods", FALSE, LONG, op1(SI), 0 },
+/*ae*/ { "scas", FALSE, BYTE, op1(SI), 0 },
+/*af*/ { "scas", FALSE, LONG, op1(SI), 0 },
+
+/*b0*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b1*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b2*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b3*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b4*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b5*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b6*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b7*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+
+/*b8*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*b9*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*ba*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bb*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bc*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bd*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*be*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bf*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+
+/*c0*/ { "", TRUE, BYTE, op2(Ib, E), (char *)db_Grp2 },
+/*c1*/ { "", TRUE, LONG, op2(Ib, E), (char *)db_Grp2 },
+/*c2*/ { "ret", FALSE, NONE, op1(Iw), 0 },
+/*c3*/ { "ret", FALSE, NONE, 0, 0 },
+/*c4*/ { "les", TRUE, LONG, op2(E, R), 0 },
+/*c5*/ { "lds", TRUE, LONG, op2(E, R), 0 },
+/*c6*/ { "mov", TRUE, BYTE, op2(I, E), 0 },
+/*c7*/ { "mov", TRUE, LONG, op2(I, E), 0 },
+
+/*c8*/ { "enter", FALSE, NONE, op2(Ib, Iw), 0 },
+/*c9*/ { "leave", FALSE, NONE, 0, 0 },
+/*ca*/ { "lret", FALSE, NONE, op1(Iw), 0 },
+/*cb*/ { "lret", FALSE, NONE, 0, 0 },
+/*cc*/ { "int", FALSE, NONE, op1(o3), 0 },
+/*cd*/ { "int", FALSE, NONE, op1(Ib), 0 },
+/*ce*/ { "into", FALSE, NONE, 0, 0 },
+/*cf*/ { "iret", FALSE, NONE, 0, 0 },
+
+/*d0*/ { "", TRUE, BYTE, op2(o1, E), (char *)db_Grp2 },
+/*d1*/ { "", TRUE, LONG, op2(o1, E), (char *)db_Grp2 },
+/*d2*/ { "", TRUE, BYTE, op2(CL, E), (char *)db_Grp2 },
+/*d3*/ { "", TRUE, LONG, op2(CL, E), (char *)db_Grp2 },
+/*d4*/ { "aam", TRUE, NONE, 0, 0 },
+/*d5*/ { "aad", TRUE, NONE, 0, 0 },
+/*d6*/ { "", FALSE, NONE, 0, 0 },
+/*d7*/ { "xlat", FALSE, BYTE, op1(BX), 0 },
+
+/*d8*/ { "", TRUE, NONE, 0, (char *)db_Esc8 },
+/*d9*/ { "", TRUE, NONE, 0, (char *)db_Esc9 },
+/*da*/ { "", TRUE, NONE, 0, (char *)db_Esca },
+/*db*/ { "", TRUE, NONE, 0, (char *)db_Escb },
+/*dc*/ { "", TRUE, NONE, 0, (char *)db_Escc },
+/*dd*/ { "", TRUE, NONE, 0, (char *)db_Escd },
+/*de*/ { "", TRUE, NONE, 0, (char *)db_Esce },
+/*df*/ { "", TRUE, NONE, 0, (char *)db_Escf },
+
+/*e0*/ { "loopne",FALSE, NONE, op1(Db), 0 },
+/*e1*/ { "loope", FALSE, NONE, op1(Db), 0 },
+/*e2*/ { "loop", FALSE, NONE, op1(Db), 0 },
+/*e3*/ { "jcxz", FALSE, SDEP, op1(Db), "jecxz" },
+/*e4*/ { "in", FALSE, BYTE, op2(Ib, A), 0 },
+/*e5*/ { "in", FALSE, LONG, op2(Ib, A) , 0 },
+/*e6*/ { "out", FALSE, BYTE, op2(A, Ib), 0 },
+/*e7*/ { "out", FALSE, LONG, op2(A, Ib) , 0 },
+
+/*e8*/ { "call", FALSE, NONE, op1(Dl), 0 },
+/*e9*/ { "jmp", FALSE, NONE, op1(Dl), 0 },
+/*ea*/ { "ljmp", FALSE, NONE, op1(OS), 0 },
+/*eb*/ { "jmp", FALSE, NONE, op1(Db), 0 },
+/*ec*/ { "in", FALSE, BYTE, op2(DX, A), 0 },
+/*ed*/ { "in", FALSE, LONG, op2(DX, A) , 0 },
+/*ee*/ { "out", FALSE, BYTE, op2(A, DX), 0 },
+/*ef*/ { "out", FALSE, LONG, op2(A, DX) , 0 },
+
+/*f0*/ { "", FALSE, NONE, 0, 0 },
+/*f1*/ { "", FALSE, NONE, 0, 0 },
+/*f2*/ { "", FALSE, NONE, 0, 0 },
+/*f3*/ { "", FALSE, NONE, 0, 0 },
+/*f4*/ { "hlt", FALSE, NONE, 0, 0 },
+/*f5*/ { "cmc", FALSE, NONE, 0, 0 },
+/*f6*/ { "", TRUE, BYTE, 0, (char *)db_Grp3 },
+/*f7*/ { "", TRUE, LONG, 0, (char *)db_Grp3 },
+
+/*f8*/ { "clc", FALSE, NONE, 0, 0 },
+/*f9*/ { "stc", FALSE, NONE, 0, 0 },
+/*fa*/ { "cli", FALSE, NONE, 0, 0 },
+/*fb*/ { "sti", FALSE, NONE, 0, 0 },
+/*fc*/ { "cld", FALSE, NONE, 0, 0 },
+/*fd*/ { "std", FALSE, NONE, 0, 0 },
+/*fe*/ { "", TRUE, NONE, 0, (char *)db_Grp4 },
+/*ff*/ { "", TRUE, NONE, 0, (char *)db_Grp5 },
+};
+
+/* Returned for undefined/unrecognized opcodes. */
+struct inst db_bad_inst =
+	{ "???", FALSE, NONE, 0, 0 }
+;
+
+/* Fields of a mod-r/m byte: mod (bits 7:6), reg (5:3), r/m (2:0). */
+#define f_mod(byte) ((byte)>>6)
+#define f_reg(byte) (((byte)>>3)&0x7)
+#define f_rm(byte) ((byte)&0x7)
+
+/* Fields of an s-i-b byte: scale (bits 7:6), index (5:3), base (2:0). */
+#define sib_ss(byte) ((byte)>>6)
+#define sib_index(byte) (((byte)>>3)&0x7)
+#define sib_base(byte) ((byte)&0x7)
+
+/* Decoded effective-address operand, filled in by db_read_address(). */
+struct i_addr {
+	int is_reg; /* if reg, reg number is in 'disp' */
+	int disp; /* displacement (or register number if is_reg) */
+	char * base; /* base register name, or 0 if none */
+	char * index; /* index register name, or 0 if none */
+	int ss; /* log2 of the s-i-b scale factor */
+};
+
+/* 16-bit mod-r/m base/index register combinations, indexed by r/m. */
+char * db_index_reg_16[8] = {
+	"%bx,%si",
+	"%bx,%di",
+	"%bp,%si",
+	"%bp,%di",
+	"%si",
+	"%di",
+	"%bp",
+	"%bx"
+};
+
+/*
+ * Register names by operand size; rows are indexed BYTE/WORD/LONG and
+ * columns by register number.  The flat initializer relies on C brace
+ * elision to fill the 3x8 array row by row.
+ */
+char * db_reg[3][8] = {
+	"%al", "%cl", "%dl", "%bl", "%ah", "%ch", "%dh", "%bh",
+	"%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di",
+	"%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi"
+};
+
+/* Segment register names, indexed by the reg field; 6 and 7 are undefined. */
+char * db_seg_reg[8] = {
+	"%es", "%cs", "%ss", "%ds", "%fs", "%gs", "", ""
+};
+
+/*
+ * lengths for size attributes
+ */
+int db_lengths[] = {
+ 1, /* BYTE */
+ 2, /* WORD */
+ 4, /* LONG */
+ 8, /* QUAD */
+ 4, /* SNGL */
+ 8, /* DBLR */
+ 10, /* EXTR */
+};
+
+#define get_value_inc(result, loc, size, is_signed) \
+ result = db_get_value((loc), (size), (is_signed)); \
+ (loc) += (size);
+
+/*
+ * Read address at location and return updated location.
+ *
+ * Decode the effective address described by mod-r/m byte 'regmodrm',
+ * consuming any s-i-b byte and displacement bytes at 'loc', and fill in
+ * *addrp.  'short_addr' selects 16-bit (0x67-prefixed) decoding.
+ */
+db_addr_t
+db_read_address(loc, short_addr, regmodrm, addrp)
+	db_addr_t loc;
+	int short_addr;
+	int regmodrm;
+	struct i_addr *addrp; /* out */
+{
+	int mod, rm, sib, index, ss, disp;
+
+	mod = f_mod(regmodrm);
+	rm = f_rm(regmodrm);
+
+	if (mod == 3) {
+	    /* register operand: register number goes in 'disp' */
+	    addrp->is_reg = TRUE;
+	    addrp->disp = rm;
+	    return (loc);
+	}
+	addrp->is_reg = FALSE;
+	addrp->index = 0;
+
+	if (short_addr) {
+	    /* 16-bit modes: base/index pairs come from db_index_reg_16 */
+	    addrp->index = 0;
+	    addrp->ss = 0;
+	    switch (mod) {
+		case 0:
+		    if (rm == 6) {
+			/* mod=0, r/m=6: bare 16-bit displacement */
+			get_value_inc(disp, loc, 2, TRUE);
+			addrp->disp = disp;
+			addrp->base = 0;
+		    }
+		    else {
+			addrp->disp = 0;
+			addrp->base = db_index_reg_16[rm];
+		    }
+		    break;
+		case 1:
+		    /* 8-bit signed displacement */
+		    get_value_inc(disp, loc, 1, TRUE);
+		    addrp->disp = disp;
+		    addrp->base = db_index_reg_16[rm];
+		    break;
+		case 2:
+		    /* 16-bit displacement */
+		    get_value_inc(disp, loc, 2, TRUE);
+		    addrp->disp = disp;
+		    addrp->base = db_index_reg_16[rm];
+		    break;
+	    }
+	}
+	else {
+	    /* 32-bit modes */
+	    if (mod != 3 && rm == 4) {	/* mod==3 already returned above; check is redundant */
+		/* r/m=4: an s-i-b byte follows */
+		get_value_inc(sib, loc, 1, FALSE);
+		rm = sib_base(sib);
+		index = sib_index(sib);
+		if (index != 4)	/* index 4 (%esp slot) means "no index" */
+		    addrp->index = db_reg[LONG][index];
+		addrp->ss = sib_ss(sib);
+	    }
+
+	    switch (mod) {
+		case 0:
+		    if (rm == 5) {
+			/* mod=0, base=5: bare 32-bit displacement */
+			get_value_inc(addrp->disp, loc, 4, FALSE);
+			addrp->base = 0;
+		    }
+		    else {
+			addrp->disp = 0;
+			addrp->base = db_reg[LONG][rm];
+		    }
+		    break;
+
+		case 1:
+		    /* 8-bit signed displacement */
+		    get_value_inc(disp, loc, 1, TRUE);
+		    addrp->disp = disp;
+		    addrp->base = db_reg[LONG][rm];
+		    break;
+
+		case 2:
+		    /* 32-bit displacement */
+		    get_value_inc(disp, loc, 4, FALSE);
+		    addrp->disp = disp;
+		    addrp->base = db_reg[LONG][rm];
+		    break;
+	    }
+	}
+	return (loc);
+}
+
+/*
+ * Print the operand described by *addrp in AT&T syntax: a bare register
+ * for register operands, otherwise "seg:disp(base,index,scale)" with
+ * the empty parts omitted.  The displacement is printed symbolically
+ * where a symbol matches.
+ */
+void
+db_print_address(seg, size, addrp)
+	char * seg;
+	int size;
+	struct i_addr *addrp;
+{
+	if (addrp->is_reg) {
+	    db_printf("%s", db_reg[size][addrp->disp]);
+	    return;
+	}
+
+	if (seg) {
+	    db_printf("%s:", seg);
+	}
+
+	/* symbol+offset form of the displacement */
+	db_printsym((db_addr_t)addrp->disp, DB_STGY_ANY);
+	if (addrp->base != 0 || addrp->index != 0) {
+	    db_printf("(");
+	    if (addrp->base)
+		db_printf("%s", addrp->base);
+	    if (addrp->index)
+		db_printf(",%s,%d", addrp->index, 1<<addrp->ss);
+	    db_printf(")");
+	}
+}
+
+/*
+ * Disassemble floating-point ("escape") instruction
+ * and return updated location.
+ *
+ * 'inst' is the first opcode byte (0xd8..0xdf); the mod-r/m byte is
+ * read here.  Memory forms get a gas-style operand-size suffix; the
+ * mod==3 register forms are formatted from f_rrmode/f_rrname (see the
+ * db_Esc8..db_Escf tables).
+ */
+db_addr_t
+db_disasm_esc(loc, inst, short_addr, size, seg)
+	db_addr_t loc;
+	int inst;
+	int short_addr;
+	int size;
+	char * seg;
+{
+	int regmodrm;
+	struct finst *fp;
+	int mod;
+	struct i_addr address;
+	char * name;
+
+	get_value_inc(regmodrm, loc, 1, FALSE);
+	fp = &db_Esc_inst[inst - 0xd8][f_reg(regmodrm)];
+	mod = f_mod(regmodrm);
+	if (mod != 3) {
+	    /*
+	     * Normal address modes.
+	     */
+	    loc = db_read_address(loc, short_addr, regmodrm, &address);
+	    db_printf(fp->f_name);
+	    switch(fp->f_size) {	/* gas operand-size suffix */
+		case SNGL:
+		    db_printf("s");
+		    break;
+		case DBLR:
+		    db_printf("l");
+		    break;
+		case EXTR:
+		    db_printf("t");
+		    break;
+		case WORD:
+		    db_printf("s");	/* gas uses "s" for 16-bit integer FP operands */
+		    break;
+		case LONG:
+		    db_printf("l");
+		    break;
+		case QUAD:
+		    db_printf("q");
+		    break;
+		default:
+		    break;
+	    }
+	    db_printf("\t");
+	    db_print_address(seg, BYTE, &address);
+	}
+	else {
+	    /*
+	     * 'reg-reg' - special formats
+	     */
+	    switch (fp->f_rrmode) {
+		case op2(ST,STI):
+		    name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+		    db_printf("%s\t%%st,%%st(%d)",name,f_rm(regmodrm));
+		    break;
+		case op2(STI,ST):
+		    name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+		    db_printf("%s\t%%st(%d),%%st",name, f_rm(regmodrm));
+		    break;
+		case op1(STI):
+		    name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+		    db_printf("%s\t%%st(%d)",name, f_rm(regmodrm));
+		    break;
+		case op1(X):
+		    /* f_rrname is an 8-entry name table indexed by r/m */
+		    db_printf("%s", ((char **)fp->f_rrname)[f_rm(regmodrm)]);
+		    break;
+		case op1(XA):
+		    db_printf("%s\t%%ax",
+				((char **)fp->f_rrname)[f_rm(regmodrm)]);
+		    break;
+		default:
+		    db_printf("<bad instruction>");
+		    break;
+	    }
+	}
+
+	return (loc);
+}
+
+/*
+ * Disassemble instruction at 'loc'. 'altfmt' specifies an
+ * (optional) alternate format. Return address of start of
+ * next instruction.
+ */
+db_addr_t
+db_disasm(loc, altfmt)
+ db_addr_t loc;
+ boolean_t altfmt;
+{
+ int inst;
+ int size;
+ int short_addr;
+ char * seg;
+ struct inst * ip;
+ char * i_name;
+ int i_size;
+ int i_mode;
+ int regmodrm = 0;
+ boolean_t first;
+ int displ;
+ int prefix;
+ int imm;
+ int imm2;
+ int len;
+ struct i_addr address;
+
+ get_value_inc(inst, loc, 1, FALSE);
+ short_addr = FALSE;
+ size = LONG;
+ seg = 0;
+
+ /*
+ * Get prefixes
+ */
+ prefix = TRUE;
+ do {
+ switch (inst) {
+ case 0x66: /* data16 */
+ size = WORD;
+ break;
+ case 0x67:
+ short_addr = TRUE;
+ break;
+ case 0x26:
+ seg = "%es";
+ break;
+ case 0x36:
+ seg = "%ss";
+ break;
+ case 0x2e:
+ seg = "%cs";
+ break;
+ case 0x3e:
+ seg = "%ds";
+ break;
+ case 0x64:
+ seg = "%fs";
+ break;
+ case 0x65:
+ seg = "%gs";
+ break;
+ case 0xf0:
+ db_printf("lock ");
+ break;
+ case 0xf2:
+ db_printf("repne ");
+ break;
+ case 0xf3:
+ db_printf("repe "); /* XXX repe VS rep */
+ break;
+ default:
+ prefix = FALSE;
+ break;
+ }
+ if (prefix) {
+ get_value_inc(inst, loc, 1, FALSE);
+ }
+ } while (prefix);
+
+ if (inst >= 0xd8 && inst <= 0xdf) {
+ loc = db_disasm_esc(loc, inst, short_addr, size, seg);
+ db_printf("\n");
+ return (loc);
+ }
+
+ if (inst == 0x0f) {
+ get_value_inc(inst, loc, 1, FALSE);
+ ip = db_inst_0f[inst>>4];
+ if (ip == 0) {
+ ip = &db_bad_inst;
+ }
+ else {
+ ip = &ip[inst&0xf];
+ }
+ }
+ else
+ ip = &db_inst_table[inst];
+
+ if (ip->i_has_modrm) {
+ get_value_inc(regmodrm, loc, 1, FALSE);
+ loc = db_read_address(loc, short_addr, regmodrm, &address);
+ }
+
+ i_name = ip->i_name;
+ i_size = ip->i_size;
+ i_mode = ip->i_mode;
+
+ if (ip->i_extra == (char *)db_Grp1 ||
+ ip->i_extra == (char *)db_Grp2 ||
+ ip->i_extra == (char *)db_Grp6 ||
+ ip->i_extra == (char *)db_Grp7 ||
+ ip->i_extra == (char *)db_Grp8) {
+ i_name = ((char **)ip->i_extra)[f_reg(regmodrm)];
+ }
+ else if (ip->i_extra == (char *)db_Grp3) {
+ ip = (struct inst *)ip->i_extra;
+ ip = &ip[f_reg(regmodrm)];
+ i_name = ip->i_name;
+ i_mode = ip->i_mode;
+ }
+ else if (ip->i_extra == (char *)db_Grp4 ||
+ ip->i_extra == (char *)db_Grp5) {
+ ip = (struct inst *)ip->i_extra;
+ ip = &ip[f_reg(regmodrm)];
+ i_name = ip->i_name;
+ i_mode = ip->i_mode;
+ i_size = ip->i_size;
+ }
+
+ if (i_size == SDEP) {
+ if (size == WORD)
+ db_printf(i_name);
+ else
+ db_printf(ip->i_extra);
+ }
+ else {
+ db_printf(i_name);
+ if (i_size != NONE) {
+ if (i_size == BYTE) {
+ db_printf("b");
+ size = BYTE;
+ }
+ else if (i_size == WORD) {
+ db_printf("w");
+ size = WORD;
+ }
+ else if (size == WORD)
+ db_printf("w");
+ else
+ db_printf("l");
+ }
+ }
+ db_printf("\t");
+ for (first = TRUE;
+ i_mode != 0;
+ i_mode >>= 8, first = FALSE)
+ {
+ if (!first)
+ db_printf(",");
+
+ switch (i_mode & 0xFF) {
+
+ case E:
+ db_print_address(seg, size, &address);
+ break;
+
+ case Eind:
+ db_printf("*");
+ db_print_address(seg, size, &address);
+ break;
+
+ case Ew:
+ db_print_address(seg, WORD, &address);
+ break;
+
+ case Eb:
+ db_print_address(seg, BYTE, &address);
+ break;
+
+ case R:
+ db_printf("%s", db_reg[size][f_reg(regmodrm)]);
+ break;
+
+ case Rw:
+ db_printf("%s", db_reg[WORD][f_reg(regmodrm)]);
+ break;
+
+ case Ri:
+ db_printf("%s", db_reg[size][f_rm(inst)]);
+ break;
+
+ case S:
+ db_printf("%s", db_seg_reg[f_reg(regmodrm)]);
+ break;
+
+ case Si:
+ db_printf("%s", db_seg_reg[f_reg(inst)]);
+ break;
+
+ case A:
+ db_printf("%s", db_reg[size][0]); /* acc */
+ break;
+
+ case BX:
+ if (seg)
+ db_printf("%s:", seg);
+ db_printf("(%s)", short_addr ? "%bx" : "%ebx");
+ break;
+
+ case CL:
+ db_printf("%%cl");
+ break;
+
+ case DX:
+ db_printf("%%dx");
+ break;
+
+ case SI:
+ if (seg)
+ db_printf("%s:", seg);
+ db_printf("(%s)", short_addr ? "%si" : "%esi");
+ break;
+
+ case DI:
+ db_printf("%%es:(%s)", short_addr ? "%di" : "%edi");
+ break;
+
+ case CR:
+ db_printf("%%cr%d", f_reg(regmodrm));
+ break;
+
+ case DR:
+ db_printf("%%dr%d", f_reg(regmodrm));
+ break;
+
+ case TR:
+ db_printf("%%tr%d", f_reg(regmodrm));
+ break;
+
+ case I:
+ len = db_lengths[size];
+ get_value_inc(imm, loc, len, FALSE);/* unsigned */
+ db_printf("$%#n", imm);
+ break;
+
+ case Is:
+ len = db_lengths[size];
+ get_value_inc(imm, loc, len, TRUE); /* signed */
+ db_printf("$%#r", imm);
+ break;
+
+ case Ib:
+ get_value_inc(imm, loc, 1, FALSE); /* unsigned */
+ db_printf("$%#n", imm);
+ break;
+
+ case Ibs:
+ get_value_inc(imm, loc, 1, TRUE); /* signed */
+ db_printf("$%#r", imm);
+ break;
+
+ case Iw:
+ get_value_inc(imm, loc, 2, FALSE); /* unsigned */
+ db_printf("$%#n", imm);
+ break;
+
+ case Il:
+ get_value_inc(imm, loc, 4, FALSE);
+ db_printf("$%#n", imm);
+ break;
+
+ case O:
+ if (short_addr) {
+ get_value_inc(displ, loc, 2, TRUE);
+ }
+ else {
+ get_value_inc(displ, loc, 4, TRUE);
+ }
+ if (seg)
+ db_printf("%s:%#r",seg, displ);
+ else
+ db_printsym((db_addr_t)displ, DB_STGY_ANY);
+ break;
+
+ case Db:
+ get_value_inc(displ, loc, 1, TRUE);
+ db_printsym((db_addr_t)(displ + loc), DB_STGY_XTRN);
+ break;
+
+ case Dl:
+ get_value_inc(displ, loc, 4, TRUE);
+ db_printsym((db_addr_t)(displ + loc), DB_STGY_XTRN);
+ break;
+
+ case o1:
+ db_printf("$1");
+ break;
+
+ case o3:
+ db_printf("$3");
+ break;
+
+ case OS:
+ get_value_inc(imm, loc, 4, FALSE); /* offset */
+ get_value_inc(imm2, loc, 2, FALSE); /* segment */
+ db_printf("$%#n,%#n", imm2, imm);
+ break;
+ }
+ }
+
+ if (altfmt == 0) {
+ if (inst == 0xe9 || inst == 0xeb) {
+ /*
+ * GAS pads to longword boundary after unconditional jumps.
+ */
+ loc = (loc + (4-1)) & ~(4-1);
+ }
+ }
+ db_printf("\n");
+ return (loc);
+}
+
diff --git a/sys/amd64/amd64/db_interface.c b/sys/amd64/amd64/db_interface.c
new file mode 100644
index 0000000..e79a2ae
--- /dev/null
+++ b/sys/amd64/amd64/db_interface.c
@@ -0,0 +1,240 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * $Id: db_interface.c,v 1.5 1993/12/19 00:50:00 wollman Exp $
+ */
+
+/*
+ * Interface to new debugger.
+ */
+#include "param.h"
+#include "systm.h"
+#include "kernel.h"
+#include "proc.h"
+#include "ddb/ddb.h"
+
+#include <sys/reboot.h>
+/* #include <vm/vm_statistics.h> */
+#include <vm/pmap.h>
+
+#include <setjmp.h>
+
+int db_active = 0;
+
+db_regs_t ddb_regs;
+
+/*
+ * Received keyboard interrupt sequence.
+ * Enter the debugger (via kdb_trap with type -1) if it is not already
+ * active and the kernel was booted with RB_KDB.
+ */
+void
+kdb_kbd_trap(regs)
+	struct i386_saved_state *regs;
+{
+	if (db_active == 0 && (boothowto & RB_KDB)) {
+	    printf("\n\nkernel: keyboard interrupt\n");
+	    kdb_trap(-1, 0, regs);
+	}
+}
+
+/*
+ * kdb_trap - field a TRACE or BPT trap
+ *
+ * Copies the trap frame into ddb_regs, runs the debugger command loop,
+ * then copies (possibly modified) registers back into the frame.
+ * Returns 1 to tell the trap handler the trap was consumed.
+ */
+
+/* While db_read_bytes()/db_write_bytes() run, this points at a recovery
+ * jmp_buf; an unexpected trap longjmps there instead of recursing. */
+static jmp_buf *db_nofault = 0;
+
+int
+kdb_trap(type, code, regs)
+	int type, code;
+	register struct i386_saved_state *regs;
+{
+#if 0
+	if ((boothowto&RB_KDB) == 0)
+		return(0);
+#endif
+
+	switch (type) {
+	    case T_BPTFLT /* T_INT3 */: /* breakpoint */
+	    case T_KDBTRAP /* T_WATCHPOINT */: /* watchpoint */
+	    case T_PRIVINFLT /* T_DEBUG */: /* single_step */
+
+	    case -1: /* keyboard interrupt */
+		break;
+
+	    default:
+		/* unexpected trap: report it, and if it happened during a
+		 * debugger memory access, abort that access */
+		kdbprinttrap(type, code);
+
+		if (db_nofault) {
+		    jmp_buf *no_fault = db_nofault;
+		    db_nofault = 0;
+		    longjmp(*no_fault, 1);
+		}
+	}
+
+	/* Should switch to kdb`s own stack here. */
+
+	ddb_regs = *regs;
+
+	if ((regs->tf_cs & 0x3) == 0) {
+	    /*
+	     * Kernel mode - esp and ss not saved
+	     */
+	    ddb_regs.tf_esp = (int)&regs->tf_esp; /* kernel stack pointer */
+#if 0
+	    ddb_regs.ss = KERNEL_DS;
+#endif
+	    /* read the live %ss into ddb_regs.tf_ss */
+	    asm(" movw %%ss,%%ax; movl %%eax,%0 "
+		: "=g" (ddb_regs.tf_ss)
+		:
+		: "ax");
+	}
+
+	db_active++;
+	cnpollc(TRUE); /* console to polled mode while in the debugger */
+	db_trap(type, code);
+	cnpollc(FALSE);
+	db_active--;
+
+	/* propagate any register edits made in the debugger back to the frame */
+	regs->tf_eip = ddb_regs.tf_eip;
+	regs->tf_eflags = ddb_regs.tf_eflags;
+	regs->tf_eax = ddb_regs.tf_eax;
+	regs->tf_ecx = ddb_regs.tf_ecx;
+	regs->tf_edx = ddb_regs.tf_edx;
+	regs->tf_ebx = ddb_regs.tf_ebx;
+	if (regs->tf_cs & 0x3) {
+	    /*
+	     * user mode - saved esp and ss valid
+	     */
+	    regs->tf_esp = ddb_regs.tf_esp; /* user stack pointer */
+	    regs->tf_ss = ddb_regs.tf_ss & 0xffff; /* user stack segment */
+	}
+	regs->tf_ebp = ddb_regs.tf_ebp;
+	regs->tf_esi = ddb_regs.tf_esi;
+	regs->tf_edi = ddb_regs.tf_edi;
+	regs->tf_es = ddb_regs.tf_es & 0xffff;
+	regs->tf_cs = ddb_regs.tf_cs & 0xffff;
+	regs->tf_ds = ddb_regs.tf_ds & 0xffff;
+#if 0
+	regs->tf_fs = ddb_regs.tf_fs & 0xffff;
+	regs->tf_gs = ddb_regs.tf_gs & 0xffff;
+#endif
+
+	return (1);
+}
+
+/*
+ * Print trap reason (type and code) for a trap that reached the
+ * debugger unexpectedly.
+ */
+void
+kdbprinttrap(type, code)
+	int type, code;
+{
+	printf("kernel: ");
+	printf("type %d", type);
+	printf(" trap, code=%x\n", code);
+}
+
+/*
+ * Read bytes from kernel address space for debugger.
+ * A fault during the copy longjmps to db_jmpbuf via kdb_trap().
+ */
+
+extern jmp_buf db_jmpbuf;
+
+void
+db_read_bytes(addr, size, data)
+	vm_offset_t addr;
+	register int size;
+	register char *data;
+{
+	register char *src;
+
+	db_nofault = &db_jmpbuf; /* arm fault recovery */
+
+	src = (char *)addr;
+	while (--size >= 0)
+	    *data++ = *src++;
+
+	db_nofault = 0;
+}
+
+/*
+ * Write bytes to kernel address space for debugger.
+ * When the target lies in the (read-only) kernel text, the one or two
+ * page-table entries covering it are temporarily made writable and then
+ * restored, with a TLB flush after each change.  Faults longjmp to
+ * db_jmpbuf via kdb_trap().
+ */
+void
+db_write_bytes(addr, size, data)
+	vm_offset_t addr;
+	register int size;
+	register char *data;
+{
+	register char *dst;
+
+	register pt_entry_t *ptep0 = 0;
+	pt_entry_t oldmap0 = { 0 };
+	vm_offset_t addr1;
+	register pt_entry_t *ptep1 = 0;
+	pt_entry_t oldmap1 = { 0 };
+	extern char etext;
+
+	db_nofault = &db_jmpbuf; /* arm fault recovery */
+
+	if (addr >= VM_MIN_KERNEL_ADDRESS &&
+	    addr <= (vm_offset_t)&etext)
+	{
+	    /* write-enable the PTE for the first page */
+	    ptep0 = pmap_pte(kernel_pmap, addr);
+	    oldmap0 = *ptep0;
+	    *(int *)ptep0 |= /* INTEL_PTE_WRITE */ PG_RW;
+
+	    addr1 = i386_trunc_page(addr + size - 1);
+	    if (i386_trunc_page(addr) != addr1) {
+		/* data crosses a page boundary */
+
+		ptep1 = pmap_pte(kernel_pmap, addr1);
+		oldmap1 = *ptep1;
+		*(int *)ptep1 |= /* INTEL_PTE_WRITE */ PG_RW;
+	    }
+	    tlbflush();
+	}
+
+	dst = (char *)addr;
+
+	while (--size >= 0)
+	    *dst++ = *data++;
+
+	db_nofault = 0;
+
+	if (ptep0) {
+	    /* restore the original (read-only) mappings */
+	    *ptep0 = oldmap0;
+	    if (ptep1) {
+		*ptep1 = oldmap1;
+	    }
+	    tlbflush();
+	}
+}
+
+/*
+ * Enter the debugger from C code.  'msg' is currently unused; the
+ * int $3 breakpoint trap is fielded by kdb_trap() as T_BPTFLT.
+ */
+void
+Debugger (msg)
+	const char *msg;
+{
+	asm ("int $3");
+}
diff --git a/sys/amd64/amd64/db_trace.c b/sys/amd64/amd64/db_trace.c
new file mode 100644
index 0000000..d536d94
--- /dev/null
+++ b/sys/amd64/amd64/db_trace.c
@@ -0,0 +1,340 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * $Id: db_trace.c,v 1.4 1994/01/03 07:55:19 davidg Exp $
+ */
+
+#include "param.h"
+
+#include <vm/vm_param.h>
+#include <vm/lock.h>
+#include <vm/vm_prot.h>
+#include <vm/pmap.h>
+#include <machine/pmap.h>
+#include "systm.h"
+#include "proc.h"
+#include "ddb/ddb.h"
+
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+
+/*
+ * Machine register set.
+ */
+struct db_variable db_regs[] = {
+	/* name, pointer into the saved ddb trapframe, set/get function */
+	"cs", (int *)&ddb_regs.tf_cs, FCN_NULL,
+	"ds", (int *)&ddb_regs.tf_ds, FCN_NULL,
+	"es", (int *)&ddb_regs.tf_es, FCN_NULL,
+#if 0
+	"fs", (int *)&ddb_regs.tf_fs, FCN_NULL,
+	"gs", (int *)&ddb_regs.tf_gs, FCN_NULL,
+#endif
+	"ss", (int *)&ddb_regs.tf_ss, FCN_NULL,
+	"eax", (int *)&ddb_regs.tf_eax, FCN_NULL,
+	"ecx", (int *)&ddb_regs.tf_ecx, FCN_NULL,
+	"edx", (int *)&ddb_regs.tf_edx, FCN_NULL,
+	"ebx", (int *)&ddb_regs.tf_ebx, FCN_NULL,
+	"esp", (int *)&ddb_regs.tf_esp,FCN_NULL,
+	"ebp", (int *)&ddb_regs.tf_ebp, FCN_NULL,
+	"esi", (int *)&ddb_regs.tf_esi, FCN_NULL,
+	"edi", (int *)&ddb_regs.tf_edi, FCN_NULL,
+	"eip", (int *)&ddb_regs.tf_eip, FCN_NULL,
+	"efl", (int *)&ddb_regs.tf_eflags, FCN_NULL,
+};
+/* End marker for ddb's register-variable iteration. */
+struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
+
+/*
+ * Stack trace.
+ */
+#define INKERNEL(va) (((vm_offset_t)(va)) >= VM_MIN_KERNEL_ADDRESS)
+
+/* Layout of a frame built by the standard i386 prologue (pushl %ebp). */
+struct i386_frame {
+	struct i386_frame *f_frame;	/* saved %ebp: the caller's frame */
+	int f_retaddr;			/* return address */
+	int f_arg0;			/* first stack-passed argument */
+};
+
+/* Special frame kinds recognized while walking the stack. */
+#define TRAP 1
+#define INTERRUPT 2
+#define SYSCALL 3
+
+/* Symbol addresses resolved lazily by db_find_trace_symbols(). */
+db_addr_t db_trap_symbol_value = 0;
+db_addr_t db_syscall_symbol_value = 0;
+db_addr_t db_kdintr_symbol_value = 0;
+boolean_t db_trace_symbols_found = FALSE;
+
+void
+db_find_trace_symbols()
+{
+	db_expr_t value;
+	/* Cache the addresses of the special entry points; missing ones
+	 * simply keep their 0 default. */
+	if (db_value_of_name("_trap", &value))
+		db_trap_symbol_value = (db_addr_t) value;
+	if (db_value_of_name("_kdintr", &value))
+		db_kdintr_symbol_value = (db_addr_t) value;
+	if (db_value_of_name("_syscall", &value))
+		db_syscall_symbol_value = (db_addr_t) value;
+	db_trace_symbols_found = TRUE;
+}
+
+/*
+ * Figure out how many arguments were passed into the frame at "fp".
+ */
+int
+db_numargs(fp)
+	struct i386_frame *fp;
+{
+	int *argp;
+	int inst;
+	int args;
+	extern char etext[];
+
+	/*
+	 * Peek at the instruction at the return address: the caller's
+	 * stack-cleanup code encodes how many argument longs were pushed.
+	 */
+	argp = (int *)db_get_value((int)&fp->f_retaddr, 4, FALSE);
+	if (argp < (int *)VM_MIN_KERNEL_ADDRESS || argp > (int *)etext)
+		args = 5;	/* return address not in kernel text; guess */
+	else {
+		inst = db_get_value((int)argp, 4, FALSE);
+		if ((inst & 0xff) == 0x59)	/* popl %ecx */
+			args = 1;
+		else if ((inst & 0xffff) == 0xc483)	/* addl $n, %esp */
+			args = ((inst >> 16) & 0xff) / 4;
+		else
+			args = 5;	/* unrecognized cleanup; guess */
+	}
+	return (args);
+}
+
+/*
+ * Figure out the next frame up in the call stack.
+ * For trap(), we print the address of the faulting instruction and
+ * proceed with the calling frame. We return the ip that faulted.
+ * If the trap was caused by jumping through a bogus pointer, then
+ * the next line in the backtrace will list some random function as
+ * being called. It should get the argument list correct, though.
+ * It might be possible to dig out from the next frame up the name
+ * of the function that faulted, but that could get hairy.
+ */
+void
+db_nextframe(fp, ip, argp, is_trap)
+	struct i386_frame **fp;	/* in/out */
+	db_addr_t *ip;	/* out */
+	int *argp;	/* in */
+	int is_trap;	/* in */
+{
+	struct i386_saved_state *saved_regs;
+
+	switch (is_trap) {
+	    case 0:
+		/* Ordinary frame: follow the saved %ebp chain. */
+		*ip = (db_addr_t)
+			db_get_value((int) &(*fp)->f_retaddr, 4, FALSE);
+		*fp = (struct i386_frame *)
+			db_get_value((int) &(*fp)->f_frame, 4, FALSE);
+		break;
+	    case TRAP:
+	    default:
+		/*
+		 * We know that trap() has 1 argument and we know that
+		 * it is an (int *).
+		 */
+#if 0
+		saved_regs = (struct i386_saved_state *)
+			db_get_value((int)argp, 4, FALSE);
+#endif
+		saved_regs = (struct i386_saved_state *)argp;
+		db_printf("--- trap (number %d) ---\n",
+			  saved_regs->tf_trapno & 0xffff);
+		db_printsym(saved_regs->tf_eip, DB_STGY_XTRN);
+		db_printf(":\n");
+		/* Resume the walk from the interrupted context. */
+		*fp = (struct i386_frame *)saved_regs->tf_ebp;
+		*ip = (db_addr_t)saved_regs->tf_eip;
+		break;
+
+	    case SYSCALL: {
+		/* Deliberately shadows the outer saved_regs with the
+		 * trapframe view of the same argument. */
+		struct trapframe *saved_regs = (struct trapframe *)argp;
+
+		db_printf("--- syscall (number %d) ---\n", saved_regs->tf_eax);
+		db_printsym(saved_regs->tf_eip, DB_STGY_XTRN);
+		db_printf(":\n");
+		*fp = (struct i386_frame *)saved_regs->tf_ebp;
+		*ip = (db_addr_t)saved_regs->tf_eip;
+	    }
+		break;
+	}
+}
+
+void
+db_stack_trace_cmd(addr, have_addr, count, modif)
+	db_expr_t addr;
+	boolean_t have_addr;
+	db_expr_t count;
+	char *modif;
+{
+	struct i386_frame *frame, *lastframe;
+	int *argp;
+	db_addr_t callpc;
+	int is_trap;
+	boolean_t kernel_only = TRUE;
+	boolean_t trace_thread = FALSE;
+
+#if 0
+	if (!db_trace_symbols_found)
+		db_find_trace_symbols();
+#endif
+
+	/* Parse modifiers: 't' = trace thread, 'u' = include user frames. */
+	{
+		register char *cp = modif;
+		register char c;
+
+		while ((c = *cp++) != 0) {
+			if (c == 't')
+				trace_thread = TRUE;
+			if (c == 'u')
+				kernel_only = FALSE;
+		}
+	}
+
+	if (count == -1)
+		count = 65535;	/* "unlimited" */
+
+	if (!have_addr) {
+		/* No address given: start at the saved ddb register state. */
+		frame = (struct i386_frame *)ddb_regs.tf_ebp;
+		callpc = (db_addr_t)ddb_regs.tf_eip;
+	}
+	else if (trace_thread) {
+		printf ("db_trace.c: can't trace thread\n");
+	}
+	else {
+		frame = (struct i386_frame *)addr;
+		callpc = (db_addr_t)db_get_value((int)&frame->f_retaddr, 4, FALSE);
+	}
+
+	lastframe = 0;
+	while (count-- && frame != 0) {
+		int narg;
+		char *	name;
+		db_expr_t	offset;
+		db_sym_t	sym;
+#define MAXNARG	16
+		char	*argnames[MAXNARG], **argnp = NULL;
+
+		sym = db_search_symbol(callpc, DB_STGY_ANY, &offset);
+		db_symbol_values(sym, &name, NULL);
+
+		if (lastframe == 0 && sym == NULL) {
+			/* Symbol not found, peek at code */
+			int	instr = db_get_value(callpc, 4, FALSE);
+
+			offset = 1;
+			if ((instr & 0x00ffffff) == 0x00e58955 ||
+					/* enter: pushl %ebp, movl %esp, %ebp */
+			    (instr & 0x0000ffff) == 0x0000e589
+					/* enter+1: movl %esp, %ebp */ ) {
+				offset = 0;
+			}
+		}
+		/* Classify the frame by the name of the function it is in. */
+#define STRCMP(s1,s2) ((s1) && (s2) && strcmp((s1), (s2)) == 0)
+		if (INKERNEL((int)frame) && STRCMP(name, "_trap")) {
+			narg = 1;
+			is_trap = TRAP;
+		}
+		else
+		if (INKERNEL((int)frame) && STRCMP(name, "_kdintr")) {
+			is_trap = INTERRUPT;
+			narg = 0;
+		}
+		else
+		if (INKERNEL((int)frame) && STRCMP(name, "_syscall")) {
+			is_trap = SYSCALL;
+			narg = 0;
+		}
+		/* Note: a directive between else-if arms is legal C. */
+#undef STRCMP
+		else {
+			is_trap = 0;
+			narg = MAXNARG;
+			if (db_sym_numargs(sym, &narg, argnames)) {
+				argnp = argnames;
+			} else {
+				narg = db_numargs(frame);
+			}
+		}
+
+		db_printf("%s(", name);
+
+		if (lastframe == 0 && offset == 0 && !have_addr) {
+			/*
+			 * We have a breakpoint before the frame is set up
+			 * Use %esp instead
+			 */
+			argp = &((struct i386_frame *)(ddb_regs.tf_esp-4))->f_arg0;
+		} else
+			argp = &frame->f_arg0;
+
+		while (narg) {
+			if (argnp)
+				db_printf("%s=", *argnp++);
+			db_printf("%x", db_get_value((int)argp, 4, FALSE));
+			argp++;
+			if (--narg != 0)
+				db_printf(",");
+		}
+		db_printf(") at ");
+		db_printsym(callpc, DB_STGY_PROC);
+		db_printf("\n");
+
+		if (lastframe == 0 && offset == 0 && !have_addr) {
+			/* Frame really belongs to next callpc */
+			lastframe = (struct i386_frame *)(ddb_regs.tf_esp-4);
+			callpc = (db_addr_t)db_get_value((int)&lastframe->f_retaddr, 4, FALSE);
+			continue;
+		}
+
+		lastframe = frame;
+		db_nextframe(&frame, &callpc, &frame->f_arg0, is_trap);
+
+		if (frame == 0) {
+			/* end of chain */
+			break;
+		}
+		/* Sanity-check the new frame pointer against the old one. */
+		if (INKERNEL((int)frame)) {
+			/* staying in kernel */
+			if (frame <= lastframe) {
+				db_printf("Bad frame pointer: 0x%x\n", frame);
+				break;
+			}
+		}
+		else if (INKERNEL((int)lastframe)) {
+			/* switch from user to kernel */
+			if (kernel_only)
+				break;	/* kernel stack only */
+		}
+		else {
+			/* in user */
+			if (frame <= lastframe) {
+				db_printf("Bad user frame pointer: 0x%x\n", frame);
+				break;
+			}
+		}
+	}
+}
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
new file mode 100644
index 0000000..30bc164
--- /dev/null
+++ b/sys/amd64/amd64/exception.S
@@ -0,0 +1,275 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: exception.s,v 1.2 1994/01/03 07:55:20 davidg Exp $
+ */
+
+#include "npx.h" /* NNPX */
+
+#include "assym.s" /* system defines */
+
+#include "errno.h" /* error return codes */
+
+#include "machine/spl.h" /* SWI_AST_MASK ... */
+
+#include "machine/psl.h" /* PSL_I */
+
+#include "machine/trap.h" /* trap codes */
+#include "syscall.h" /* syscall numbers */
+
+#include "machine/asmacros.h" /* miscellaneous macros */
+
+#define KDSEL 0x10 /* kernel data selector */
+#define SEL_RPL_MASK 0x0003 /* requestor privilege level bits of a selector */
+#define TRAPF_CS_OFF (13 * 4) /* byte offset of tf_cs within the trap frame */
+
+ .text
+
+/*****************************************************************************/
+/* Trap handling */
+/*****************************************************************************/
+/*
+ * Trap and fault vector routines
+ *
+ * IDTVEC(name) emits an aligned, exported entry point _Xname; the empty
+ * comment sequence is old (Reiser) cpp token pasting.  TRAP(a) pushes the
+ * trap number and joins the common _alltraps path.
+ */
+#define IDTVEC(name) ALIGN_TEXT ; .globl _X/**/name ; _X/**/name:
+#define TRAP(a) pushl $(a) ; jmp _alltraps
+
+/*
+ * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
+ * control. The sti's give the standard losing behaviour for ddb and kgdb.
+ */
+#ifdef BDE_DEBUGGER
+#define BDBTRAP(name) \
+ ss ; \
+ cmpb $0,_bdb_exists ; \
+ je 1f ; \
+ testb $SEL_RPL_MASK,4(%esp) ; \
+ jne 1f ; \
+ ss ; \
+ .globl bdb_/**/name/**/_ljmp ; \
+bdb_/**/name/**/_ljmp: ; \
+ ljmp $0,$0 ; \
+1:
+#else
+#define BDBTRAP(name)
+#endif
+
+/*
+ * BPTTRAP re-enables interrupts iff they were enabled (PSL_I) in the
+ * interrupted context before dispatching the breakpoint/trace trap.
+ */
+#ifdef KGDB
+# define BPTTRAP(a) testl $PSL_I,4+8(%esp) ; je 1f ; sti ; 1: ; \
+ pushl $(a) ; jmp _bpttraps
+#else
+# define BPTTRAP(a) testl $PSL_I,4+8(%esp) ; je 1f ; sti ; 1: ; TRAP(a)
+#endif
+
+MCOUNT_LABEL(user)
+MCOUNT_LABEL(btrap)
+
+/*
+ * Exception vectors.  Vectors for which the CPU supplies no error code
+ * push a dummy 0 so every trap frame has the same layout.
+ */
+IDTVEC(div)
+ pushl $0; TRAP(T_DIVIDE)
+IDTVEC(dbg)
+ BDBTRAP(dbg)
+ pushl $0; BPTTRAP(T_TRCTRAP)
+IDTVEC(nmi)
+ pushl $0; TRAP(T_NMI)
+IDTVEC(bpt)
+ BDBTRAP(bpt)
+ pushl $0; BPTTRAP(T_BPTFLT)
+IDTVEC(ofl)
+ pushl $0; TRAP(T_OFLOW)
+IDTVEC(bnd)
+ pushl $0; TRAP(T_BOUND)
+IDTVEC(ill)
+ pushl $0; TRAP(T_PRIVINFLT)
+IDTVEC(dna)
+ pushl $0; TRAP(T_DNA)
+IDTVEC(dble)
+ TRAP(T_DOUBLEFLT)
+IDTVEC(fpusegm)
+ pushl $0; TRAP(T_FPOPFLT)
+IDTVEC(tss)
+ TRAP(T_TSSFLT)
+IDTVEC(missing)
+ TRAP(T_SEGNPFLT)
+IDTVEC(stk)
+ TRAP(T_STKFLT)
+IDTVEC(prot)
+ TRAP(T_PROTFLT)
+IDTVEC(page)
+ TRAP(T_PAGEFLT)
+IDTVEC(rsvd)
+ pushl $0; TRAP(T_RESERVED)
+IDTVEC(fpu)
+#if NNPX > 0
+ /*
+ * Handle like an interrupt so that we can call npxintr to clear the
+ * error. It would be better to handle npx interrupts as traps but
+ * this is difficult for nested interrupts.
+ */
+ pushl $0 /* dummy error code */
+ pushl $0 /* dummy trap type */
+ pushal
+ pushl %ds
+ pushl %es /* now the stack frame is a trap frame */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ FAKE_MCOUNT(12*4(%esp))
+ movl _cpl,%eax
+ pushl %eax
+ pushl $0 /* dummy unit to finish building intr frame */
+ incl _cnt+V_TRAP
+ orl $SWI_AST_MASK,%eax
+ movl %eax,_cpl
+ call _npxintr
+ MEXITCOUNT
+ jmp _doreti
+#else /* NNPX > 0 */
+ pushl $0; TRAP(T_ARITHTRAP)
+#endif /* NNPX > 0 */
+ /* 17 - 31 reserved for future exp */
+IDTVEC(rsvd0)
+ pushl $0; TRAP(17)
+IDTVEC(rsvd1)
+ pushl $0; TRAP(18)
+IDTVEC(rsvd2)
+ pushl $0; TRAP(19)
+IDTVEC(rsvd3)
+ pushl $0; TRAP(20)
+IDTVEC(rsvd4)
+ pushl $0; TRAP(21)
+IDTVEC(rsvd5)
+ pushl $0; TRAP(22)
+IDTVEC(rsvd6)
+ pushl $0; TRAP(23)
+IDTVEC(rsvd7)
+ pushl $0; TRAP(24)
+IDTVEC(rsvd8)
+ pushl $0; TRAP(25)
+IDTVEC(rsvd9)
+ pushl $0; TRAP(26)
+IDTVEC(rsvd10)
+ pushl $0; TRAP(27)
+IDTVEC(rsvd11)
+ pushl $0; TRAP(28)
+IDTVEC(rsvd12)
+ pushl $0; TRAP(29)
+IDTVEC(rsvd13)
+ pushl $0; TRAP(30)
+IDTVEC(rsvd14)
+ pushl $0; TRAP(31)
+
+ SUPERALIGN_TEXT
+/* Common trap entry: finish building the trap frame, load kernel %ds/%es. */
+_alltraps:
+ pushal
+ pushl %ds
+ pushl %es
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ FAKE_MCOUNT(12*4(%esp))
+calltrap:
+ FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
+ incl _cnt+V_TRAP
+ orl $SWI_AST_MASK,_cpl
+ call _trap
+ /*
+ * There was no place to save the cpl so we have to recover it
+ * indirectly. For traps from user mode it was 0, and for traps
+ * from kernel mode Oring SWI_AST_MASK into it didn't change it.
+ */
+ subl %eax,%eax
+ testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
+ jne 1f
+ movl _cpl,%eax
+1:
+ /*
+ * Return via _doreti to handle ASTs. Have to change trap frame
+ * to interrupt frame.
+ */
+ pushl %eax /* cpl to restore */
+ subl $4,%esp /* dummy unit slot of the intr frame */
+ MEXITCOUNT
+ jmp _doreti
+
+#ifdef KGDB
+/*
+ * This code checks for a kgdb trap, then falls through
+ * to the regular trap code.
+ */
+ SUPERALIGN_TEXT
+_bpttraps:
+ pushal
+ pushl %ds
+ pushl %es
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ FAKE_MCOUNT(12*4(%esp))
+ testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp) /* non-kernel mode? */
+ jne calltrap /* yes */
+ call _kgdb_trap_glue
+ MEXITCOUNT
+ jmp calltrap
+#endif
+
+/*
+ * Call gate entry for syscall.  A call gate pushes no eflags, trapno or
+ * error code; the two pushfl's reserve those slots and the saved eflags
+ * is then copied from the tf_err slot into tf_eflags.
+ */
+ SUPERALIGN_TEXT
+IDTVEC(syscall)
+ pushfl /* Room for tf_err */
+ pushfl /* Room for tf_trapno */
+ pushal
+ pushl %ds
+ pushl %es
+ movl $KDSEL,%eax /* switch to kernel segments */
+ movl %ax,%ds
+ movl %ax,%es
+ movl TF_ERR(%esp),%eax /* copy eflags from tf_err to tf_eflags */
+ movl %eax,TF_EFLAGS(%esp)
+ FAKE_MCOUNT(12*4(%esp))
+ incl _cnt+V_SYSCALL
+ movl $SWI_AST_MASK,_cpl
+ call _syscall
+ /*
+ * Return via _doreti to handle ASTs.
+ */
+ pushl $0 /* cpl to restore */
+ subl $4,%esp /* dummy unit slot of the intr frame */
+ MEXITCOUNT
+ jmp _doreti
+
+/*
+ * include generated interrupt vectors and ISA intr code
+ */
+#include "i386/isa/vector.s"
+#include "i386/isa/icu.s"
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
new file mode 100644
index 0000000..30bc164
--- /dev/null
+++ b/sys/amd64/amd64/exception.s
@@ -0,0 +1,275 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: exception.s,v 1.2 1994/01/03 07:55:20 davidg Exp $
+ */
+
+#include "npx.h" /* NNPX */
+
+#include "assym.s" /* system defines */
+
+#include "errno.h" /* error return codes */
+
+#include "machine/spl.h" /* SWI_AST_MASK ... */
+
+#include "machine/psl.h" /* PSL_I */
+
+#include "machine/trap.h" /* trap codes */
+#include "syscall.h" /* syscall numbers */
+
+#include "machine/asmacros.h" /* miscellaneous macros */
+
+#define KDSEL 0x10 /* kernel data selector */
+#define SEL_RPL_MASK 0x0003 /* requestor privilege level bits of a selector */
+#define TRAPF_CS_OFF (13 * 4) /* byte offset of tf_cs within the trap frame */
+
+ .text
+
+/*****************************************************************************/
+/* Trap handling */
+/*****************************************************************************/
+/*
+ * Trap and fault vector routines
+ *
+ * IDTVEC(name) emits an aligned, exported entry point _Xname; the empty
+ * comment sequence is old (Reiser) cpp token pasting.  TRAP(a) pushes the
+ * trap number and joins the common _alltraps path.
+ */
+#define IDTVEC(name) ALIGN_TEXT ; .globl _X/**/name ; _X/**/name:
+#define TRAP(a) pushl $(a) ; jmp _alltraps
+
+/*
+ * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
+ * control. The sti's give the standard losing behaviour for ddb and kgdb.
+ */
+#ifdef BDE_DEBUGGER
+#define BDBTRAP(name) \
+ ss ; \
+ cmpb $0,_bdb_exists ; \
+ je 1f ; \
+ testb $SEL_RPL_MASK,4(%esp) ; \
+ jne 1f ; \
+ ss ; \
+ .globl bdb_/**/name/**/_ljmp ; \
+bdb_/**/name/**/_ljmp: ; \
+ ljmp $0,$0 ; \
+1:
+#else
+#define BDBTRAP(name)
+#endif
+
+/*
+ * BPTTRAP re-enables interrupts iff they were enabled (PSL_I) in the
+ * interrupted context before dispatching the breakpoint/trace trap.
+ */
+#ifdef KGDB
+# define BPTTRAP(a) testl $PSL_I,4+8(%esp) ; je 1f ; sti ; 1: ; \
+ pushl $(a) ; jmp _bpttraps
+#else
+# define BPTTRAP(a) testl $PSL_I,4+8(%esp) ; je 1f ; sti ; 1: ; TRAP(a)
+#endif
+
+MCOUNT_LABEL(user)
+MCOUNT_LABEL(btrap)
+
+/*
+ * Exception vectors.  Vectors for which the CPU supplies no error code
+ * push a dummy 0 so every trap frame has the same layout.
+ */
+IDTVEC(div)
+ pushl $0; TRAP(T_DIVIDE)
+IDTVEC(dbg)
+ BDBTRAP(dbg)
+ pushl $0; BPTTRAP(T_TRCTRAP)
+IDTVEC(nmi)
+ pushl $0; TRAP(T_NMI)
+IDTVEC(bpt)
+ BDBTRAP(bpt)
+ pushl $0; BPTTRAP(T_BPTFLT)
+IDTVEC(ofl)
+ pushl $0; TRAP(T_OFLOW)
+IDTVEC(bnd)
+ pushl $0; TRAP(T_BOUND)
+IDTVEC(ill)
+ pushl $0; TRAP(T_PRIVINFLT)
+IDTVEC(dna)
+ pushl $0; TRAP(T_DNA)
+IDTVEC(dble)
+ TRAP(T_DOUBLEFLT)
+IDTVEC(fpusegm)
+ pushl $0; TRAP(T_FPOPFLT)
+IDTVEC(tss)
+ TRAP(T_TSSFLT)
+IDTVEC(missing)
+ TRAP(T_SEGNPFLT)
+IDTVEC(stk)
+ TRAP(T_STKFLT)
+IDTVEC(prot)
+ TRAP(T_PROTFLT)
+IDTVEC(page)
+ TRAP(T_PAGEFLT)
+IDTVEC(rsvd)
+ pushl $0; TRAP(T_RESERVED)
+IDTVEC(fpu)
+#if NNPX > 0
+ /*
+ * Handle like an interrupt so that we can call npxintr to clear the
+ * error. It would be better to handle npx interrupts as traps but
+ * this is difficult for nested interrupts.
+ */
+ pushl $0 /* dummy error code */
+ pushl $0 /* dummy trap type */
+ pushal
+ pushl %ds
+ pushl %es /* now the stack frame is a trap frame */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ FAKE_MCOUNT(12*4(%esp))
+ movl _cpl,%eax
+ pushl %eax
+ pushl $0 /* dummy unit to finish building intr frame */
+ incl _cnt+V_TRAP
+ orl $SWI_AST_MASK,%eax
+ movl %eax,_cpl
+ call _npxintr
+ MEXITCOUNT
+ jmp _doreti
+#else /* NNPX > 0 */
+ pushl $0; TRAP(T_ARITHTRAP)
+#endif /* NNPX > 0 */
+ /* 17 - 31 reserved for future exp */
+IDTVEC(rsvd0)
+ pushl $0; TRAP(17)
+IDTVEC(rsvd1)
+ pushl $0; TRAP(18)
+IDTVEC(rsvd2)
+ pushl $0; TRAP(19)
+IDTVEC(rsvd3)
+ pushl $0; TRAP(20)
+IDTVEC(rsvd4)
+ pushl $0; TRAP(21)
+IDTVEC(rsvd5)
+ pushl $0; TRAP(22)
+IDTVEC(rsvd6)
+ pushl $0; TRAP(23)
+IDTVEC(rsvd7)
+ pushl $0; TRAP(24)
+IDTVEC(rsvd8)
+ pushl $0; TRAP(25)
+IDTVEC(rsvd9)
+ pushl $0; TRAP(26)
+IDTVEC(rsvd10)
+ pushl $0; TRAP(27)
+IDTVEC(rsvd11)
+ pushl $0; TRAP(28)
+IDTVEC(rsvd12)
+ pushl $0; TRAP(29)
+IDTVEC(rsvd13)
+ pushl $0; TRAP(30)
+IDTVEC(rsvd14)
+ pushl $0; TRAP(31)
+
+ SUPERALIGN_TEXT
+/* Common trap entry: finish building the trap frame, load kernel %ds/%es. */
+_alltraps:
+ pushal
+ pushl %ds
+ pushl %es
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ FAKE_MCOUNT(12*4(%esp))
+calltrap:
+ FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
+ incl _cnt+V_TRAP
+ orl $SWI_AST_MASK,_cpl
+ call _trap
+ /*
+ * There was no place to save the cpl so we have to recover it
+ * indirectly. For traps from user mode it was 0, and for traps
+ * from kernel mode Oring SWI_AST_MASK into it didn't change it.
+ */
+ subl %eax,%eax
+ testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
+ jne 1f
+ movl _cpl,%eax
+1:
+ /*
+ * Return via _doreti to handle ASTs. Have to change trap frame
+ * to interrupt frame.
+ */
+ pushl %eax /* cpl to restore */
+ subl $4,%esp /* dummy unit slot of the intr frame */
+ MEXITCOUNT
+ jmp _doreti
+
+#ifdef KGDB
+/*
+ * This code checks for a kgdb trap, then falls through
+ * to the regular trap code.
+ */
+ SUPERALIGN_TEXT
+_bpttraps:
+ pushal
+ pushl %ds
+ pushl %es
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ FAKE_MCOUNT(12*4(%esp))
+ testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp) /* non-kernel mode? */
+ jne calltrap /* yes */
+ call _kgdb_trap_glue
+ MEXITCOUNT
+ jmp calltrap
+#endif
+
+/*
+ * Call gate entry for syscall.  A call gate pushes no eflags, trapno or
+ * error code; the two pushfl's reserve those slots and the saved eflags
+ * is then copied from the tf_err slot into tf_eflags.
+ */
+ SUPERALIGN_TEXT
+IDTVEC(syscall)
+ pushfl /* Room for tf_err */
+ pushfl /* Room for tf_trapno */
+ pushal
+ pushl %ds
+ pushl %es
+ movl $KDSEL,%eax /* switch to kernel segments */
+ movl %ax,%ds
+ movl %ax,%es
+ movl TF_ERR(%esp),%eax /* copy eflags from tf_err to tf_eflags */
+ movl %eax,TF_EFLAGS(%esp)
+ FAKE_MCOUNT(12*4(%esp))
+ incl _cnt+V_SYSCALL
+ movl $SWI_AST_MASK,_cpl
+ call _syscall
+ /*
+ * Return via _doreti to handle ASTs.
+ */
+ pushl $0 /* cpl to restore */
+ subl $4,%esp /* dummy unit slot of the intr frame */
+ MEXITCOUNT
+ jmp _doreti
+
+/*
+ * include generated interrupt vectors and ISA intr code
+ */
+#include "i386/isa/vector.s"
+#include "i386/isa/icu.s"
diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
new file mode 100644
index 0000000..a3ce1e2
--- /dev/null
+++ b/sys/amd64/amd64/fpu.c
@@ -0,0 +1,554 @@
+/*-
+ * Copyright (c) 1990 William Jolitz.
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)npx.c 7.2 (Berkeley) 5/12/91
+ * $Id: npx.c,v 1.6 1994/01/03 07:55:43 davidg Exp $
+ */
+
+#include "npx.h"
+#if NNPX > 0
+
+#include "param.h"
+#include "systm.h"
+#include "conf.h"
+#include "file.h"
+#include "proc.h"
+#include "machine/cpu.h"
+#include "machine/pcb.h"
+#include "machine/trap.h"
+#include "ioctl.h"
+#include "machine/specialreg.h"
+#include "i386/isa/icu.h"
+#include "i386/isa/isa_device.h"
+#include "i386/isa/isa.h"
+
+/*
+ * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
+ */
+
+#ifdef __GNUC__
+
+/* Inline-asm wrappers for FPU and interrupt-control instructions. */
+#define disable_intr() __asm("cli")
+#define enable_intr() __asm("sti")
+#define fldcw(addr) __asm("fldcw %0" : : "m" (*addr))
+#define fnclex() __asm("fnclex")
+#define fninit() __asm("fninit")
+#define fnsave(addr) __asm("fnsave %0" : "=m" (*addr) : "0" (*addr))
+#define fnstcw(addr) __asm("fnstcw %0" : "=m" (*addr) : "0" (*addr))
+#define fnstsw(addr) __asm("fnstsw %0" : "=m" (*addr) : "0" (*addr))
+#define fp_divide_by_0() __asm("fldz; fld1; fdiv %st,%st(1); fwait")
+#define frstor(addr) __asm("frstor %0" : : "m" (*addr))
+#define fwait() __asm("fwait")
+#define read_eflags() ({u_long ef; \
+ __asm("pushf; popl %0" : "=a" (ef)); \
+ ef; })
+/* Set CR0_TS via smsw/lmsw so ESC/WAIT instructions trap (DNA). */
+#define start_emulating() __asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
+ : : "n" (CR0_TS) : "ax")
+#define stop_emulating() __asm("clts")
+#define write_eflags(ef) __asm("pushl %0; popf" : : "a" ((u_long) ef))
+
+#else /* not __GNUC__ */
+
+void disable_intr __P((void));
+void enable_intr __P((void));
+void fldcw __P((caddr_t addr));
+void fnclex __P((void));
+void fninit __P((void));
+void fnsave __P((caddr_t addr));
+void fnstcw __P((caddr_t addr));
+void fnstsw __P((caddr_t addr));
+void fp_divide_by_0 __P((void));
+void frstor __P((caddr_t addr));
+void fwait __P((void));
+u_long read_eflags __P((void));
+void start_emulating __P((void));
+void stop_emulating __P((void));
+void write_eflags __P((u_long ef));
+
+#endif /* __GNUC__ */
+
+typedef u_char bool_t;
+
+extern struct gate_descriptor idt[];
+
+int npxdna __P((void));
+void npxexit __P((struct proc *p));
+void npxinit __P((u_int control));
+void npxintr __P((struct intrframe frame));
+void npxsave __P((struct save87 *addr));
+static int npxattach __P((struct isa_device *dvp));
+static int npxprobe __P((struct isa_device *dvp));
+static int npxprobe1 __P((struct isa_device *dvp));
+
+struct isa_driver npxdriver = {
+ npxprobe, npxattach, "npx",
+};
+
+u_int npx0_imask;
+struct proc *npxproc;	/* NOTE(review): appears to track the FPU owner;
+			 * set elsewhere (npxsave/fnsave) -- confirm. */
+
+/* Probe results consumed by npxattach()/npxinit(). */
+static bool_t npx_ex16;
+static bool_t npx_exists;
+static struct gate_descriptor npx_idt_probeintr;
+static int npx_intrno;
+static volatile u_int npx_intrs_while_probing;
+static bool_t npx_irq13;
+static volatile u_int npx_traps_while_probing;
+
+/*
+ * Special interrupt handlers. Someday intr0-intr15 will be used to count
+ * interrupts. We'll still need a special exception 16 handler. The busy
+ * latch stuff in probintr() can be moved to npxprobe().
+ */
+/* IRQ13 handler used only during probing: counts interrupts, EOIs both
+ * ICUs and clears the BUSY# latch.  (String-asm body left untouched.) */
+void probeintr(void);
+asm
+("
+ .text
+_probeintr:
+ ss
+ incl _npx_intrs_while_probing
+ pushl %eax
+ movb $0x20,%al # EOI (asm in strings loses cpp features)
+ outb %al,$0xa0 # IO_ICU2
+ outb %al,$0x20 #IO_ICU1
+ movb $0,%al
+ outb %al,$0xf0 # clear BUSY# latch
+ popl %eax
+ iret
+");
+
+/* Exception-16 handler used only during probing: counts traps and
+ * clears the pending FPU exception state. */
+void probetrap(void);
+asm
+("
+ .text
+_probetrap:
+ ss
+ incl _npx_traps_while_probing
+ fnclex
+ iret
+");
+
+/*
+ * Probe routine. Initialize cr0 to give correct behaviour for [f]wait
+ * whether the device exists or not (XXX should be elsewhere). Set flags
+ * to tell npxattach() what to do. Modify device struct if npx doesn't
+ * need to use interrupts. Return 1 if device exists.
+ */
+static int
+npxprobe(dvp)
+	struct isa_device *dvp;
+{
+	int result;
+	u_long save_eflags;
+	u_char save_icu1_mask;
+	u_char save_icu2_mask;
+	struct gate_descriptor save_idt_npxintr;
+	struct gate_descriptor save_idt_npxtrap;
+	/*
+	 * This routine is now just a wrapper for npxprobe1(), to install
+	 * special npx interrupt and trap handlers, to enable npx interrupts
+	 * and to disable other interrupts. Someday isa_configure() will
+	 * install suitable handlers and run with interrupts enabled so we
+	 * won't need to do so much here.
+	 */
+	npx_intrno = NRSVIDT + ffs(dvp->id_irq) - 1;
+	save_eflags = read_eflags();
+	disable_intr();
+	/* Save everything we are about to clobber: ICU masks and the
+	 * IDT entries for exception 16 and the npx IRQ. */
+	save_icu1_mask = inb(IO_ICU1 + 1);
+	save_icu2_mask = inb(IO_ICU2 + 1);
+	save_idt_npxintr = idt[npx_intrno];
+	save_idt_npxtrap = idt[16];
+	outb(IO_ICU1 + 1, ~(IRQ_SLAVE | dvp->id_irq));
+	outb(IO_ICU2 + 1, ~(dvp->id_irq >> 8));
+	setidt(16, probetrap, SDT_SYS386TGT, SEL_KPL);
+	setidt(npx_intrno, probeintr, SDT_SYS386IGT, SEL_KPL);
+	npx_idt_probeintr = idt[npx_intrno];
+	enable_intr();
+	result = npxprobe1(dvp);
+	disable_intr();
+	/* Restore the saved ICU masks, IDT entries and eflags. */
+	outb(IO_ICU1 + 1, save_icu1_mask);
+	outb(IO_ICU2 + 1, save_icu2_mask);
+	idt[npx_intrno] = save_idt_npxintr;
+	idt[16] = save_idt_npxtrap;
+	write_eflags(save_eflags);
+	return (result);
+}
+
+/*
+ * Second-stage probe, run with the special handlers installed by
+ * npxprobe(): decide between exception 16, IRQ13 and the emulator.
+ */
+static int
+npxprobe1(dvp)
+	struct isa_device *dvp;
+{
+	int control;
+	int status;
+#ifdef lint
+	npxintr();
+#endif
+	/*
+	 * Partially reset the coprocessor, if any. Some BIOS's don't reset
+	 * it after a warm boot.
+	 */
+	outb(0xf1, 0); /* full reset on some systems, NOP on others */
+	outb(0xf0, 0); /* clear BUSY# latch */
+	/*
+	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
+	 * instructions. We must set the CR0_MP bit and use the CR0_TS
+	 * bit to control the trap, because setting the CR0_EM bit does
+	 * not cause WAIT instructions to trap. It's important to trap
+	 * WAIT instructions - otherwise the "wait" variants of no-wait
+	 * control instructions would degenerate to the "no-wait" variants
+	 * after FP context switches but work correctly otherwise. It's
+	 * particularly important to trap WAITs when there is no NPX -
+	 * otherwise the "wait" variants would always degenerate.
+	 *
+	 * Try setting CR0_NE to get correct error reporting on 486DX's.
+	 * Setting it should fail or do nothing on lesser processors.
+	 */
+	load_cr0(rcr0() | CR0_MP | CR0_NE);
+	/*
+	 * But don't trap while we're probing.
+	 */
+	stop_emulating();
+	/*
+	 * Finish resetting the coprocessor, if any. If there is an error
+	 * pending, then we may get a bogus IRQ13, but probeintr() will handle
+	 * it OK. Bogus halts have never been observed, but we enabled
+	 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
+	 */
+	fninit();
+	DELAY(1000); /* wait for any IRQ13 (fwait might hang) */
+#ifdef DIAGNOSTIC
+	if (npx_intrs_while_probing != 0)
+		printf("fninit caused %u bogus npx interrupt(s)\n",
+		       npx_intrs_while_probing);
+	if (npx_traps_while_probing != 0)
+		printf("fninit caused %u bogus npx trap(s)\n",
+		       npx_traps_while_probing);
+#endif
+	/*
+	 * Check for a status of mostly zero.
+	 */
+	status = 0x5a5a;
+	fnstsw(&status);
+	if ((status & 0xb8ff) == 0) {
+		/*
+		 * Good, now check for a proper control word.
+		 */
+		control = 0x5a5a;
+		fnstcw(&control);
+		if ((control & 0x1f3f) == 0x033f) {
+			npx_exists = 1;
+			/*
+			 * We have an npx, now divide by 0 to see if exception
+			 * 16 works.
+			 */
+			control &= ~(1 << 2); /* enable divide by 0 trap */
+			fldcw(&control);
+			npx_traps_while_probing = npx_intrs_while_probing = 0;
+			fp_divide_by_0();
+			if (npx_traps_while_probing != 0) {
+				/*
+				 * Good, exception 16 works.
+				 */
+				npx_ex16 = 1;
+				dvp->id_irq = 0; /* zap the interrupt */
+				/*
+				 * special return value to flag that we do not
+				 * actually use any I/O registers
+				 */
+				return (-1);
+			}
+			if (npx_intrs_while_probing != 0) {
+				/*
+				 * Bad, we are stuck with IRQ13.
+				 */
+				npx_irq13 = 1;
+				npx0_imask = dvp->id_irq; /* npxattach too late */
+				return (IO_NPXSIZE);
+			}
+			/*
+			 * Worse, even IRQ13 is broken. Use emulator.
+			 */
+		}
+	}
+	/*
+	 * Probe failed, but we want to get to npxattach to initialize the
+	 * emulator and say that it has been installed. XXX handle devices
+	 * that aren't really devices better.
+	 */
+	dvp->id_irq = 0;
+	/*
+	 * special return value to flag that we do not
+	 * actually use any I/O registers
+	 */
+	return (-1);
+}
+
+/*
+ * Attach routine - announce which it is, and wire into system
+ *
+ * If the probe found neither exception-16 nor IRQ13 error reporting,
+ * disable use of the hardware npx (if any) and announce the 387
+ * emulator instead.  Always (re)initializes the FPU state with the
+ * default control word.  The return value is currently unused.
+ */
+int
+npxattach(dvp)
+	struct isa_device *dvp;
+{
+	if (!npx_ex16 && !npx_irq13) {
+		if (npx_exists) {
+			printf("npx%d: Error reporting broken, using 387 emulator\n",dvp->id_unit);
+			npx_exists = 0;
+		} else {
+			printf("npx%d: 387 Emulator\n",dvp->id_unit);
+		}
+	}
+	npxinit(__INITIAL_NPXCW__);
+	return (1);		/* XXX unused */
+}
+
+/*
+ * Initialize floating point unit, loading `control' as the FPU control
+ * word.  No-op when no hardware npx was found.
+ */
+void
+npxinit(control)
+	u_int control;
+{
+	struct save87 dummy;	/* throw-away target for the cleansing fnsave */
+
+	if (!npx_exists)
+		return;
+	/*
+	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
+	 * fnsave to throw away any junk in the fpu.  fnsave initializes
+	 * the fpu and sets npxproc = NULL as important side effects.
+	 */
+	npxsave(&dummy);
+	stop_emulating();
+	fldcw(&control);
+	if (curpcb != NULL)
+		fnsave(&curpcb->pcb_savefpu);	/* seed the pcb with the fresh state */
+	start_emulating();
+}
+
+/*
+ * Free coprocessor (if we have it).
+ *
+ * If the exiting process `p' currently owns the npx, re-enable DNA
+ * trapping and mark the fpu as unowned.  Nothing to do otherwise.
+ */
+void
+npxexit(p)
+	struct proc *p;
+{
+
+	if (p == npxproc) {
+		start_emulating();
+		npxproc = NULL;
+	}
+}
+
+/*
+ * IRQ13 handler.
+ *
+ * Record the FPU state and reinitialize it all except for the control word.
+ * Then generate a SIGFPE.
+ *
+ * Reinitializing the state allows naive SIGFPE handlers to longjmp without
+ * doing any fixups.
+ *
+ * XXX there is currently no way to pass the full error state to signal
+ * handlers, and if this is a nested interrupt there is no way to pass even
+ * a status code!  So there is no way to have a non-naive SIGFPE handler.  At
+ * best a handler could do an fninit followed by an fldcw of a static value.
+ * fnclex would be of little use because it would leave junk on the FPU stack.
+ * Returning from the handler would be even less safe than usual because
+ * IRQ13 exception handling makes exceptions even less precise than usual.
+ */
+void
+npxintr(frame)
+	struct intrframe frame;
+{
+	int code;		/* detail code handed to trapsignal() */
+
+	if (npxproc == NULL || !npx_exists) {
+		/* XXX no %p in stand/printf.c.  Cast to quiet gcc -Wall. */
+		printf("npxintr: npxproc = %lx, curproc = %lx, npx_exists = %d\n",
+		       (u_long) npxproc, (u_long) curproc, npx_exists);
+		panic("npxintr from nowhere");
+	}
+	if (npxproc != curproc) {
+		printf("npxintr: npxproc = %lx, curproc = %lx, npx_exists = %d\n",
+		       (u_long) npxproc, (u_long) curproc, npx_exists);
+		panic("npxintr from non-current process");
+	}
+	/*
+	 * Save state.  This does an implied fninit.  It had better not halt
+	 * the cpu or we'll hang.
+	 */
+	outb(0xf0, 0);		/* clear BUSY# latch first */
+	fnsave(&curpcb->pcb_savefpu);
+	fwait();
+	/*
+	 * Restore control word (was clobbered by fnsave).
+	 */
+	fldcw(&curpcb->pcb_savefpu.sv_env.en_cw);
+	fwait();
+	/*
+	 * Remember the exception status word and tag word.  The current
+	 * (almost fninit'ed) fpu state is in the fpu and the exception
+	 * state just saved will soon be junk.  However, the implied fninit
+	 * doesn't change the error pointers or register contents, and we
+	 * preserved the control word and will copy the status and tag
+	 * words, so the complete exception state can be recovered.
+	 */
+	curpcb->pcb_savefpu.sv_ex_sw = curpcb->pcb_savefpu.sv_env.en_sw;
+	curpcb->pcb_savefpu.sv_ex_tw = curpcb->pcb_savefpu.sv_env.en_tw;
+
+	/*
+	 * Pass exception to process.
+	 */
+	if (ISPL(frame.if_cs) == SEL_UPL) {
+		/*
+		 * Interrupt is essentially a trap, so we can afford to call
+		 * the SIGFPE handler (if any) as soon as the interrupt
+		 * returns.
+		 *
+		 * XXX little or nothing is gained from this, and plenty is
+		 * lost - the interrupt frame has to contain the trap frame
+		 * (this is otherwise only necessary for the rescheduling trap
+		 * in doreti, and the frame for that could easily be set up
+		 * just before it is used).
+		 */
+		curproc->p_md.md_regs = (int *)&frame.if_es;
+#ifdef notyet
+		/*
+		 * Encode the appropriate code for detailed information on
+		 * this exception.
+		 */
+		code = XXX_ENCODE(curpcb->pcb_savefpu.sv_ex_sw);
+#else
+		code = 0;	/* XXX */
+#endif
+		trapsignal(curproc, SIGFPE, code);
+	} else {
+		/*
+		 * Nested interrupt.  These losers occur when:
+		 *	o an IRQ13 is bogusly generated at a bogus time, e.g.:
+		 *		o immediately after an fnsave or frstor of an
+		 *		  error state.
+		 *		o a couple of 386 instructions after
+		 *		  "fstpl _memvar" causes a stack overflow.
+		 *	  These are especially nasty when combined with a
+		 *	  trace trap.
+		 *	o an IRQ13 occurs at the same time as another higher-
+		 *	  priority interrupt.
+		 *
+		 * Treat them like a true async interrupt.
+		 */
+		psignal(npxproc, SIGFPE);
+	}
+}
+
+/*
+ * Implement device not available (DNA) exception
+ *
+ * Returns 0 when there is no hardware npx (caller must fall back to
+ * emulation); otherwise claims the npx for curproc, restores its saved
+ * FPU context and returns 1.  Panics if some other process still owns
+ * the npx - ownership must have been released (npxsave) beforehand.
+ *
+ * It would be better to switch FP context here (only).  This would require
+ * saving the state in the proc table instead of in the pcb.
+ */
+int
+npxdna()
+{
+	if (!npx_exists)
+		return (0);
+	if (npxproc != NULL) {
+		printf("npxdna: npxproc = %lx, curproc = %lx\n",
+		       (u_long) npxproc, (u_long) curproc);
+		panic("npxdna");
+	}
+	stop_emulating();
+	/*
+	 * Record new context early in case frstor causes an IRQ13.
+	 */
+	npxproc = curproc;
+	/*
+	 * The following frstor may cause an IRQ13 when the state being
+	 * restored has a pending error.  The error will appear to have been
+	 * triggered by the current (npx) user instruction even when that
+	 * instruction is a no-wait instruction that should not trigger an
+	 * error (e.g., fnclex).  On at least one 486 system all of the
+	 * no-wait instructions are broken the same as frstor, so our
+	 * treatment does not amplify the breakage.  On at least one
+	 * 386/Cyrix 387 system, fnclex works correctly while frstor and
+	 * fnsave are broken, so our treatment breaks fnclex if it is the
+	 * first FPU instruction after a context switch.
+	 */
+	frstor(&curpcb->pcb_savefpu);
+
+	return (1);
+}
+
+/*
+ * Wrapper for fnsave instruction to handle h/w bugs.  If there is an error
+ * pending, then fnsave generates a bogus IRQ13 on some systems.  Force
+ * any IRQ13 to be handled immediately, and then ignore it.  This routine is
+ * often called at splhigh so it must not use many system services.  In
+ * particular, it's much easier to install a special handler than to
+ * guarantee that it's safe to use npxintr() and its supporting code.
+ *
+ * On return the FPU state is in *addr, npxproc is NULL, DNA trapping is
+ * re-enabled (start_emulating), and the PIC masks and IDT entry are back
+ * to their pre-call values (modulo mask changes made by others meanwhile).
+ */
+void
+npxsave(addr)
+	struct save87 *addr;
+{
+	u_char icu1_mask;
+	u_char icu2_mask;
+	u_char old_icu1_mask;
+	u_char old_icu2_mask;
+	struct gate_descriptor save_idt_npxintr;
+
+	/* unmask the npx IRQ and point its vector at the probe handler */
+	disable_intr();
+	old_icu1_mask = inb(IO_ICU1 + 1);
+	old_icu2_mask = inb(IO_ICU2 + 1);
+	save_idt_npxintr = idt[npx_intrno];
+	outb(IO_ICU1 + 1, old_icu1_mask & ~(IRQ_SLAVE | npx0_imask));
+	outb(IO_ICU2 + 1, old_icu2_mask & ~(npx0_imask >> 8));
+	idt[npx_intrno] = npx_idt_probeintr;
+	enable_intr();
+	stop_emulating();
+	fnsave(addr);
+	fwait();
+	start_emulating();
+	npxproc = NULL;
+	/* restore the npx IRQ mask bits without clobbering other changes */
+	disable_intr();
+	icu1_mask = inb(IO_ICU1 + 1);	/* masks may have changed */
+	icu2_mask = inb(IO_ICU2 + 1);
+	outb(IO_ICU1 + 1,
+	     (icu1_mask & ~npx0_imask) | (old_icu1_mask & npx0_imask));
+	outb(IO_ICU2 + 1,
+	     (icu2_mask & ~(npx0_imask >> 8))
+	     | (old_icu2_mask & (npx0_imask >> 8)));
+	idt[npx_intrno] = save_idt_npxintr;
+	enable_intr();		/* back to usual state */
+}
+
+#endif /* NNPX > 0 */
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
new file mode 100644
index 0000000..a75d1f1
--- /dev/null
+++ b/sys/amd64/amd64/genassym.c
@@ -0,0 +1,192 @@
+/*-
+ * Copyright (c) 1982, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
+ * $Id: genassym.c,v 1.6 1993/11/13 02:24:59 davidg Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/buf.h>
+#include <sys/map.h>
+#include <sys/proc.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <machine/cpu.h>
+#include <machine/trap.h>
+#include <machine/psl.h>
+#include <machine/reg.h>
+#include <sys/syscall.h>
+#include <vm/vm.h>
+#include <sys/user.h>
+
+/*
+ * Generate assym.s: print selected C structure member offsets and
+ * kernel constants as cpp #defines, for inclusion by the assembly
+ * sources (locore includes the output as "assym.s").
+ *
+ * Offsets are obtained with the classical pre-offsetof() idiom: take
+ * the address of a member in a structure based at address 0 and print
+ * the resulting pointer with %d.
+ */
+main()
+{
+	struct proc *p = (struct proc *)0;
+	struct vmmeter *vm = (struct vmmeter *)0;
+	struct user *up = (struct user *)0;
+	struct rusage *rup = (struct rusage *)0;
+	struct uprof *uprof = (struct uprof *)0;
+	struct vmspace *vms = (struct vmspace *)0;
+	vm_map_t map = (vm_map_t)0;
+	pmap_t pmap = (pmap_t)0;
+	struct pcb *pcb = (struct pcb *)0;
+	struct trapframe *tf = (struct trapframe *)0;
+	struct sigframe *sigf = (struct sigframe *)0;
+	register unsigned i;	/* XXX unused */
+
+	printf("#define\tI386_CR3PAT %d\n", I386_CR3PAT);
+	printf("#define\tUDOT_SZ %d\n", sizeof(struct user));
+	/* struct proc member offsets */
+	printf("#define\tP_LINK %d\n", &p->p_forw);
+	printf("#define\tP_RLINK %d\n", &p->p_back);
+	printf("#define\tP_VMSPACE %d\n", &p->p_vmspace);
+	printf("#define\tVM_PMAP %d\n", &vms->vm_pmap);
+	printf("#define\tP_ADDR %d\n", &p->p_addr);
+	printf("#define\tP_PRI %d\n", &p->p_priority);
+	printf("#define\tP_STAT %d\n", &p->p_stat);
+	printf("#define\tP_WCHAN %d\n", &p->p_wchan);
+	printf("#define\tP_FLAG %d\n", &p->p_flag);
+	printf("#define\tP_PID %d\n", &p->p_pid);
+	printf("#define\tSSLEEP %d\n", SSLEEP);
+	printf("#define\tSRUN %d\n", SRUN);
+	/* vmmeter statistics counter offsets */
+	printf("#define\tV_SWTCH %d\n", &vm->v_swtch);
+	printf("#define\tV_TRAP %d\n", &vm->v_trap);
+	printf("#define\tV_SYSCALL %d\n", &vm->v_syscall);
+	printf("#define\tV_INTR %d\n", &vm->v_intr);
+	printf("#define\tV_SOFT %d\n", &vm->v_soft);
+/*	printf("#define\tV_PDMA %d\n", &vm->v_pdma); */
+	printf("#define\tV_FAULTS %d\n", &vm->v_faults);
+/*	printf("#define\tV_PGREC %d\n", &vm->v_pgrec); */
+/*	printf("#define\tV_FASTPGREC %d\n", &vm->v_fastpgrec); */
+	/* VM layout and page-table geometry constants */
+	printf("#define\tUPAGES %d\n", UPAGES);
+	printf("#define\tHIGHPAGES %d\n", HIGHPAGES);
+	printf("#define\tCLSIZE %d\n", CLSIZE);
+	printf("#define\tNBPG %d\n", NBPG);
+	printf("#define\tNPTEPG %d\n", NPTEPG);
+	printf("#define\tPDESIZE %d\n", PDESIZE);
+	printf("#define\tPTESIZE %d\n", PTESIZE);
+	printf("#define\tNKPDE %d\n", NKPDE);
+	printf("#define\tNKPT %d\n", NKPT);
+	printf("#define\tKPTDI 0x%x\n", KPTDI);
+	printf("#define\tKSTKPTDI 0x%x\n", KSTKPTDI);
+	printf("#define\tKSTKPTEOFF 0x%x\n", KSTKPTEOFF);
+	printf("#define\tPTDPTDI 0x%x\n", PTDPTDI);
+	printf("#define\tAPTDPTDI 0x%x\n", APTDPTDI);
+	printf("#define\tPGSHIFT %d\n", PGSHIFT);
+	printf("#define\tPDRSHIFT %d\n", PDRSHIFT);
+	printf("#define\tSYSPTSIZE %d\n", SYSPTSIZE);
+	printf("#define\tUSRPTSIZE %d\n", USRPTSIZE);
+	printf("#define\tUSRIOSIZE %d\n", USRIOSIZE);
+#ifdef SYSVSHM
+	printf("#define\tSHMMAXPGS %d\n", SHMMAXPGS);
+#endif
+	printf("#define\tUSRSTACK 0x%x\n", USRSTACK);
+	printf("#define\tVM_MAXUSER_ADDRESS 0x%x\n", VM_MAXUSER_ADDRESS);
+	printf("#define\tKERNBASE 0x%x\n", KERNBASE);
+	printf("#define\tMSGBUFPTECNT %d\n", btoc(sizeof (struct msgbuf)));
+	printf("#define\tNMBCLUSTERS %d\n", NMBCLUSTERS);
+	printf("#define\tMCLBYTES %d\n", MCLBYTES);
+	/* pcb / embedded TSS member offsets */
+	printf("#define\tPCB_LINK %d\n", &pcb->pcb_tss.tss_link);
+	printf("#define\tPCB_ESP0 %d\n", &pcb->pcb_tss.tss_esp0);
+	printf("#define\tPCB_SS0 %d\n", &pcb->pcb_tss.tss_ss0);
+	printf("#define\tPCB_ESP1 %d\n", &pcb->pcb_tss.tss_esp1);
+	printf("#define\tPCB_SS1 %d\n", &pcb->pcb_tss.tss_ss1);
+	printf("#define\tPCB_ESP2 %d\n", &pcb->pcb_tss.tss_esp2);
+	printf("#define\tPCB_SS2 %d\n", &pcb->pcb_tss.tss_ss2);
+	printf("#define\tPCB_CR3 %d\n", &pcb->pcb_tss.tss_cr3);
+	printf("#define\tPCB_EIP %d\n", &pcb->pcb_tss.tss_eip);
+	printf("#define\tPCB_EFLAGS %d\n", &pcb->pcb_tss.tss_eflags);
+	printf("#define\tPCB_EAX %d\n", &pcb->pcb_tss.tss_eax);
+	printf("#define\tPCB_ECX %d\n", &pcb->pcb_tss.tss_ecx);
+	printf("#define\tPCB_EDX %d\n", &pcb->pcb_tss.tss_edx);
+	printf("#define\tPCB_EBX %d\n", &pcb->pcb_tss.tss_ebx);
+	printf("#define\tPCB_ESP %d\n", &pcb->pcb_tss.tss_esp);
+	printf("#define\tPCB_EBP %d\n", &pcb->pcb_tss.tss_ebp);
+	printf("#define\tPCB_ESI %d\n", &pcb->pcb_tss.tss_esi);
+	printf("#define\tPCB_EDI %d\n", &pcb->pcb_tss.tss_edi);
+	printf("#define\tPCB_ES %d\n", &pcb->pcb_tss.tss_es);
+	printf("#define\tPCB_CS %d\n", &pcb->pcb_tss.tss_cs);
+	printf("#define\tPCB_SS %d\n", &pcb->pcb_tss.tss_ss);
+	printf("#define\tPCB_DS %d\n", &pcb->pcb_tss.tss_ds);
+	printf("#define\tPCB_FS %d\n", &pcb->pcb_tss.tss_fs);
+	printf("#define\tPCB_GS %d\n", &pcb->pcb_tss.tss_gs);
+	printf("#define\tPCB_LDT %d\n", &pcb->pcb_tss.tss_ldt);
+	printf("#define\tPCB_USERLDT %d\n", &pcb->pcb_ldt);
+	printf("#define\tPCB_IOOPT %d\n", &pcb->pcb_tss.tss_ioopt);
+	/* profiling and rusage offsets */
+	printf("#define\tU_PROF %d\n", &up->u_stats.p_prof);
+	printf("#define\tU_PROFSCALE %d\n", &up->u_stats.p_prof.pr_scale);
+	printf("#define\tPR_BASE %d\n", &uprof->pr_base);
+	printf("#define\tPR_SIZE %d\n", &uprof->pr_size);
+	printf("#define\tPR_OFF %d\n", &uprof->pr_off);
+	printf("#define\tPR_SCALE %d\n", &uprof->pr_scale);
+	printf("#define\tRU_MINFLT %d\n", &rup->ru_minflt);
+	printf("#define\tPCB_FLAGS %d\n", &pcb->pcb_flags);
+	printf("#define\tPCB_SAVEFPU %d\n", &pcb->pcb_savefpu);
+	printf("#define\tFP_USESEMC %d\n", FP_USESEMC);
+	printf("#define\tPCB_SAVEEMC %d\n", &pcb->pcb_saveemc);
+	printf("#define\tPCB_CMAP2 %d\n", &pcb->pcb_cmap2);
+	printf("#define\tPCB_IML %d\n", &pcb->pcb_iml);
+	printf("#define\tPCB_ONFAULT %d\n", &pcb->pcb_onfault);
+
+	/* trapframe member offsets */
+	printf("#define\tTF_ES %d\n", &tf->tf_es);
+	printf("#define\tTF_DS %d\n", &tf->tf_ds);
+	printf("#define\tTF_EDI %d\n", &tf->tf_edi);
+	printf("#define\tTF_ESI %d\n", &tf->tf_esi);
+	printf("#define\tTF_EBP %d\n", &tf->tf_ebp);
+	printf("#define\tTF_ISP %d\n", &tf->tf_isp);
+	printf("#define\tTF_EBX %d\n", &tf->tf_ebx);
+	printf("#define\tTF_EDX %d\n", &tf->tf_edx);
+	printf("#define\tTF_ECX %d\n", &tf->tf_ecx);
+	printf("#define\tTF_EAX %d\n", &tf->tf_eax);
+	printf("#define\tTF_TRAPNO %d\n", &tf->tf_trapno);
+	printf("#define\tTF_ERR %d\n", &tf->tf_err);
+	printf("#define\tTF_EIP %d\n", &tf->tf_eip);
+	printf("#define\tTF_CS %d\n", &tf->tf_cs);
+	printf("#define\tTF_EFLAGS %d\n", &tf->tf_eflags);
+	printf("#define\tTF_ESP %d\n", &tf->tf_esp);
+	printf("#define\tTF_SS %d\n", &tf->tf_ss);
+
+	/* sigframe member offsets */
+	printf("#define\tSIGF_SIGNUM %d\n", &sigf->sf_signum);
+	printf("#define\tSIGF_CODE %d\n", &sigf->sf_code);
+	printf("#define\tSIGF_SCP %d\n", &sigf->sf_scp);
+	printf("#define\tSIGF_HANDLER %d\n", &sigf->sf_handler);
+	printf("#define\tSIGF_SC %d\n", &sigf->sf_sc);
+
+	/* miscellaneous constants referenced from assembly */
+	printf("#define\tB_READ %d\n", B_READ);
+	printf("#define\tENOENT %d\n", ENOENT);
+	printf("#define\tEFAULT %d\n", EFAULT);
+	printf("#define\tENAMETOOLONG %d\n", ENAMETOOLONG);
+	exit(0);
+}
+
diff --git a/sys/amd64/amd64/locore.S b/sys/amd64/amd64/locore.S
new file mode 100644
index 0000000..538c690
--- /dev/null
+++ b/sys/amd64/amd64/locore.S
@@ -0,0 +1,518 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)locore.s 7.3 (Berkeley) 5/13/91
+ * $Id: locore.s,v 1.15 1994/02/01 04:08:54 davidg Exp $
+ */
+
+/*
+ * locore.s: FreeBSD machine support for the Intel 386
+ * originally from: locore.s, by William F. Jolitz
+ *
+ * Substantially rewritten by David Greenman, Rod Grimes,
+ * Bruce Evans, Wolfgang Solfrank, and many others.
+ */
+
+#include "npx.h" /* for NNPX */
+#include "assym.s" /* system definitions */
+#include "machine/psl.h" /* processor status longword defs */
+#include "machine/pte.h" /* page table entry definitions */
+#include "errno.h" /* error return codes */
+#include "machine/specialreg.h" /* x86 special registers */
+#include "machine/cputypes.h" /* x86 cpu type definitions */
+#include "syscall.h" /* system call numbers */
+#include "machine/asmacros.h" /* miscellaneous asm macros */
+
+/*
+ * XXX
+ *
+ * Note: This version greatly munged to avoid various assembler errors
+ * that may be fixed in newer versions of gas. Perhaps newer versions
+ * will have more pleasant appearance.
+ */
+
+/*
+ * PTmap is recursive pagemap at top of virtual address space.
+ * Within PTmap, the page directory can be found (third indirection).
+ */
+	.globl	_PTmap,_PTD,_PTDpde,_Sysmap
+	.set	_PTmap,PTDPTDI << PDRSHIFT
+	.set	_PTD,_PTmap + (PTDPTDI * NBPG)
+	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)
+
+/* Sysmap is the base address of the kernel page tables */
+	.set	_Sysmap,_PTmap + (KPTDI * NBPG)
+
+/*
+ * APTmap, APTD is the alternate recursive pagemap.
+ * It's used when modifying another process's page tables.
+ * Note: the APTD pde itself lives in the regular PTD, hence _PTD below.
+ */
+	.globl	_APTmap,_APTD,_APTDpde
+	.set	_APTmap,APTDPTDI << PDRSHIFT
+	.set	_APTD,_APTmap + (APTDPTDI * NBPG)
+	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)
+
+/*
+ * Access to each process's kernel stack is via a region of
+ * per-process address space (at the beginning), immediately above
+ * the user process stack.
+ */
+	.set	_kstack,USRSTACK
+	.globl	_kstack
+
+/*
+ * Globals
+ */
+	.data
+	.globl	_esym
+_esym:	.long	0			/* ptr to end of syms */
+
+	.globl	_boothowto,_bootdev,_curpcb
+
+	.globl	_cpu,_cold,_atdevbase
+_cpu:	.long	0			/* are we 386, 386sx, or 486 */
+_cold:	.long	1			/* cold till we are not */
+_atdevbase:	.long	0		/* location of start of iomem in virtual */
+_atdevphys:	.long	0		/* location of device mapping ptes (phys) */
+
+	.globl	_KERNend
+_KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
+
+	.globl	_IdlePTD,_KPTphys
+_IdlePTD:	.long	0		/* phys addr of kernel PTD */
+_KPTphys:	.long	0		/* phys addr of kernel page tables */
+
+	.globl	_cyloffset
+_cyloffset:	.long	0		/* cylinder offset from boot blocks */
+
+	.globl	_proc0paddr
+_proc0paddr:	.long	0		/* address of proc 0 address space */
+
+#ifdef BDE_DEBUGGER
+	.globl	_bdb_exists		/* flag to indicate BDE debugger is available */
+_bdb_exists:	.long	0
+#endif
+
+/* 0x1000-byte bootstrap stack; tmpstk labels its top (stack grows down) */
+	.globl	tmpstk
+	.space	0x1000
+tmpstk:
+
+
+/*
+ * System Initialization
+ */
+	.text
+
+/*
+ * btext: beginning of text section.
+ * Also the entry point (jumped to directly from the boot blocks).
+ */
+NON_GPROF_ENTRY(btext)
+	movw	$0x1234,0x472			/* warm boot */
+	jmp	1f
+	.org	0x500				/* space for BIOS variables */
+
+	/*
+	 * pass parameters on stack (howto, bootdev, unit, cyloffset, esym)
+	 * note: (%esp) is return address of boot
+	 * ( if we want to hold onto /boot, it's physical %esp up to _end)
+	 */
+
+ 1:	movl	4(%esp),%eax
+	movl	%eax,_boothowto-KERNBASE
+	movl	8(%esp),%eax
+	movl	%eax,_bootdev-KERNBASE
+	movl	12(%esp),%eax
+	movl	%eax,_cyloffset-KERNBASE
+	movl	16(%esp),%eax
+	addl	$KERNBASE,%eax
+	movl	%eax,_esym-KERNBASE
+#ifdef DISKLESS					/* Copy diskless structure */
+	movl	_nfs_diskless_size-KERNBASE,%ecx
+	movl	20(%esp),%esi
+	movl	$(_nfs_diskless-KERNBASE),%edi
+	cld
+	rep
+	movsb
+#endif
+
+	/*
+	 * find out our CPU type: flip bit 18 (AC) in EFLAGS; only a 486
+	 * or later lets it change, a 386 forces it back to 0.
+	 */
+	pushfl
+	popl	%eax
+	movl	%eax,%ecx
+	xorl	$0x40000,%eax
+	pushl	%eax
+	popfl
+	pushfl
+	popl	%eax
+	xorl	%ecx,%eax
+	shrl	$18,%eax
+	andl	$1,%eax				/* eax = 1 iff the bit stuck (486+) */
+	push	%ecx				/* restore original EFLAGS */
+	popfl
+
+	cmpl	$0,%eax
+	jne	1f
+	movl	$CPU_386,_cpu-KERNBASE
+	jmp	2f
+1:	movl	$CPU_486,_cpu-KERNBASE
+2:
+
+	/*
+	 * Finished with old stack; load new %esp now instead of later so
+	 * we can trace this code without having to worry about the trace
+	 * trap clobbering the memory test or the zeroing of the bss+bootstrap
+	 * page tables.
+	 *
+	 * XXX - wdboot clears the bss after testing that this is safe.
+	 * This is too wasteful - memory below 640K is scarce.  The boot
+	 * program should check:
+	 *	text+data <= &stack_variable - more_space_for_stack
+	 *	text+data+bss+pad+space_for_page_tables <= end_of_memory
+	 * Oops, the gdt is in the carcass of the boot program so clearing
+	 * the rest of memory is still not possible.
+	 */
+	movl	$tmpstk-KERNBASE,%esp		/* bootstrap stack end location */
+
+/*
+ * Virtual address space of kernel:
+ *
+ *	text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
+ *      pages:                          1         UPAGES (2)             1         NKPT (7)
+ */
+
+/* find end of kernel image */
+	movl	$_end-KERNBASE,%ecx
+	addl	$NBPG-1,%ecx			/* page align up */
+	andl	$~(NBPG-1),%ecx
+	movl	%ecx,%esi			/* esi = start of free memory */
+	movl	%ecx,_KERNend-KERNBASE		/* save end of kernel */
+
+/* clear bss */
+	movl	$_edata-KERNBASE,%edi
+	subl	%edi,%ecx			/* get amount to clear */
+	xorl	%eax,%eax			/* specify zero fill */
+	cld
+	rep
+	stosb
+
+/*
+ * The value in esi is both the end of the kernel bss and a pointer to
+ * the kernel page directory, and is used by the rest of locore to build
+ * the tables.
+ * esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPT(number of kernel
+ * page table pages) is then passed on the stack to init386(first) as
+ * the value first. esi should ALWAYS be page aligned!!
+ */
+	movl	%esi,%ecx			/* Get current first available address */
+
+/* clear pagetables, page directory, stack, etc... */
+	movl	%esi,%edi			/* base (page directory) */
+	movl	$((1+UPAGES+1+NKPT)*NBPG),%ecx	/* amount to clear */
+	xorl	%eax,%eax			/* specify zero fill */
+	cld
+	rep
+	stosb
+
+/* physical address of Idle proc/kernel page directory */
+	movl	%esi,_IdlePTD-KERNBASE
+
+/*
+ * fillkpt - store %ecx consecutive ptes, bumping the frame addr per page
+ *	eax = (page frame address | control | status) == pte
+ *	ebx = address of page table
+ *	ecx = how many pages to map
+ */
+#define	fillkpt		\
+1:	movl	%eax,(%ebx)	; \
+	addl	$NBPG,%eax	; /* increment physical address */ \
+	addl	$4,%ebx		; /* next pte */ \
+	loop	1b		;
+
+/*
+ * Map Kernel
+ *
+ * First step - build page tables
+ */
+#if defined (KGDB) || defined (BDE_DEBUGGER)
+	movl	_KERNend-KERNBASE,%ecx		/* this much memory, */
+	shrl	$PGSHIFT,%ecx			/* for this many PTEs */
+#ifdef BDE_DEBUGGER
+	cmpl	$0xa0,%ecx			/* XXX - cover debugger pages */
+	jae	1f
+	movl	$0xa0,%ecx
+1:
+#endif /* BDE_DEBUGGER */
+	movl	$PG_V|PG_KW|PG_NC_PWT,%eax	/* kernel R/W, valid, cache write-through */
+	lea	((1+UPAGES+1)*NBPG)(%esi),%ebx	/* phys addr of kernel PT base */
+	movl	%ebx,_KPTphys-KERNBASE		/* save in global */
+	fillkpt
+
+#else /* !KGDB && !BDE_DEBUGGER */
+	/* write protect kernel text (doesn't do a thing for 386's - only 486's) */
+	movl	$_etext-KERNBASE,%ecx		/* get size of text */
+	shrl	$PGSHIFT,%ecx			/* for this many PTEs */
+	movl	$PG_V|PG_KR,%eax		/* specify read only */
+	lea	((1+UPAGES+1)*NBPG)(%esi),%ebx	/* phys addr of kernel PT base */
+	movl	%ebx,_KPTphys-KERNBASE		/* save in global */
+	fillkpt
+
+	/* data and bss are r/w */
+	andl	$PG_FRAME,%eax			/* strip to just addr of bss */
+	movl	_KERNend-KERNBASE,%ecx		/* calculate size */
+	subl	%eax,%ecx
+	shrl	$PGSHIFT,%ecx
+	orl	$PG_V|PG_KW,%eax		/* valid, kernel read/write */
+	fillkpt
+#endif /* KGDB || BDE_DEBUGGER */
+
+/* now initialize the page dir, upages, p0stack PT, and page tables */
+
+	movl	$(1+UPAGES+1+NKPT),%ecx		/* number of PTEs */
+	movl	%esi,%eax			/* phys address of PTD */
+	andl	$PG_FRAME,%eax			/* convert to PFN, should be a NOP */
+	orl	$PG_V|PG_KW|PG_NC_PWT,%eax	/* valid, kernel read/write, cache write-through */
+	movl	%esi,%ebx			/* calculate pte offset to ptd */
+	shrl	$PGSHIFT-2,%ebx
+	addl	%esi,%ebx			/* address of page directory */
+	addl	$((1+UPAGES+1)*NBPG),%ebx	/* offset to kernel page tables */
+	fillkpt
+
+/* map I/O memory map */
+
+	movl	_KPTphys-KERNBASE,%ebx		/* base of kernel page tables */
+	lea	(0xa0 * PTESIZE)(%ebx),%ebx	/* hardwire ISA hole at KERNBASE + 0xa0000 */
+	movl	$0x100-0xa0,%ecx		/* for this many ptes, */
+	movl	$(0xa0000|PG_V|PG_KW|PG_N),%eax	/* valid, kernel read/write, non-cacheable */
+	movl	%ebx,_atdevphys-KERNBASE	/* save phys addr of ptes */
+	fillkpt
+
+	/* map proc 0's kernel stack into user page table page */
+
+	movl	$UPAGES,%ecx			/* for this many ptes, */
+	lea	(1*NBPG)(%esi),%eax		/* physical address in proc 0 */
+	lea	(KERNBASE)(%eax),%edx		/* change into virtual addr */
+	movl	%edx,_proc0paddr-KERNBASE	/* save VA for proc 0 init */
+	orl	$PG_V|PG_KW,%eax		/* valid, kernel read/write */
+	lea	((1+UPAGES)*NBPG)(%esi),%ebx	/* addr of stack page table in proc 0 */
+	addl	$(KSTKPTEOFF * PTESIZE),%ebx	/* offset to kernel stack PTE */
+	fillkpt
+
+/*
+ * Initialize kernel page table directory
+ */
+	/* install a pde for temporary double map of bottom of VA */
+	movl	_KPTphys-KERNBASE,%eax
+	orl	$PG_V|PG_KW,%eax		/* valid, kernel read/write */
+	movl	%eax,(%esi)			/* which is where temp maps! */
+
+	/* initialize kernel pde's */
+	movl	$(NKPT),%ecx			/* for this many PDEs */
+	lea	(KPTDI*PDESIZE)(%esi),%ebx	/* offset of pde for kernel */
+	fillkpt
+
+	/* install a pde recursively mapping page directory as a page table! */
+	movl	%esi,%eax			/* phys address of ptd in proc 0 */
+	orl	$PG_V|PG_KW,%eax		/* pde entry is valid */
+	movl	%eax,PTDPTDI*PDESIZE(%esi)	/* which is where PTmap maps! */
+
+	/* install a pde to map kernel stack for proc 0 */
+	lea	((1+UPAGES)*NBPG)(%esi),%eax	/* physical address of pt in proc 0 */
+	orl	$PG_V|PG_KW,%eax		/* pde entry is valid */
+	movl	%eax,KSTKPTDI*PDESIZE(%esi)	/* which is where kernel stack maps! */
+
+#ifdef BDE_DEBUGGER
+	/* copy and convert stuff from old gdt and idt for debugger */
+
+	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
+	jne	1f
+	movb	$1,_bdb_exists-KERNBASE
+1:
+	pushal
+	subl	$2*6,%esp
+
+	sgdt	(%esp)
+	movl	2(%esp),%esi		/* base address of current gdt */
+	movl	$_gdt-KERNBASE,%edi
+	movl	%edi,2(%esp)
+	movl	$8*18/4,%ecx
+	cld
+	rep				/* copy gdt */
+	movsl
+	movl	$_gdt-KERNBASE,-8+2(%edi)	/* adjust gdt self-ptr */
+	movb	$0x92,-8+5(%edi)
+
+	sidt	6(%esp)
+	movl	6+2(%esp),%esi		/* base address of current idt */
+	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
+	movw	8(%esi),%ax
+	movl	%eax,bdb_dbg_ljmp+1-KERNBASE	/* ... immediate offset ... */
+	movl	8+2(%esi),%eax
+	movw	%ax,bdb_dbg_ljmp+5-KERNBASE	/* ... and selector for ljmp */
+	movl	24+4(%esi),%eax		/* same for bpt descriptor */
+	movw	24(%esi),%ax
+	movl	%eax,bdb_bpt_ljmp+1-KERNBASE
+	movl	24+2(%esi),%eax
+	movw	%ax,bdb_bpt_ljmp+5-KERNBASE
+
+	movl	$_idt-KERNBASE,%edi
+	movl	%edi,6+2(%esp)
+	movl	$8*4/4,%ecx
+	cld
+	rep				/* copy idt */
+	movsl
+
+	lgdt	(%esp)
+	lidt	6(%esp)
+
+	addl	$2*6,%esp
+	popal
+#endif /* BDE_DEBUGGER */
+
+	/* load base of page directory and enable mapping */
+	movl	%esi,%eax		/* phys address of ptd in proc 0 */
+	orl	$I386_CR3PAT,%eax
+	movl	%eax,%cr3		/* load ptd addr into mmu */
+	movl	%cr0,%eax		/* get control word */
+	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
+	movl	%eax,%cr0		/* and let's page NOW! */
+
+	pushl	$begin			/* jump to high mem */
+	ret				/* "return" to begin's link-time (KERNBASE) address */
+
+begin: /* now running relocated at KERNBASE where the system is linked to run */
+
+	.globl _Crtat			/* XXX - locore should not know about */
+	movl	_Crtat,%eax		/* variables of device drivers (pccons)! */
+	subl	$(KERNBASE+0xA0000),%eax
+	movl	_atdevphys,%edx	/* get pte PA */
+	subl	_KPTphys,%edx	/* remove base of ptes, now have phys offset */
+	shll	$PGSHIFT-2,%edx	/* corresponding to virt offset */
+	addl	$KERNBASE,%edx	/* add virtual base */
+	movl	%edx,_atdevbase
+	addl	%eax,%edx
+	movl	%edx,_Crtat
+
+	/* set up bootstrap stack - 48 bytes */
+	movl	$_kstack+UPAGES*NBPG-4*12,%esp	/* bootstrap stack end location */
+	xorl	%eax,%eax		/* mark end of frames */
+	movl	%eax,%ebp
+	movl	_proc0paddr,%eax
+	movl	%esi,PCB_CR3(%eax)
+
+#ifdef BDE_DEBUGGER
+	/* relocate debugger gdt entries */
+
+	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
+	movl	$9,%ecx
+reloc_gdt:
+	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
+	addl	$8,%eax			/* now KERNBASE>>24 */
+	loop	reloc_gdt
+
+	cmpl	$0,_bdb_exists
+	je	1f
+	int	$3
+1:
+#endif /* BDE_DEBUGGER */
+
+	/*
+	 * Skip over the page tables and the kernel stack
+	 */
+	lea	((1+UPAGES+1+NKPT)*NBPG)(%esi),%esi
+
+	pushl	%esi			/* value of first for init386(first) */
+	call	_init386		/* wire 386 chip for unix operation */
+	popl	%esi
+
+#if 0
+	movl	$0,_PTD
+#endif
+
+	.globl	__ucodesel,__udatasel
+
+	/* build a fake user-mode trap frame for the eventual iret below */
+	pushl	$0			/* unused */
+	pushl	__udatasel		/* ss */
+	pushl	$0			/* esp - filled in by execve() */
+	pushl	$0x3200			/* eflags (ring 3, int enab) */
+	pushl	__ucodesel		/* cs */
+	pushl	$0			/* eip - filled in by execve() */
+	subl	$(12*4),%esp		/* space for rest of registers */
+
+	pushl	%esp			/* call main with frame pointer */
+	call	_main			/* autoconfiguration, mountroot etc */
+
+	addl	$(13*4),%esp		/* back to a frame we can return with */
+
+	/*
+	 * now we've run main() and determined what cpu-type we are, we can
+	 * enable WP mode on i486 cpus and above.
+	 */
+#if defined(I486_CPU) || defined(I586_CPU)
+	cmpl	$CPUCLASS_386,_cpu_class
+	je	1f
+	movl	%cr0,%eax		/* get control word */
+	orl	$CR0_WP,%eax		/* enable write protect for all modes */
+	movl	%eax,%cr0		/* and do it */
+#endif
+	/*
+	 * on return from main(), we are process 1
+	 * set up address space and stack so that we can 'return' to user mode
+	 */
+1:
+	movl	__ucodesel,%eax
+	movl	__udatasel,%ecx
+
+	movl	%cx,%ds
+	movl	%cx,%es
+	movl	%ax,%fs			/* double map cs to fs */
+	movl	%cx,%gs			/* and ds to gs */
+	iret				/* goto user! */
+
+/* emit a far call `lcall $x,$y' as raw bytes (0x9a = far-call opcode) */
+#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x
+
+/*
+ * Signal trampoline: invoke the user's handler, then issue syscall 103
+ * (sigreturn) on the saved context.
+ * NOTE(review): presumably copied out near the user's stack by the
+ * signal-delivery code (sendsig) - confirm against machdep.c.
+ */
+NON_GPROF_ENTRY(sigcode)
+	call	SIGF_HANDLER(%esp)
+	lea	SIGF_SC(%esp),%eax	/* scp (the call may have clobbered the */
+					/* copy at 8(%esp)) */
+	pushl	%eax
+	pushl	%eax			/* junk to fake return address */
+	movl	$103,%eax		/* XXX sigreturn() */
+	LCALL(0x7,0)			/* enter kernel with args on stack */
+	hlt				/* never gets here */
+
+	.globl	_szsigcode
+_szsigcode:
+	.long	_szsigcode-_sigcode	/* byte count of the trampoline above */
diff --git a/sys/amd64/amd64/locore.s b/sys/amd64/amd64/locore.s
new file mode 100644
index 0000000..538c690
--- /dev/null
+++ b/sys/amd64/amd64/locore.s
@@ -0,0 +1,518 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)locore.s 7.3 (Berkeley) 5/13/91
+ * $Id: locore.s,v 1.15 1994/02/01 04:08:54 davidg Exp $
+ */
+
+/*
+ * locore.s: FreeBSD machine support for the Intel 386
+ * originally from: locore.s, by William F. Jolitz
+ *
+ * Substantially rewritten by David Greenman, Rod Grimes,
+ * Bruce Evans, Wolfgang Solfrank, and many others.
+ */
+
+#include "npx.h" /* for NNPX */
+#include "assym.s" /* system definitions */
+#include "machine/psl.h" /* processor status longword defs */
+#include "machine/pte.h" /* page table entry definitions */
+#include "errno.h" /* error return codes */
+#include "machine/specialreg.h" /* x86 special registers */
+#include "machine/cputypes.h" /* x86 cpu type definitions */
+#include "syscall.h" /* system call numbers */
+#include "machine/asmacros.h" /* miscellaneous asm macros */
+
+/*
+ * XXX
+ *
+ * Note: This version greatly munged to avoid various assembler errors
+ * that may be fixed in newer versions of gas. Perhaps newer versions
+ * will have more pleasant appearance.
+ */
+
+/*
+ * PTmap is recursive pagemap at top of virtual address space.
+ * Within PTmap, the page directory can be found (third indirection).
+ */
+ .globl _PTmap,_PTD,_PTDpde,_Sysmap
+ .set _PTmap,PTDPTDI << PDRSHIFT
+ .set _PTD,_PTmap + (PTDPTDI * NBPG)
+ .set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
+
+/* Sysmap is the base address of the kernel page tables */
+ .set _Sysmap,_PTmap + (KPTDI * NBPG)
+
+/*
+ * APTmap, APTD is the alternate recursive pagemap.
+ * It's used when modifying another process's page tables.
+ */
+ .globl _APTmap,_APTD,_APTDpde
+ .set _APTmap,APTDPTDI << PDRSHIFT
+ .set _APTD,_APTmap + (APTDPTDI * NBPG)
+ .set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
+
+/*
+ * Access to each process's kernel stack is via a region of
+ * per-process address space (at the beginning), immediately above
+ * the user process stack.
+ */
+ .set _kstack,USRSTACK
+ .globl _kstack
+
+/*
+ * Globals
+ */
+ .data
+ .globl _esym
+_esym: .long 0 /* ptr to end of syms */
+
+ .globl _boothowto,_bootdev,_curpcb
+
+ .globl _cpu,_cold,_atdevbase
+_cpu: .long 0 /* are we 386, 386sx, or 486 */
+_cold: .long 1 /* cold till we are not */
+_atdevbase: .long 0 /* location of start of iomem in virtual */
+_atdevphys: .long 0 /* location of device mapping ptes (phys) */
+
+ .globl _KERNend
+_KERNend: .long 0 /* phys addr end of kernel (just after bss) */
+
+ .globl _IdlePTD,_KPTphys
+_IdlePTD: .long 0 /* phys addr of kernel PTD */
+_KPTphys: .long 0 /* phys addr of kernel page tables */
+
+ .globl _cyloffset
+_cyloffset: .long 0 /* cylinder offset from boot blocks */
+
+ .globl _proc0paddr
+_proc0paddr: .long 0 /* address of proc 0 address space */
+
+#ifdef BDE_DEBUGGER
+ .globl _bdb_exists /* flag to indicate BDE debugger is available */
+_bdb_exists: .long 0
+#endif
+
+ .globl tmpstk
+ .space 0x1000
+tmpstk:
+
+
+/*
+ * System Initialization
+ */
+ .text
+
+/*
+ * btext: beginning of text section.
+ * Also the entry point (jumped to directly from the boot blocks).
+ */
+NON_GPROF_ENTRY(btext)
+ movw $0x1234,0x472 /* warm boot */
+ jmp 1f
+ .org 0x500 /* space for BIOS variables */
+
+ /*
+ * pass parameters on stack (howto, bootdev, unit, cyloffset, esym)
+ * note: (%esp) is return address of boot
+ * ( if we want to hold onto /boot, it's physical %esp up to _end)
+ */
+
+ 1: movl 4(%esp),%eax
+ movl %eax,_boothowto-KERNBASE
+ movl 8(%esp),%eax
+ movl %eax,_bootdev-KERNBASE
+ movl 12(%esp),%eax
+ movl %eax,_cyloffset-KERNBASE
+ movl 16(%esp),%eax
+ addl $KERNBASE,%eax
+ movl %eax,_esym-KERNBASE
+#ifdef DISKLESS /* Copy diskless structure */
+ movl _nfs_diskless_size-KERNBASE,%ecx
+ movl 20(%esp),%esi
+ movl $(_nfs_diskless-KERNBASE),%edi
+ cld
+ rep
+ movsb
+#endif
+
+ /* find out our CPU type. */
+ pushfl
+ popl %eax
+ movl %eax,%ecx
+ xorl $0x40000,%eax
+ pushl %eax
+ popfl
+ pushfl
+ popl %eax
+ xorl %ecx,%eax
+ shrl $18,%eax
+ andl $1,%eax
+ push %ecx
+ popfl
+
+ cmpl $0,%eax
+ jne 1f
+ movl $CPU_386,_cpu-KERNBASE
+ jmp 2f
+1: movl $CPU_486,_cpu-KERNBASE
+2:
+
+ /*
+ * Finished with old stack; load new %esp now instead of later so
+ * we can trace this code without having to worry about the trace
+ * trap clobbering the memory test or the zeroing of the bss+bootstrap
+ * page tables.
+ *
+ * XXX - wdboot clears the bss after testing that this is safe.
+ * This is too wasteful - memory below 640K is scarce. The boot
+ * program should check:
+ * text+data <= &stack_variable - more_space_for_stack
+ * text+data+bss+pad+space_for_page_tables <= end_of_memory
+ * Oops, the gdt is in the carcass of the boot program so clearing
+ * the rest of memory is still not possible.
+ */
+ movl $tmpstk-KERNBASE,%esp /* bootstrap stack end location */
+
+/*
+ * Virtual address space of kernel:
+ *
+ * text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
+ * pages: 1 UPAGES (2) 1 NKPT (7)
+ */
+
+/* find end of kernel image */
+ movl $_end-KERNBASE,%ecx
+ addl $NBPG-1,%ecx /* page align up */
+ andl $~(NBPG-1),%ecx
+ movl %ecx,%esi /* esi = start of free memory */
+ movl %ecx,_KERNend-KERNBASE /* save end of kernel */
+
+/* clear bss */
+ movl $_edata-KERNBASE,%edi
+ subl %edi,%ecx /* get amount to clear */
+ xorl %eax,%eax /* specify zero fill */
+ cld
+ rep
+ stosb
+
+/*
+ * The value in esi is both the end of the kernel bss and a pointer to
+ * the kernel page directory, and is used by the rest of locore to build
+ * the tables.
+ * esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPT(number of kernel
+ * page table pages) is then passed on the stack to init386(first) as
+ * the value first. esi should ALWAYS be page aligned!!
+ */
+ movl %esi,%ecx /* Get current first available address */
+
+/* clear pagetables, page directory, stack, etc... */
+ movl %esi,%edi /* base (page directory) */
+ movl $((1+UPAGES+1+NKPT)*NBPG),%ecx /* amount to clear */
+ xorl %eax,%eax /* specify zero fill */
+ cld
+ rep
+ stosb
+
+/* physical address of Idle proc/kernel page directory */
+ movl %esi,_IdlePTD-KERNBASE
+
+/*
+ * fillkpt
+ * eax = (page frame address | control | status) == pte
+ * ebx = address of page table
+ * ecx = how many pages to map
+ */
+#define fillkpt \
+1: movl %eax,(%ebx) ; \
+ addl $NBPG,%eax ; /* increment physical address */ \
+ addl $4,%ebx ; /* next pte */ \
+ loop 1b ;
+
+/*
+ * Map Kernel
+ *
+ * First step - build page tables
+ */
+#if defined (KGDB) || defined (BDE_DEBUGGER)
+ movl _KERNend-KERNBASE,%ecx /* this much memory, */
+ shrl $PGSHIFT,%ecx /* for this many PTEs */
+#ifdef BDE_DEBUGGER
+ cmpl $0xa0,%ecx /* XXX - cover debugger pages */
+ jae 1f
+ movl $0xa0,%ecx
+1:
+#endif /* BDE_DEBUGGER */
+ movl $PG_V|PG_KW|PG_NC_PWT,%eax /* kernel R/W, valid, cache write-through */
+ lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
+ movl %ebx,_KPTphys-KERNBASE /* save in global */
+ fillkpt
+
+#else /* !KGDB && !BDE_DEBUGGER */
+ /* write protect kernel text (doesn't do a thing for 386's - only 486's) */
+ movl $_etext-KERNBASE,%ecx /* get size of text */
+ shrl $PGSHIFT,%ecx /* for this many PTEs */
+ movl $PG_V|PG_KR,%eax /* specify read only */
+ lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
+ movl %ebx,_KPTphys-KERNBASE /* save in global */
+ fillkpt
+
+ /* data and bss are r/w */
+ andl $PG_FRAME,%eax /* strip to just addr of bss */
+ movl _KERNend-KERNBASE,%ecx /* calculate size */
+ subl %eax,%ecx
+ shrl $PGSHIFT,%ecx
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ fillkpt
+#endif /* KGDB || BDE_DEBUGGER */
+
+/* now initialize the page dir, upages, p0stack PT, and page tables */
+
+ movl $(1+UPAGES+1+NKPT),%ecx /* number of PTEs */
+ movl %esi,%eax /* phys address of PTD */
+ andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
+ orl $PG_V|PG_KW|PG_NC_PWT,%eax /* valid, kernel read/write, cache write-through */
+ movl %esi,%ebx /* calculate pte offset to ptd */
+ shrl $PGSHIFT-2,%ebx
+ addl %esi,%ebx /* address of page directory */
+ addl $((1+UPAGES+1)*NBPG),%ebx /* offset to kernel page tables */
+ fillkpt
+
+/* map I/O memory map */
+
+ movl _KPTphys-KERNBASE,%ebx /* base of kernel page tables */
+ lea (0xa0 * PTESIZE)(%ebx),%ebx /* hardwire ISA hole at KERNBASE + 0xa0000 */
+ movl $0x100-0xa0,%ecx /* for this many PTEs, */
+ movl $(0xa0000|PG_V|PG_KW|PG_N),%eax /* valid, kernel read/write, non-cacheable */
+ movl %ebx,_atdevphys-KERNBASE /* save phys addr of ptes */
+ fillkpt
+
+ /* map proc 0's kernel stack into user page table page */
+
+ movl $UPAGES,%ecx /* for this many PTEs, */
+ lea (1*NBPG)(%esi),%eax /* physical address in proc 0 */
+ lea (KERNBASE)(%eax),%edx /* change into virtual addr */
+ movl %edx,_proc0paddr-KERNBASE /* save VA for proc 0 init */
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ lea ((1+UPAGES)*NBPG)(%esi),%ebx /* addr of stack page table in proc 0 */
+ addl $(KSTKPTEOFF * PTESIZE),%ebx /* offset to kernel stack PTE */
+ fillkpt
+
+/*
+ * Initialize kernel page table directory
+ */
+ /* install a pde for temporary double map of bottom of VA */
+ movl _KPTphys-KERNBASE,%eax
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ movl %eax,(%esi) /* which is where temp maps! */
+
+ /* initialize kernel pde's */
+ movl $(NKPT),%ecx /* for this many PDEs */
+ lea (KPTDI*PDESIZE)(%esi),%ebx /* offset of pde for kernel */
+ fillkpt
+
+ /* install a pde recursively mapping page directory as a page table! */
+ movl %esi,%eax /* phys address of ptd in proc 0 */
+ orl $PG_V|PG_KW,%eax /* pde entry is valid */
+ movl %eax,PTDPTDI*PDESIZE(%esi) /* which is where PTmap maps! */
+
+ /* install a pde to map kernel stack for proc 0 */
+ lea ((1+UPAGES)*NBPG)(%esi),%eax /* physical address of pt in proc 0 */
+ orl $PG_V|PG_KW,%eax /* pde entry is valid */
+ movl %eax,KSTKPTDI*PDESIZE(%esi) /* which is where kernel stack maps! */
+
+#ifdef BDE_DEBUGGER
+ /* copy and convert stuff from old gdt and idt for debugger */
+
+ cmpl $0x0375c339,0x96104 /* XXX - debugger signature */
+ jne 1f
+ movb $1,_bdb_exists-KERNBASE
+1:
+ pushal
+ subl $2*6,%esp
+
+ sgdt (%esp)
+ movl 2(%esp),%esi /* base address of current gdt */
+ movl $_gdt-KERNBASE,%edi
+ movl %edi,2(%esp)
+ movl $8*18/4,%ecx
+ cld
+ rep /* copy gdt */
+ movsl
+ movl $_gdt-KERNBASE,-8+2(%edi) /* adjust gdt self-ptr */
+ movb $0x92,-8+5(%edi)
+
+ sidt 6(%esp)
+ movl 6+2(%esp),%esi /* base address of current idt */
+ movl 8+4(%esi),%eax /* convert dbg descriptor to ... */
+ movw 8(%esi),%ax
+ movl %eax,bdb_dbg_ljmp+1-KERNBASE /* ... immediate offset ... */
+ movl 8+2(%esi),%eax
+ movw %ax,bdb_dbg_ljmp+5-KERNBASE /* ... and selector for ljmp */
+ movl 24+4(%esi),%eax /* same for bpt descriptor */
+ movw 24(%esi),%ax
+ movl %eax,bdb_bpt_ljmp+1-KERNBASE
+ movl 24+2(%esi),%eax
+ movw %ax,bdb_bpt_ljmp+5-KERNBASE
+
+ movl $_idt-KERNBASE,%edi
+ movl %edi,6+2(%esp)
+ movl $8*4/4,%ecx
+ cld
+ rep /* copy idt */
+ movsl
+
+ lgdt (%esp)
+ lidt 6(%esp)
+
+ addl $2*6,%esp
+ popal
+#endif /* BDE_DEBUGGER */
+
+ /* load base of page directory and enable mapping */
+ movl %esi,%eax /* phys address of ptd in proc 0 */
+ orl $I386_CR3PAT,%eax
+ movl %eax,%cr3 /* load ptd addr into mmu */
+ movl %cr0,%eax /* get control word */
+ orl $CR0_PE|CR0_PG,%eax /* enable paging */
+ movl %eax,%cr0 /* and let's page NOW! */
+
+ pushl $begin /* jump to high mem */
+ ret
+
+begin: /* now running relocated at KERNBASE where the system is linked to run */
+
+ .globl _Crtat /* XXX - locore should not know about */
+ movl _Crtat,%eax /* variables of device drivers (pccons)! */
+ subl $(KERNBASE+0xA0000),%eax
+ movl _atdevphys,%edx /* get pte PA */
+ subl _KPTphys,%edx /* remove base of ptes, now have phys offset */
+ shll $PGSHIFT-2,%edx /* corresponding to virt offset */
+ addl $KERNBASE,%edx /* add virtual base */
+ movl %edx,_atdevbase
+ addl %eax,%edx
+ movl %edx,_Crtat
+
+ /* set up bootstrap stack - 48 bytes */
+ movl $_kstack+UPAGES*NBPG-4*12,%esp /* bootstrap stack end location */
+ xorl %eax,%eax /* mark end of frames */
+ movl %eax,%ebp
+ movl _proc0paddr,%eax
+ movl %esi,PCB_CR3(%eax)
+
+#ifdef BDE_DEBUGGER
+ /* relocate debugger gdt entries */
+
+ movl $_gdt+8*9,%eax /* adjust slots 9-17 */
+ movl $9,%ecx
+reloc_gdt:
+ movb $KERNBASE>>24,7(%eax) /* top byte of base addresses, was 0, */
+ addl $8,%eax /* now KERNBASE>>24 */
+ loop reloc_gdt
+
+ cmpl $0,_bdb_exists
+ je 1f
+ int $3
+1:
+#endif /* BDE_DEBUGGER */
+
+ /*
+ * Skip over the page tables and the kernel stack
+ */
+ lea ((1+UPAGES+1+NKPT)*NBPG)(%esi),%esi
+
+ pushl %esi /* value of first for init386(first) */
+ call _init386 /* wire 386 chip for unix operation */
+ popl %esi
+
+#if 0
+ movl $0,_PTD
+#endif
+
+ .globl __ucodesel,__udatasel
+
+ pushl $0 /* unused */
+ pushl __udatasel /* ss */
+ pushl $0 /* esp - filled in by execve() */
+ pushl $0x3200 /* eflags (ring 3, int enab) */
+ pushl __ucodesel /* cs */
+ pushl $0 /* eip - filled in by execve() */
+ subl $(12*4),%esp /* space for rest of registers */
+
+ pushl %esp /* call main with frame pointer */
+ call _main /* autoconfiguration, mountroot etc */
+
+ addl $(13*4),%esp /* back to a frame we can return with */
+
+ /*
+ * now we've run main() and determined what cpu-type we are, we can
+ * enable WP mode on i486 cpus and above.
+ */
+#if defined(I486_CPU) || defined(I586_CPU)
+ cmpl $CPUCLASS_386,_cpu_class
+ je 1f
+ movl %cr0,%eax /* get control word */
+ orl $CR0_WP,%eax /* enable write protect for all modes */
+ movl %eax,%cr0 /* and do it */
+#endif
+ /*
+ * on return from main(), we are process 1
+ * set up address space and stack so that we can 'return' to user mode
+ */
+1:
+ movl __ucodesel,%eax
+ movl __udatasel,%ecx
+
+ movl %cx,%ds
+ movl %cx,%es
+ movl %ax,%fs /* double map cs to fs */
+ movl %cx,%gs /* and ds to gs */
+ iret /* goto user! */
+
+#define LCALL(x,y) .byte 0x9a ; .long y ; .word x
+
+NON_GPROF_ENTRY(sigcode)
+ call SIGF_HANDLER(%esp)
+ lea SIGF_SC(%esp),%eax /* scp (the call may have clobbered the */
+ /* copy at 8(%esp)) */
+ pushl %eax
+ pushl %eax /* junk to fake return address */
+ movl $103,%eax /* XXX sigreturn() */
+ LCALL(0x7,0) /* enter kernel with args on stack */
+ hlt /* never gets here */
+
+ .globl _szsigcode
+_szsigcode:
+ .long _szsigcode-_sigcode
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
new file mode 100644
index 0000000..70f65bf
--- /dev/null
+++ b/sys/amd64/amd64/machdep.c
@@ -0,0 +1,1556 @@
+/*-
+ * Copyright (c) 1992 Terrence R. Lambert.
+ * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
+ * $Id: machdep.c,v 1.41 1994/03/30 02:31:11 davidg Exp $
+ */
+
+#include "npx.h"
+#include "isa.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/signalvar.h>
+#include <sys/kernel.h>
+#include <sys/map.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/buf.h>
+#include <sys/reboot.h>
+#include <sys/conf.h>
+#include <sys/file.h>
+#include <sys/callout.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#include <sys/sysctl.h>
+
+#ifdef SYSVSHM
+#include "sys/shm.h"
+#endif
+
+#ifdef SYSVMSG
+#include "msg.h"
+#endif
+
+#ifdef SYSVSEM
+#include "sem.h"
+#endif
+
+#include "vm/vm.h"
+#include "vm/vm_kern.h"
+#include "vm/vm_page.h"
+
+#include "sys/exec.h"
+#include "sys/vnode.h"
+
+extern vm_offset_t avail_start, avail_end;
+
+#include "machine/cpu.h"
+#include "machine/reg.h"
+#include "machine/psl.h"
+#include "machine/specialreg.h"
+#include "machine/sysarch.h"
+#include "machine/cons.h"
+
+#include "i386/isa/isa.h"
+#include "i386/isa/rtc.h"
+
+static void identifycpu(void);
+static void initcpu(void);
+static int test_page(int *, int);
+
+extern int grow(struct proc *,u_int);
+const char machine[] = "PC-Class";
+const char *cpu_model;
+
+#ifndef PANIC_REBOOT_WAIT_TIME
+#define PANIC_REBOOT_WAIT_TIME 15 /* default to 15 seconds */
+#endif
+
+/*
+ * Declare these as initialized data so we can patch them.
+ */
+int nswbuf = 0;
+#ifdef NBUF
+int nbuf = NBUF;
+#else
+int nbuf = 0;
+#endif
+#ifdef BUFPAGES
+int bufpages = BUFPAGES;
+#else
+int bufpages = 0;
+#endif
+#ifdef BOUNCEPAGES
+int bouncepages = BOUNCEPAGES;
+#else
+int bouncepages = 0;
+#endif
+int msgbufmapped = 0; /* set when safe to use msgbuf */
+extern int freebufspace;
+extern char *bouncememory;
+
+int _udatasel, _ucodesel;
+
+/*
+ * Machine-dependent startup code
+ */
+int boothowto = 0, Maxmem = 0, badpages = 0, physmem = 0;
+long dumplo;
+extern int bootdev;
+int biosmem;
+
+vm_offset_t phys_avail[6];
+
+extern cyloffset;
+
+int cpu_class;
+
+void dumpsys __P((void));
+vm_offset_t buffer_sva, buffer_eva;
+vm_offset_t clean_sva, clean_eva;
+vm_offset_t pager_sva, pager_eva;
+int maxbkva, pager_map_size;
+
+#define offsetof(type, member) ((size_t)(&((type *)0)->member))
+
+void
+cpu_startup()
+{
+ register int unixsize;
+ register unsigned i;
+ register struct pte *pte;
+ int mapaddr, j;
+ register caddr_t v;
+ int maxbufs, base, residual;
+ extern long Usrptsize;
+ vm_offset_t minaddr, maxaddr;
+ vm_size_t size = 0;
+ int firstaddr;
+
+ /*
+ * Initialize error message buffer (at end of core).
+ */
+
+ /* avail_end was pre-decremented in init_386() to compensate */
+ for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
+ pmap_enter(pmap_kernel(), (vm_offset_t)msgbufp,
+ avail_end + i * NBPG,
+ VM_PROT_ALL, TRUE);
+ msgbufmapped = 1;
+
+ /*
+ * Good {morning,afternoon,evening,night}.
+ */
+ printf(version);
+ identifycpu();
+ printf("real memory = %d (%d pages)\n", ptoa(physmem), physmem);
+ if (badpages)
+ printf("bad memory = %d (%d pages)\n", ptoa(badpages), badpages);
+
+ /*
+ * Allocate space for system data structures.
+ * The first available kernel virtual address is in "v".
+ * As pages of kernel virtual memory are allocated, "v" is incremented.
+ * As pages of memory are allocated and cleared,
+ * "firstaddr" is incremented.
+ * An index into the kernel page table corresponding to the
+ * virtual memory address maintained in "v" is kept in "mapaddr".
+ */
+
+ /*
+ * Make two passes. The first pass calculates how much memory is
+ * needed and allocates it. The second pass assigns virtual
+ * addresses to the various data structures.
+ */
+ firstaddr = 0;
+again:
+ v = (caddr_t)firstaddr;
+
+#define valloc(name, type, num) \
+ (name) = (type *)v; v = (caddr_t)((name)+(num))
+#define valloclim(name, type, num, lim) \
+ (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
+ valloc(callout, struct callout, ncallout);
+#ifdef SYSVSHM
+ valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
+#endif
+#ifdef SYSVSEM
+ valloc(sema, struct semid_ds, seminfo.semmni);
+ valloc(sem, struct sem, seminfo.semmns);
+ /* This is pretty disgusting! */
+ valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
+#endif
+#ifdef SYSVMSG
+ valloc(msgpool, char, msginfo.msgmax);
+ valloc(msgmaps, struct msgmap, msginfo.msgseg);
+ valloc(msghdrs, struct msg, msginfo.msgtql);
+ valloc(msqids, struct msqid_ds, msginfo.msgmni);
+#endif
+ /*
+ * Determine how many buffers to allocate.
+	 * Use 20% of memory beyond the first 2MB
+	 * Ensure a minimum of 16 fs buffers.
+ * We allocate 1/2 as many swap buffer headers as file i/o buffers.
+ */
+ if (bufpages == 0)
+ bufpages = ((physmem << PGSHIFT) - 2048*1024) / NBPG / 5;
+ if (bufpages < 64)
+ bufpages = 64;
+
+ /*
+ * We must still limit the maximum number of buffers to be no
+	 * more than 2/5's of the size of the kernel malloc region, this
+ * will only take effect for machines with lots of memory
+ */
+ bufpages = min(bufpages, (VM_KMEM_SIZE / NBPG) * 2 / 5);
+ if (nbuf == 0) {
+ nbuf = bufpages / 2;
+ if (nbuf < 32)
+ nbuf = 32;
+ }
+ freebufspace = bufpages * NBPG;
+ if (nswbuf == 0) {
+ nswbuf = (nbuf / 2) &~ 1; /* force even */
+ if (nswbuf > 256)
+ nswbuf = 256; /* sanity */
+ }
+ valloc(swbuf, struct buf, nswbuf);
+ valloc(buf, struct buf, nbuf);
+
+#ifndef NOBOUNCE
+ /*
+ * If there is more than 16MB of memory, allocate some bounce buffers
+ */
+ if (Maxmem > 4096) {
+ if (bouncepages == 0)
+ bouncepages = 96; /* largest physio size + extra */
+ v = (caddr_t)((vm_offset_t)((vm_offset_t)v + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
+ valloc(bouncememory, char, bouncepages * PAGE_SIZE);
+ }
+#endif
+
+ /*
+ * End of first pass, size has been calculated so allocate memory
+ */
+ if (firstaddr == 0) {
+ size = (vm_size_t)(v - firstaddr);
+ firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
+ if (firstaddr == 0)
+ panic("startup: no room for tables");
+ goto again;
+ }
+
+ /*
+ * End of second pass, addresses have been assigned
+ */
+ if ((vm_size_t)(v - firstaddr) != size)
+ panic("startup: table size inconsistency");
+
+ clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
+ (nbuf*MAXBSIZE) + VM_PHYS_SIZE + maxbkva + pager_map_size, TRUE);
+
+ io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva, FALSE);
+ pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
+ pager_map_size, TRUE);
+
+ buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
+ (nbuf * MAXBSIZE), TRUE);
+ /*
+ * Allocate a submap for physio
+ */
+ phys_map = kmem_suballoc(clean_map, &minaddr, &maxaddr,
+ VM_PHYS_SIZE, TRUE);
+
+ /*
+ * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
+ * we use the more space efficient malloc in place of kmem_alloc.
+ */
+ mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
+ M_MBUF, M_NOWAIT);
+ bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
+ mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
+ VM_MBUF_SIZE, FALSE);
+ /*
+ * Initialize callouts
+ */
+ callfree = callout;
+ for (i = 1; i < ncallout; i++)
+ callout[i-1].c_next = &callout[i];
+
+ printf("avail memory = %d (%d pages)\n", ptoa(cnt.v_free_count), cnt.v_free_count);
+ printf("using %d buffers containing %d bytes of memory\n",
+ nbuf, bufpages * CLBYTES);
+
+#ifndef NOBOUNCE
+ /*
+ * init bounce buffers
+ */
+ vm_bounce_init();
+#endif
+
+ /*
+ * Set up CPU-specific registers, cache, etc.
+ */
+ initcpu();
+
+ /*
+ * Set up buffers, so they can be used to read disk labels.
+ */
+ bufinit();
+
+ /*
+ * Configure the system.
+ */
+ configure();
+}
+
+
+struct cpu_nameclass i386_cpus[] = {
+ { "Intel 80286", CPUCLASS_286 }, /* CPU_286 */
+ { "i386SX", CPUCLASS_386 }, /* CPU_386SX */
+ { "i386DX", CPUCLASS_386 }, /* CPU_386 */
+ { "i486SX", CPUCLASS_486 }, /* CPU_486SX */
+ { "i486DX", CPUCLASS_486 }, /* CPU_486 */
+ { "i586", CPUCLASS_586 }, /* CPU_586 */
+};
+
+static void
+identifycpu()
+{
+ printf("CPU: ");
+ if (cpu >= 0 && cpu < (sizeof i386_cpus/sizeof(struct cpu_nameclass))) {
+ printf("%s", i386_cpus[cpu].cpu_name);
+ cpu_class = i386_cpus[cpu].cpu_class;
+ cpu_model = i386_cpus[cpu].cpu_name;
+ } else {
+ printf("unknown cpu type %d\n", cpu);
+ panic("startup: bad cpu id");
+ }
+ printf(" (");
+ switch(cpu_class) {
+ case CPUCLASS_286:
+ printf("286");
+ break;
+ case CPUCLASS_386:
+ printf("386");
+ break;
+ case CPUCLASS_486:
+ printf("486");
+ break;
+ case CPUCLASS_586:
+ printf("586");
+ break;
+ default:
+ printf("unknown"); /* will panic below... */
+ }
+ printf("-class CPU)");
+ printf("\n"); /* cpu speed would be nice, but how? */
+
+ /*
+ * Now that we have told the user what they have,
+ * let them know if that machine type isn't configured.
+ */
+ switch (cpu_class) {
+ case CPUCLASS_286: /* a 286 should not make it this far, anyway */
+#if !defined(I386_CPU) && !defined(I486_CPU) && !defined(I586_CPU)
+#error This kernel is not configured for one of the supported CPUs
+#endif
+#if !defined(I386_CPU)
+ case CPUCLASS_386:
+#endif
+#if !defined(I486_CPU)
+ case CPUCLASS_486:
+#endif
+#if !defined(I586_CPU)
+ case CPUCLASS_586:
+#endif
+ panic("CPU class not configured");
+ default:
+ break;
+ }
+}
+
+#ifdef PGINPROF
+/*
+ * Return the difference (in microseconds)
+ * between the current time and a previous
+ * time as represented by the arguments.
+ * If there is a pending clock interrupt
+ * which has not been serviced due to high
+ * ipl, return error code.
+ */
+/*ARGSUSED*/
+vmtime(otime, olbolt, oicr)
+ register int otime, olbolt, oicr;
+{
+
+ return (((time.tv_sec-otime)*60 + lbolt-olbolt)*16667);
+}
+#endif
+
+extern int kstack[];
+
+/*
+ * Send an interrupt to process.
+ *
+ * Stack is set up to allow sigcode stored
+ * in u. to call routine, followed by kcall
+ * to sigreturn routine below. After sigreturn
+ * resets the signal mask, the stack, and the
+ * frame pointer, it returns to the user
+ * specified pc, psl.
+ */
+void
+sendsig(catcher, sig, mask, code)
+ sig_t catcher;
+ int sig, mask;
+ unsigned code;
+{
+ register struct proc *p = curproc;
+ register int *regs;
+ register struct sigframe *fp;
+ struct sigacts *psp = p->p_sigacts;
+ int oonstack, frmtrap;
+
+ regs = p->p_md.md_regs;
+ oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
+ /*
+ * Allocate and validate space for the signal handler
+ * context. Note that if the stack is in P0 space, the
+ * call to grow() is a nop, and the useracc() check
+ * will fail if the process has not already allocated
+ * the space with a `brk'.
+ */
+ if ((psp->ps_flags & SAS_ALTSTACK) &&
+ (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
+ (psp->ps_sigonstack & sigmask(sig))) {
+ fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
+ psp->ps_sigstk.ss_size - sizeof(struct sigframe));
+ psp->ps_sigstk.ss_flags |= SA_ONSTACK;
+ } else {
+ fp = (struct sigframe *)(regs[tESP]
+ - sizeof(struct sigframe));
+ }
+
+ /*
+ * grow() will return FALSE if the fp will not fit inside the stack
+ * and the stack can not be grown. useracc will return FALSE
+ * if access is denied.
+ */
+ if ((grow(p, (int)fp) == FALSE) ||
+ (useracc((caddr_t)fp, sizeof (struct sigframe), B_WRITE) == FALSE)) {
+ /*
+ * Process has trashed its stack; give it an illegal
+ * instruction to halt it in its tracks.
+ */
+ SIGACTION(p, SIGILL) = SIG_DFL;
+ sig = sigmask(SIGILL);
+ p->p_sigignore &= ~sig;
+ p->p_sigcatch &= ~sig;
+ p->p_sigmask &= ~sig;
+ psignal(p, SIGILL);
+ return;
+ }
+
+ /*
+ * Build the argument list for the signal handler.
+ */
+ fp->sf_signum = sig;
+ fp->sf_code = code;
+ fp->sf_scp = &fp->sf_sc;
+ fp->sf_addr = (char *) regs[tERR];
+ fp->sf_handler = catcher;
+
+ /* save scratch registers */
+ fp->sf_sc.sc_eax = regs[tEAX];
+ fp->sf_sc.sc_ebx = regs[tEBX];
+ fp->sf_sc.sc_ecx = regs[tECX];
+ fp->sf_sc.sc_edx = regs[tEDX];
+ fp->sf_sc.sc_esi = regs[tESI];
+ fp->sf_sc.sc_edi = regs[tEDI];
+ fp->sf_sc.sc_cs = regs[tCS];
+ fp->sf_sc.sc_ds = regs[tDS];
+ fp->sf_sc.sc_ss = regs[tSS];
+ fp->sf_sc.sc_es = regs[tES];
+ fp->sf_sc.sc_isp = regs[tISP];
+
+ /*
+ * Build the signal context to be used by sigreturn.
+ */
+ fp->sf_sc.sc_onstack = oonstack;
+ fp->sf_sc.sc_mask = mask;
+ fp->sf_sc.sc_sp = regs[tESP];
+ fp->sf_sc.sc_fp = regs[tEBP];
+ fp->sf_sc.sc_pc = regs[tEIP];
+ fp->sf_sc.sc_ps = regs[tEFLAGS];
+ regs[tESP] = (int)fp;
+ regs[tEIP] = (int)((struct pcb *)kstack)->pcb_sigc;
+ regs[tEFLAGS] &= ~PSL_VM;
+ regs[tCS] = _ucodesel;
+ regs[tDS] = _udatasel;
+ regs[tES] = _udatasel;
+ regs[tSS] = _udatasel;
+}
+
+/*
+ * System call to cleanup state after a signal
+ * has been taken. Reset signal mask and
+ * stack state from context left by sendsig (above).
+ * Return to previous pc and psl as specified by
+ * context left by sendsig. Check carefully to
+ * make sure that the user has not modified the
+ * psl to gain improper privileges or to cause
+ * a machine fault.
+ */
+struct sigreturn_args {
+	struct sigcontext *sigcntxp;	/* user pointer to the saved context */
+};
+
+/*
+ * Restore machine state from the sigcontext built by sendsig().
+ * eflags and all four segment selectors are validated before being
+ * loaded so a tampered context cannot raise IOPL or fault the kernel
+ * on the return to user mode.  Returns EJUSTRETURN so the syscall
+ * return path leaves the restored register values alone.
+ */
+int
+sigreturn(p, uap, retval)
+	struct proc *p;
+	struct sigreturn_args *uap;
+	int *retval;
+{
+	register struct sigcontext *scp;
+	register struct sigframe *fp;
+	register int *regs = p->p_md.md_regs;
+	int eflags;
+
+	/*
+	 * (XXX old comment) regs[tESP] points to the return address.
+	 * The user scp pointer is above that.
+	 * The return address is faked in the signal trampoline code
+	 * for consistency.
+	 */
+	scp = uap->sigcntxp;
+	fp = (struct sigframe *)
+	     ((caddr_t)scp - offsetof(struct sigframe, sf_sc));
+
+	/* the whole frame (which contains *scp) must be user-accessible */
+	if (useracc((caddr_t)fp, sizeof (*fp), 0) == 0)
+		return(EINVAL);
+
+	/*
+	 * Reject an eflags image that clears a must-be-set bit, sets a
+	 * must-be-clear bit, or raises IOPL above its current value.
+	 */
+	eflags = scp->sc_ps;
+	if ((eflags & PSL_USERCLR) != 0 ||
+	    (eflags & PSL_USERSET) != PSL_USERSET ||
+	    (eflags & PSL_IOPL) < (regs[tEFLAGS] & PSL_IOPL)) {
+#ifdef DEBUG
+		printf("sigreturn: eflags=0x%x\n", eflags);
+#endif
+		return(EINVAL);
+	}
+
+	/*
+	 * Sanity check the user's selectors and error if they
+	 * are suspect.
+	 */
+#define max_ldt_sel(pcb) \
+	((pcb)->pcb_ldt ? (pcb)->pcb_ldt_len : (sizeof(ldt) / sizeof(ldt[0])))
+
+#define valid_ldt_sel(sel) \
+	(ISLDT(sel) && ISPL(sel) == SEL_UPL && \
+	 IDXSEL(sel) < max_ldt_sel(&p->p_addr->u_pcb))
+
+#define null_sel(sel) \
+	(!ISLDT(sel) && IDXSEL(sel) == 0)
+
+	/*
+	 * Note the parentheses around (sel & 0xffff): `!=' binds more
+	 * tightly than `&', so the unparenthesized original compared
+	 * the selector against the boolean (0xffff != _ucodesel) and
+	 * the intended fast-path equality test never worked.
+	 */
+	if (((scp->sc_cs & 0xffff) != _ucodesel && !valid_ldt_sel(scp->sc_cs)) ||
+	    ((scp->sc_ss & 0xffff) != _udatasel && !valid_ldt_sel(scp->sc_ss)) ||
+	    ((scp->sc_ds & 0xffff) != _udatasel && !valid_ldt_sel(scp->sc_ds) &&
+	     !null_sel(scp->sc_ds)) ||
+	    ((scp->sc_es & 0xffff) != _udatasel && !valid_ldt_sel(scp->sc_es) &&
+	     !null_sel(scp->sc_es))) {
+#ifdef DEBUG
+		printf("sigreturn: cs=0x%x ss=0x%x ds=0x%x es=0x%x\n",
+		    scp->sc_cs, scp->sc_ss, scp->sc_ds, scp->sc_es);
+#endif
+		trapsignal(p, SIGBUS, T_PROTFLT);
+		return(EINVAL);
+	}
+
+#undef max_ldt_sel
+#undef valid_ldt_sel
+#undef null_sel
+
+	/* restore scratch registers */
+	regs[tEAX] = scp->sc_eax;
+	regs[tEBX] = scp->sc_ebx;
+	regs[tECX] = scp->sc_ecx;
+	regs[tEDX] = scp->sc_edx;
+	regs[tESI] = scp->sc_esi;
+	regs[tEDI] = scp->sc_edi;
+	regs[tCS] = scp->sc_cs;
+	regs[tDS] = scp->sc_ds;
+	regs[tES] = scp->sc_es;
+	regs[tSS] = scp->sc_ss;
+	regs[tISP] = scp->sc_isp;
+
+	/*
+	 * XXX this check looks redundant (scp lies inside fp, which was
+	 * verified above) and comes after scp has already been read.
+	 */
+	if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0)
+		return(EINVAL);
+
+	if (scp->sc_onstack & 01)
+		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
+	else
+		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
+	/* never allow SIGKILL/SIGCONT/SIGSTOP to be masked */
+	p->p_sigmask = scp->sc_mask &~
+	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
+	regs[tEBP] = scp->sc_fp;
+	regs[tESP] = scp->sc_sp;
+	regs[tEIP] = scp->sc_pc;
+	regs[tEFLAGS] = eflags;
+	return(EJUSTRETURN);
+}
+
+/*
+ * Deliberately crash the machine so that a panic (and hence a vmcore
+ * dump) can be produced on demand in a predictable way.
+ */
+void
+diediedie()
+{
+	panic("because you said to!");
+}
+
+int waittime = -1;		/* set to 0 once a shutdown disk sync has started */
+struct pcb dumppcb;		/* context saved for the crash dump */
+
+/*
+ * boot(arghowto) -- machine-dependent halt/reboot/dump path.
+ * Unless RB_NOSYNC is set, syncs disks and waits (bounded) for busy
+ * buffers to drain; then dumps core if RB_DUMP, halts if RB_HALT,
+ * and otherwise resets the CPU.  Never returns.
+ */
+void
+boot(arghowto)
+	int arghowto;
+{
+	register long dummy;		/* r12 is reserved */
+	register int howto;		/* r11 == how to boot */
+	register int devtype;		/* r10 == major of root dev */
+	extern int cold;
+	int nomsg = 1;
+
+	/* too early in bootstrap to do anything useful -- spin forever */
+	if (cold) {
+		printf("hit reset please");
+		for(;;);
+	}
+	howto = arghowto;
+	if ((howto&RB_NOSYNC) == 0 && waittime < 0) {
+		register struct buf *bp;
+		int iter, nbusy;
+
+		waittime = 0;
+		(void) splnet();
+		printf("syncing disks... ");
+		/*
+		 * Release inodes held by texts before update.
+		 */
+		if (panicstr == 0)
+			vnode_pager_umount(NULL);
+		sync(curproc, NULL, NULL);
+		/*
+		 * Unmount filesystems
+		 */
+#if 0
+		if (panicstr == 0)
+			vfs_unmountall();
+#endif
+
+		/* up to 20 passes, backing off 40ms more on each pass */
+		for (iter = 0; iter < 20; iter++) {
+			nbusy = 0;
+			for (bp = &buf[nbuf]; --bp >= buf; )
+				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
+					nbusy++;
+			if (nbusy == 0)
+				break;
+			if (nomsg) {
+				printf("updating disks before rebooting... ");
+				nomsg = 0;
+			}
+			printf("%d ", nbusy);
+			DELAY(40000 * iter);
+		}
+		if (nbusy)
+			printf("giving up\n");
+		else
+			printf("done\n");
+		DELAY(10000);			/* wait for printf to finish */
+	}
+	splhigh();
+	devtype = major(rootdev);
+	if (howto&RB_HALT) {
+		printf("\n");
+		printf("The operating system has halted.\n");
+		printf("Please press any key to reboot.\n\n");
+		cngetc();
+	} else {
+		if (howto & RB_DUMP) {
+			savectx(&dumppcb, 0);	/* snapshot context for the dump */
+			dumppcb.pcb_ptd = rcr3();
+			dumpsys();
+
+			if (PANIC_REBOOT_WAIT_TIME != 0) {
+				if (PANIC_REBOOT_WAIT_TIME != -1) {
+					int loop;
+					printf("Automatic reboot in %d seconds - press a key on the console to abort\n",
+						PANIC_REBOOT_WAIT_TIME);
+					for (loop = PANIC_REBOOT_WAIT_TIME; loop > 0; --loop) {
+						DELAY(1000 * 1000); /* one second */
+						if (sgetc(1)) /* Did user type a key? */
+							break;
+					}
+					/* loop == 0: timed out without a keypress */
+					if (!loop)
+						goto die;
+				}
+			} else { /* zero time specified - reboot NOW */
+				goto die;
+			}
+			printf("--> Press a key on the console to reboot <--\n");
+			cngetc();
+		}
+	}
+#ifdef lint
+	dummy = 0; dummy = dummy;
+	printf("howto %d, devtype %d\n", arghowto, devtype);
+#endif
+die:
+	printf("Rebooting...\n");
+	DELAY(1000000);	/* wait 1 sec for printf's to complete and be read */
+	cpu_reset();
+	for(;;) ;
+	/* NOTREACHED */
+}
+
+unsigned long dumpmag = 0x8fca0101UL;	/* magic number for savecore */
+int dumpsize = 0;		/* also for savecore */
+/*
+ * Doadump comes here after turning off memory management and
+ * getting on the dump stack, either when called above, or by
+ * the auto-restart code.
+ */
+void
+dumpsys()
+{
+
+	if (dumpdev == NODEV)
+		return;
+	/* only dump to partition index 1 (conventionally the 'b' partition) */
+	if ((minor(dumpdev)&07) != 1)
+		return;
+	dumpsize = Maxmem;
+	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
+	printf("dump ");
+	/* invoke the driver's dump entry point and report its status */
+	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
+
+	case ENXIO:
+		printf("device bad\n");
+		break;
+
+	case EFAULT:
+		printf("device not ready\n");
+		break;
+
+	case EINVAL:
+		printf("area improper\n");
+		break;
+
+	case EIO:
+		printf("i/o error\n");
+		break;
+
+	case EINTR:
+		printf("aborted from console\n");
+		break;
+
+	default:
+		printf("succeeded\n");
+		break;
+	}
+}
+
+#ifdef HZ
+/*
+ * If HZ is defined we use this code, otherwise the code in
+ * /sys/i386/i386/microtime.s is used.  The other code only works
+ * for HZ=100.
+ *
+ * Returns the current time of day in *tvp, advanced by one tick so
+ * successive calls within the same tick still move forward.
+ */
+microtime(tvp)
+	register struct timeval *tvp;
+{
+	int s = splhigh();
+
+	*tvp = time;
+	tvp->tv_usec += tick;
+	/*
+	 * Normalize: tv_usec must stay in [0, 1000000).  The original
+	 * `>' test left tv_usec == 1000000 unnormalized.
+	 */
+	while (tvp->tv_usec >= 1000000) {
+		tvp->tv_sec++;
+		tvp->tv_usec -= 1000000;
+	}
+	splx(s);
+}
+#endif /* HZ */
+
+/* placeholder for model-specific CPU setup; nothing to do yet */
+static void
+initcpu()
+{
+}
+
+/*
+ * Clear registers on exec.
+ * Resets the user register frame to a clean state: empty frame-pointer
+ * chain, pc at `entry', stack at `stack', default user selectors, and
+ * FPU ownership dropped (CR0_TS set so the first FP use traps).
+ */
+void
+setregs(p, entry, stack)
+	struct proc *p;
+	u_long entry;
+	u_long stack;
+{
+	p->p_md.md_regs[tEBP] = 0;	/* bottom of the fp chain */
+	p->p_md.md_regs[tEIP] = entry;
+	p->p_md.md_regs[tESP] = stack;
+	p->p_md.md_regs[tSS] = _udatasel;
+	p->p_md.md_regs[tDS] = _udatasel;
+	p->p_md.md_regs[tES] = _udatasel;
+	p->p_md.md_regs[tCS] = _ucodesel;
+
+	p->p_addr->u_pcb.pcb_flags = 0;	/* no fp at all */
+	load_cr0(rcr0() | CR0_TS);	/* start emulating */
+#if NNPX > 0
+	npxinit(__INITIAL_NPXCW__);
+#endif /* NNPX > 0 */
+}
+
+/*
+ * machine dependent system variables.
+ * Handles the machdep sysctl subtree; only CPU_CONSDEV (the console
+ * device number) is implemented, and it is read-only.
+ */
+int
+cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
+	int *name;
+	u_int namelen;
+	void *oldp;
+	size_t *oldlenp;
+	void *newp;
+	size_t newlen;
+	struct proc *p;
+{
+
+	/* all sysctl names at this level are terminal */
+	if (namelen != 1)
+		return (ENOTDIR);		/* overloaded */
+
+	switch (name[0]) {
+	case CPU_CONSDEV:
+		return (sysctl_rdstruct(oldp, oldlenp, newp, &cn_tty->t_dev,
+		    sizeof cn_tty->t_dev));
+	default:
+		return (EOPNOTSUPP);
+	}
+	/* NOTREACHED */
+}
+
+/*
+ * Initialize 386 and configure to run kernel
+ */
+
+/*
+ * Initialize segments & interrupt table
+ */
+
+union descriptor gdt[NGDT];		/* global descriptor table */
+union descriptor ldt[NLDT];		/* local descriptor table */
+struct gate_descriptor idt[NIDT];	/* interrupt descriptor table */
+
+int _default_ldt, currentldt;	/* kernel default LDT selector / currently loaded LDT */
+
+struct i386tss tss, panic_tss;	/* proc0 TSS and the TSS used while panicking */
+
+extern struct user *proc0paddr;
+
+/* software prototypes -- in more palatable form */
+/*
+ * NOTE(review): entry order must match the G*_SEL indices declared in
+ * <machine/segments.h> (null, kernel code, kernel data, LDT, spare,
+ * panic TSS, proc0 TSS, per-process user LDT) -- confirm against that
+ * header before reordering.
+ */
+struct soft_segment_descriptor gdt_segs[] = {
+	/* Null Descriptor */
+{	0x0,			/* segment base address  */
+	0x0,			/* length */
+	0,			/* segment type */
+	0,			/* segment descriptor priority level */
+	0,			/* segment descriptor present */
+	0, 0,
+	0,			/* default 32 vs 16 bit size */
+	0  			/* limit granularity (byte/page units)*/ },
+	/* Code Descriptor for kernel */
+{	0x0,			/* segment base address  */
+	0xfffff,		/* length - all address space */
+	SDT_MEMERA,		/* segment type */
+	0,			/* segment descriptor priority level */
+	1,			/* segment descriptor present */
+	0, 0,
+	1,			/* default 32 vs 16 bit size */
+	1  			/* limit granularity (byte/page units)*/ },
+	/* Data Descriptor for kernel */
+{	0x0,			/* segment base address  */
+	0xfffff,		/* length - all address space */
+	SDT_MEMRWA,		/* segment type */
+	0,			/* segment descriptor priority level */
+	1,			/* segment descriptor present */
+	0, 0,
+	1,			/* default 32 vs 16 bit size */
+	1  			/* limit granularity (byte/page units)*/ },
+	/* LDT Descriptor */
+{	(int) ldt,		/* segment base address  */
+	sizeof(ldt)-1,		/* length - all address space */
+	SDT_SYSLDT,		/* segment type */
+	0,			/* segment descriptor priority level */
+	1,			/* segment descriptor present */
+	0, 0,
+	0,			/* unused - default 32 vs 16 bit size */
+	0  			/* limit granularity (byte/page units)*/ },
+	/* Null Descriptor - Placeholder */
+{	0x0,			/* segment base address  */
+	0x0,			/* length - all address space */
+	0,			/* segment type */
+	0,			/* segment descriptor priority level */
+	0,			/* segment descriptor present */
+	0, 0,
+	0,			/* default 32 vs 16 bit size */
+	0  			/* limit granularity (byte/page units)*/ },
+	/* Panic Tss Descriptor */
+{	(int) &panic_tss,	/* segment base address  */
+	sizeof(tss)-1,		/* length - all address space */
+	SDT_SYS386TSS,		/* segment type */
+	0,			/* segment descriptor priority level */
+	1,			/* segment descriptor present */
+	0, 0,
+	0,			/* unused - default 32 vs 16 bit size */
+	0  			/* limit granularity (byte/page units)*/ },
+	/* Proc 0 Tss Descriptor */
+{	(int) kstack,		/* segment base address  */
+	sizeof(tss)-1,		/* length - all address space */
+	SDT_SYS386TSS,		/* segment type */
+	0,			/* segment descriptor priority level */
+	1,			/* segment descriptor present */
+	0, 0,
+	0,			/* unused - default 32 vs 16 bit size */
+	0  			/* limit granularity (byte/page units)*/ },
+	/* User LDT Descriptor per process */
+{	(int) ldt,		/* segment base address  */
+	(512 * sizeof(union descriptor)-1),		/* length */
+	SDT_SYSLDT,		/* segment type */
+	0,			/* segment descriptor priority level */
+	1,			/* segment descriptor present */
+	0, 0,
+	0,			/* unused - default 32 vs 16 bit size */
+	0  			/* limit granularity (byte/page units)*/ },
+};
+
+/*
+ * Default (kernel-provided) LDT: three slots reserved to be overwritten
+ * by call gates, then the user code and data segments.
+ */
+struct soft_segment_descriptor ldt_segs[] = {
+	/* Null Descriptor - overwritten by call gate */
+{	0x0,			/* segment base address  */
+	0x0,			/* length - all address space */
+	0,			/* segment type */
+	0,			/* segment descriptor priority level */
+	0,			/* segment descriptor present */
+	0, 0,
+	0,			/* default 32 vs 16 bit size */
+	0  			/* limit granularity (byte/page units)*/ },
+	/* Null Descriptor - overwritten by call gate */
+{	0x0,			/* segment base address  */
+	0x0,			/* length - all address space */
+	0,			/* segment type */
+	0,			/* segment descriptor priority level */
+	0,			/* segment descriptor present */
+	0, 0,
+	0,			/* default 32 vs 16 bit size */
+	0  			/* limit granularity (byte/page units)*/ },
+	/* Null Descriptor - overwritten by call gate */
+{	0x0,			/* segment base address  */
+	0x0,			/* length - all address space */
+	0,			/* segment type */
+	0,			/* segment descriptor priority level */
+	0,			/* segment descriptor present */
+	0, 0,
+	0,			/* default 32 vs 16 bit size */
+	0  			/* limit granularity (byte/page units)*/ },
+	/* Code Descriptor for user */
+{	0x0,			/* segment base address  */
+	0xfffff,		/* length - all address space */
+	SDT_MEMERA,		/* segment type */
+	SEL_UPL,		/* segment descriptor priority level */
+	1,			/* segment descriptor present */
+	0, 0,
+	1,			/* default 32 vs 16 bit size */
+	1  			/* limit granularity (byte/page units)*/ },
+	/* Data Descriptor for user */
+{	0x0,			/* segment base address  */
+	0xfffff,		/* length - all address space */
+	SDT_MEMRWA,		/* segment type */
+	SEL_UPL,		/* segment descriptor priority level */
+	1,			/* segment descriptor present */
+	0, 0,
+	1,			/* default 32 vs 16 bit size */
+	1  			/* limit granularity (byte/page units)*/ } };
+
+/*
+ * setidt(idx, func, typ, dpl) -- install an interrupt/trap gate.
+ * `typ' is an SDT_* gate type; `dpl' is the privilege level required
+ * to raise the vector from software (SEL_UPL for int3/into).
+ */
+void
+setidt(idx, func, typ, dpl)
+	int idx;
+	void (*func)();
+	int typ;
+	int dpl;
+{
+	struct gate_descriptor *ip = idt + idx;
+
+	ip->gd_looffset = (int)func;
+	ip->gd_selector = 8;	/* kernel code selector, GSEL(GCODE_SEL, SEL_KPL) */
+	ip->gd_stkcpy = 0;
+	ip->gd_xx = 0;
+	ip->gd_type = typ;
+	ip->gd_dpl = dpl;
+	ip->gd_p = 1;
+	ip->gd_hioffset = ((int)func)>>16 ;
+}
+
+#define IDTVEC(name)	__CONCAT(X,name)
+typedef void idtvec_t();
+
+/* trap/interrupt entry points provided by the exception assembler code */
+extern idtvec_t
+	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
+	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(dble), IDTVEC(fpusegm),
+	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
+	IDTVEC(page), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(rsvd0),
+	IDTVEC(rsvd1), IDTVEC(rsvd2), IDTVEC(rsvd3), IDTVEC(rsvd4),
+	IDTVEC(rsvd5), IDTVEC(rsvd6), IDTVEC(rsvd7), IDTVEC(rsvd8),
+	IDTVEC(rsvd9), IDTVEC(rsvd10), IDTVEC(rsvd11), IDTVEC(rsvd12),
+	IDTVEC(rsvd13), IDTVEC(rsvd14), IDTVEC(syscall);
+
+int _gsel_tss;			/* selector for proc0's TSS */
+
+/*
+ * init386(first) -- early machine-dependent initialization: console,
+ * GDT/LDT/IDT setup, BIOS memory sizing (from RTC CMOS), a destructive
+ * memory test over extended memory, phys_avail[] setup, and the proc0
+ * TSS/pcb.  `first' is the first free physical address past the kernel.
+ */
+void
+init386(first)
+	int first;
+{
+	extern ssdtosd(), lgdt(), lidt(), lldt(), etext;
+	int x, *pi;
+	unsigned biosbasemem, biosextmem;
+	struct gate_descriptor *gdp;
+	extern int sigcode,szsigcode;
+	/* table descriptors - used to load tables by microp */
+	struct region_descriptor r_gdt, r_idt;
+	int pagesinbase, pagesinext;
+	int target_page;
+
+	proc0.p_addr = proc0paddr;
+
+	/*
+	 * Initialize the console before we print anything out.
+	 */
+
+	cninit ();
+
+	/*
+	 * make gdt memory segments, the code segment goes up to end of the
+	 * page with etext in it, the data segment goes to the end of
+	 * the address space
+	 */
+	gdt_segs[GCODE_SEL].ssd_limit = i386_btop(i386_round_page(&etext)) - 1;
+	gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1;	/* 0 - 1 wraps: maximum limit */
+	for (x=0; x < NGDT; x++) ssdtosd(gdt_segs+x, gdt+x);
+
+	/* make ldt memory segments */
+	/*
+	 * The data segment limit must not cover the user area because we
+	 * don't want the user area to be writable in copyout() etc. (page
+	 * level protection is lost in kernel mode on 386's).  Also, we
+	 * don't want the user area to be writable directly (page level
+	 * protection of the user area is not available on 486's with
+	 * CR0_WP set, because there is no user-read/kernel-write mode).
+	 *
+	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
+	 * should be spelled ...MAX_USER...
+	 */
+#define VM_END_USER_RW_ADDRESS	VM_MAXUSER_ADDRESS
+	/*
+	 * The code segment limit has to cover the user area until we move
+	 * the signal trampoline out of the user area.  This is safe because
+	 * the code segment cannot be written to directly.
+	 */
+#define VM_END_USER_R_ADDRESS	(VM_END_USER_RW_ADDRESS + UPAGES * NBPG)
+	ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
+	ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
+	/* Note. eventually want private ldts per process */
+	/* NOTE(review): only the first 5 LDT slots are initialized; confirm NLDT layout */
+	for (x=0; x < 5; x++) ssdtosd(ldt_segs+x, ldt+x);
+
+	/* exceptions */
+	setidt(0, &IDTVEC(div),  SDT_SYS386TGT, SEL_KPL);
+	setidt(1, &IDTVEC(dbg),  SDT_SYS386TGT, SEL_KPL);
+	setidt(2, &IDTVEC(nmi),  SDT_SYS386TGT, SEL_KPL);
+	setidt(3, &IDTVEC(bpt),  SDT_SYS386TGT, SEL_UPL);
+	setidt(4, &IDTVEC(ofl),  SDT_SYS386TGT, SEL_UPL);
+	setidt(5, &IDTVEC(bnd),  SDT_SYS386TGT, SEL_KPL);
+	setidt(6, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL);
+	setidt(7, &IDTVEC(dna),  SDT_SYS386TGT, SEL_KPL);
+	setidt(8, &IDTVEC(dble),  SDT_SYS386TGT, SEL_KPL);
+	setidt(9, &IDTVEC(fpusegm),  SDT_SYS386TGT, SEL_KPL);
+	setidt(10, &IDTVEC(tss),  SDT_SYS386TGT, SEL_KPL);
+	setidt(11, &IDTVEC(missing),  SDT_SYS386TGT, SEL_KPL);
+	setidt(12, &IDTVEC(stk),  SDT_SYS386TGT, SEL_KPL);
+	setidt(13, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL);
+	setidt(14, &IDTVEC(page),  SDT_SYS386TGT, SEL_KPL);
+	setidt(15, &IDTVEC(rsvd),  SDT_SYS386TGT, SEL_KPL);
+	setidt(16, &IDTVEC(fpu),  SDT_SYS386TGT, SEL_KPL);
+	setidt(17, &IDTVEC(rsvd0),  SDT_SYS386TGT, SEL_KPL);
+	setidt(18, &IDTVEC(rsvd1),  SDT_SYS386TGT, SEL_KPL);
+	setidt(19, &IDTVEC(rsvd2),  SDT_SYS386TGT, SEL_KPL);
+	setidt(20, &IDTVEC(rsvd3),  SDT_SYS386TGT, SEL_KPL);
+	setidt(21, &IDTVEC(rsvd4),  SDT_SYS386TGT, SEL_KPL);
+	setidt(22, &IDTVEC(rsvd5),  SDT_SYS386TGT, SEL_KPL);
+	setidt(23, &IDTVEC(rsvd6),  SDT_SYS386TGT, SEL_KPL);
+	setidt(24, &IDTVEC(rsvd7),  SDT_SYS386TGT, SEL_KPL);
+	setidt(25, &IDTVEC(rsvd8),  SDT_SYS386TGT, SEL_KPL);
+	setidt(26, &IDTVEC(rsvd9),  SDT_SYS386TGT, SEL_KPL);
+	setidt(27, &IDTVEC(rsvd10),  SDT_SYS386TGT, SEL_KPL);
+	setidt(28, &IDTVEC(rsvd11),  SDT_SYS386TGT, SEL_KPL);
+	setidt(29, &IDTVEC(rsvd12),  SDT_SYS386TGT, SEL_KPL);
+	setidt(30, &IDTVEC(rsvd13),  SDT_SYS386TGT, SEL_KPL);
+	setidt(31, &IDTVEC(rsvd14),  SDT_SYS386TGT, SEL_KPL);
+
+#include	"isa.h"
+#if	NISA >0
+	isa_defaultirq();
+#endif
+
+	r_gdt.rd_limit = sizeof(gdt) - 1;
+	r_gdt.rd_base =  (int) gdt;
+	lgdt(&r_gdt);
+
+	r_idt.rd_limit = sizeof(idt) - 1;
+	r_idt.rd_base = (int) idt;
+	lidt(&r_idt);
+
+	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
+	lldt(_default_ldt);
+	currentldt = _default_ldt;
+
+#include "ddb.h"
+#if NDDB > 0
+	kdb_init();
+	if (boothowto & RB_KDB)
+		Debugger("Boot flags requested debugger");
+#endif
+
+	/* Use BIOS values stored in RTC CMOS RAM, since probing
+	 * breaks certain 386 AT relics.
+	 */
+	biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
+	biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
+
+	/*
+	 * If BIOS tells us that it has more than 640k in the basemem,
+	 *	don't believe it - set it to 640k.
+	 */
+	if (biosbasemem > 640)
+		biosbasemem = 640;
+
+	/*
+	 * Some 386 machines might give us a bogus number for extended
+	 *	mem. If this happens, stop now.
+	 */
+#ifndef LARGEMEM
+	if (biosextmem > 65536) {
+		panic("extended memory beyond limit of 64MB");
+		/* NOTREACHED */
+	}
+#endif
+
+	pagesinbase = biosbasemem * 1024 / NBPG;
+	pagesinext = biosextmem * 1024 / NBPG;
+
+	/*
+	 * Special hack for chipsets that still remap the 384k hole when
+	 *	there's 16MB of memory - this really confuses people that
+	 *	are trying to use bus mastering ISA controllers with the
+	 *	"16MB limit"; they only have 16MB, but the remapping puts
+	 *	them beyond the limit.
+	 * XXX - this should be removed when bounce buffers are
+	 *	implemented.
+	 */
+	/*
+	 * If extended memory is between 15-16MB (16-17MB phys address range),
+	 *	chop it to 15MB.
+	 */
+	if ((pagesinext > 3840) && (pagesinext < 4096))
+		pagesinext = 3840;
+
+	/*
+	 * Maxmem isn't the "maximum memory", it's the highest page of
+	 * of the physical address space. It should be "Maxphyspage".
+	 */
+	Maxmem = pagesinext + 0x100000/PAGE_SIZE;
+
+#ifdef MAXMEM
+	if (MAXMEM/4 < Maxmem)
+		Maxmem = MAXMEM/4;
+#endif
+	/*
+	 * Calculate number of physical pages, but account for Maxmem
+	 *	limitation above.
+	 */
+	physmem = pagesinbase +
+	    (min(pagesinext + 0x100000/PAGE_SIZE, Maxmem) - 0x100000/PAGE_SIZE);
+
+	/* call pmap initialization to make new kernel address space */
+	pmap_bootstrap (first, 0);
+
+	/*
+	 * Do simple memory test over range of extended memory that BIOS
+	 *	indicates exists. Adjust Maxmem to the highest page of
+	 *	good memory.
+	 */
+	printf("Testing memory (%dMB)...", ptoa(Maxmem)/1024/1024);
+
+	for (target_page = Maxmem - 1; target_page >= atop(first); target_page--) {
+		extern struct pte *CMAP1;
+		extern caddr_t CADDR1;
+
+		/*
+		 * map page into kernel: valid, read/write, non-cacheable
+		 */
+		*(int *)CMAP1 = PG_V | PG_KW | PG_N | ptoa(target_page);
+		tlbflush();
+
+		/*
+		 * Test for alternating 1's and 0's
+		 */
+		filli(0xaaaaaaaa, CADDR1, PAGE_SIZE/sizeof(int));
+		if (test_page((int *)CADDR1, 0xaaaaaaaa)) {
+			Maxmem = target_page;
+			badpages++;
+			continue;
+		}
+		/*
+		 * Test for alternating 0's and 1's
+		 */
+		filli(0x55555555, CADDR1, PAGE_SIZE/sizeof(int));
+		if (test_page((int *)CADDR1, 0x55555555)) {
+			Maxmem = target_page;
+			badpages++;
+			continue;
+		}
+		/*
+		 * Test for all 1's
+		 */
+		filli(0xffffffff, CADDR1, PAGE_SIZE/sizeof(int));
+		if (test_page((int *)CADDR1, 0xffffffff)) {
+			Maxmem = target_page;
+			badpages++;
+			continue;
+		}
+		/*
+		 * Test zeroing of page
+		 */
+		bzero(CADDR1, PAGE_SIZE);
+		if (test_page((int *)CADDR1, 0)) {
+			/*
+			 * test of page failed
+			 */
+			Maxmem = target_page;
+			badpages++;
+			continue;
+		}
+	}
+	printf("done.\n");
+
+	avail_end = (Maxmem << PAGE_SHIFT)
+		    - i386_round_page(sizeof(struct msgbuf));
+
+	/*
+	 * Initialize pointers to the two chunks of memory; for use
+	 *	later in vm_page_startup.
+	 */
+	/* avail_start is initialized in pmap_bootstrap */
+	x = 0;
+	if (pagesinbase > 1) {
+		phys_avail[x++] = NBPG;		/* skip first page of memory */
+		phys_avail[x++] = pagesinbase * NBPG;	/* memory up to the ISA hole */
+	}
+	phys_avail[x++] = avail_start;	/* memory up to the end */
+	phys_avail[x++] = avail_end;
+	phys_avail[x++] = 0;		/* no more chunks */
+	phys_avail[x++] = 0;
+
+	/* now running on new page tables, configured,and u/iom is accessible */
+
+	/* make a initial tss so microp can get interrupt stack on syscall! */
+	proc0.p_addr->u_pcb.pcb_tss.tss_esp0 = (int) kstack + UPAGES*NBPG;
+	proc0.p_addr->u_pcb.pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
+	_gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
+
+	((struct i386tss *)gdt_segs[GPROC0_SEL].ssd_base)->tss_ioopt =
+		(sizeof(tss))<<16;
+
+	ltr(_gsel_tss);
+
+	/* make a call gate to reenter kernel with */
+	gdp = &ldt[LSYS5CALLS_SEL].gd;
+
+	x = (int) &IDTVEC(syscall);
+	gdp->gd_looffset = x++;		/* low 16 bits of handler; the ++ is dead (x unused after) */
+	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
+	gdp->gd_stkcpy = 1;
+	gdp->gd_type = SDT_SYS386CGT;
+	gdp->gd_dpl = SEL_UPL;
+	gdp->gd_p = 1;
+	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;
+
+	/* transfer to user mode */
+
+	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
+	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);
+
+	/* setup proc 0's pcb */
+	bcopy(&sigcode, proc0.p_addr->u_pcb.pcb_sigc, szsigcode);
+	proc0.p_addr->u_pcb.pcb_flags = 0;
+	proc0.p_addr->u_pcb.pcb_ptd = IdlePTD;
+}
+
+/*
+ * test_page(address, pattern) -- verify that every word of the page at
+ * `address' still holds `pattern'.  Returns 1 on the first mismatch,
+ * 0 when the whole page checks out.
+ */
+int
+test_page(address, pattern)
+	int *address;
+	int pattern;
+{
+	int i;
+
+	for (i = 0; i < PAGE_SIZE/sizeof(int); i++) {
+		if (address[i] != pattern)
+			return (1);
+	}
+	return (0);
+}
+
+/*
+ * insert an element into a queue
+ */
+#undef insque
+void			/* XXX replace with inline FIXME! */
+_insque(element, head)
+	register struct prochd *element, *head;
+{
+	/* link `element' into the doubly-linked ring immediately after `head' */
+	element->ph_link = head->ph_link;
+	head->ph_link = (struct proc *)element;
+	element->ph_rlink = (struct proc *)head;
+	/* element->ph_link still points at the old successor here */
+	((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element;
+}
+
+/*
+ * remove an element from a queue
+ */
+#undef remque
+void			/* XXX replace with inline FIXME! */
+_remque(element)
+	register struct prochd *element;
+{
+	/* splice `element' out of its doubly-linked ring */
+	((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink;
+	((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link;
+	element->ph_rlink = (struct proc *)0;	/* mark as off-queue */
+}
+
+/*
+ * The registers are in the frame; the frame is in the user area of
+ * the process in question; when the process is active, the registers
+ * are in "the kernel stack"; when it's not, they're still there, but
+ * things get flipped around. So, since p->p_md.md_regs is the whole address
+ * of the register set, take its offset from the kernel stack, and
+ * index into the user block. Don't you just *love* virtual memory?
+ * (I'm starting to think seymour is right...)
+ */
+
+/* Set the traced process's program counter (see register-layout comment above). */
+int
+ptrace_set_pc (struct proc *p, unsigned int addr) {
+	/* rebase md_regs from the kernel stack into p's user area */
+	void *regs = (char*)p->p_addr +
+		((char*) p->p_md.md_regs - (char*) kstack);
+
+	((struct trapframe *)regs)->tf_eip = addr;
+	return 0;
+}
+
+/* Arrange for the traced process to trap after executing one instruction. */
+int
+ptrace_single_step (struct proc *p) {
+	/* rebase md_regs from the kernel stack into p's user area */
+	void *regs = (char*)p->p_addr +
+		((char*) p->p_md.md_regs - (char*) kstack);
+
+	((struct trapframe *)regs)->tf_eflags |= PSL_T;	/* set trace flag */
+	return 0;
+}
+
+/*
+ * Copy the registers to user-space.
+ */
+
+int
+ptrace_getregs (struct proc *p, unsigned int *addr) {
+	int error;
+	struct reg regs = {0};
+
+	/* note: assignment inside the condition is intentional */
+	if (error = fill_regs (p, &regs))
+		return error;
+
+	return copyout (&regs, addr, sizeof (regs));
+}
+
+/* Copy a register set from user-space into the traced process's frame. */
+int
+ptrace_setregs (struct proc *p, unsigned int *addr) {
+	int error;
+	struct reg regs = {0};
+
+	/* note: assignment inside the condition is intentional */
+	if (error = copyin (addr, &regs, sizeof(regs)))
+		return error;
+
+	return set_regs (p, &regs);
+}
+
+/*
+ * fill_regs(p, regs) -- copy process `p's saved trapframe into the
+ * machine-independent `struct reg'.  The frame lives in p's user
+ * area, so md_regs is rebased from the kernel stack into p->p_addr.
+ * Always returns 0.  (Removed an unused local `error'.)
+ */
+int
+fill_regs(struct proc *p, struct reg *regs) {
+	struct trapframe *tp;
+	void *ptr = (char*)p->p_addr +
+		((char*) p->p_md.md_regs - (char*) kstack);
+
+	tp = ptr;
+	regs->r_es = tp->tf_es;
+	regs->r_ds = tp->tf_ds;
+	regs->r_edi = tp->tf_edi;
+	regs->r_esi = tp->tf_esi;
+	regs->r_ebp = tp->tf_ebp;
+	regs->r_ebx = tp->tf_ebx;
+	regs->r_edx = tp->tf_edx;
+	regs->r_ecx = tp->tf_ecx;
+	regs->r_eax = tp->tf_eax;
+	regs->r_eip = tp->tf_eip;
+	regs->r_cs = tp->tf_cs;
+	regs->r_eflags = tp->tf_eflags;
+	regs->r_esp = tp->tf_esp;
+	regs->r_ss = tp->tf_ss;
+	return 0;
+}
+
+/*
+ * set_regs(p, regs) -- inverse of fill_regs(): copy a machine-
+ * independent `struct reg' into process `p's saved trapframe in its
+ * user area.  No validation is done here -- callers are responsible
+ * for sanity checking.  Always returns 0.  (Removed an unused local
+ * `error'.)
+ */
+int
+set_regs (struct proc *p, struct reg *regs) {
+	struct trapframe *tp;
+	void *ptr = (char*)p->p_addr +
+		((char*) p->p_md.md_regs - (char*) kstack);
+
+	tp = ptr;
+	tp->tf_es = regs->r_es;
+	tp->tf_ds = regs->r_ds;
+	tp->tf_edi = regs->r_edi;
+	tp->tf_esi = regs->r_esi;
+	tp->tf_ebp = regs->r_ebp;
+	tp->tf_ebx = regs->r_ebx;
+	tp->tf_edx = regs->r_edx;
+	tp->tf_ecx = regs->r_ecx;
+	tp->tf_eax = regs->r_eax;
+	tp->tf_eip = regs->r_eip;
+	tp->tf_cs = regs->r_cs;
+	tp->tf_eflags = regs->r_eflags;
+	tp->tf_esp = regs->r_esp;
+	tp->tf_ss = regs->r_ss;
+	return 0;
+}
+
+#include "ddb.h"
+#if NDDB <= 0
+/*
+ * Stub Debugger() for kernels built without DDB: just log the request
+ * instead of dropping into a debugger.
+ */
+void
+Debugger(const char *msg)
+{
+	printf("Debugger(\"%s\") called.\n", msg);
+}
+#endif /* no DDB */
+
+#include <sys/disklabel.h>
+#define b_cylin b_resid
+#define dkpart(dev) (minor(dev) & 7)
+/*
+ * Determine the size of the transfer, and make sure it is
+ * within the boundaries of the partition. Adjust transfer
+ * if needed, and signal errors or early completion.
+ *
+ * Returns 1 if the transfer may proceed, 0 for an EOF exactly at the
+ * end of the partition, or -1 (with B_ERROR set) on error.
+ */
+int
+bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
+{
+	struct partition *p = lp->d_partitions + dkpart(bp->b_dev);
+	int labelsect = lp->d_partitions[0].p_offset;
+	int maxsz = p->p_size,
+		sz = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;
+
+	/* overwriting disk label ? */
+	/* XXX should also protect bootstrap in first 8K */
+	if (bp->b_blkno + p->p_offset <= LABELSECTOR + labelsect &&
+#if LABELSECTOR != 0
+	    bp->b_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
+#endif
+	    (bp->b_flags & B_READ) == 0 && wlabel == 0) {
+		bp->b_error = EROFS;
+		goto bad;
+	}
+
+#if defined(DOSBBSECTOR) && defined(notyet)
+	/* overwriting master boot record? */
+	if (bp->b_blkno + p->p_offset <= DOSBBSECTOR &&
+	    (bp->b_flags & B_READ) == 0 && wlabel == 0) {
+		bp->b_error = EROFS;
+		goto bad;
+	}
+#endif
+
+	/* beyond partition? */
+	if (bp->b_blkno < 0 || bp->b_blkno + sz > maxsz) {
+		/* if exactly at end of disk, return an EOF */
+		if (bp->b_blkno == maxsz) {
+			bp->b_resid = bp->b_bcount;
+			return(0);
+		}
+		/* or truncate if part of it fits */
+		sz = maxsz - bp->b_blkno;
+		if (sz <= 0) {
+			bp->b_error = EINVAL;
+			goto bad;
+		}
+		bp->b_bcount = sz << DEV_BSHIFT;
+	}
+
+	/* calculate cylinder for disksort to order transfers with */
+	bp->b_pblkno = bp->b_blkno + p->p_offset;
+	bp->b_cylin = bp->b_pblkno / lp->d_secpercyl;
+	return(1);
+
+bad:
+	bp->b_flags |= B_ERROR;
+	return(-1);
+}
+
diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
new file mode 100644
index 0000000..1b8f187
--- /dev/null
+++ b/sys/amd64/amd64/mem.c
@@ -0,0 +1,259 @@
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and code derived from software contributed to
+ * Berkeley by William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: mem.c 1.13 89/10/08$
+ * from: @(#)mem.c 7.2 (Berkeley) 5/9/91
+ * $Id: mem.c,v 1.6 1993/12/19 00:50:06 wollman Exp $
+ */
+
+/*
+ * Memory special file
+ */
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/buf.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+
+#include <machine/cpu.h>
+#include <machine/psl.h>
+
+#include <vm/vm_param.h>
+#include <vm/lock.h>
+#include <vm/vm_prot.h>
+#include <vm/pmap.h>
+
+extern char *ptvmmap; /* poor name! */
+/*ARGSUSED*/
+/*
+ * mmclose -- close a memory special file.  For minor 14 (the I/O
+ * port device) drop the process's I/O privilege by clearing IOPL in
+ * its saved eflags.  NOTE(review): the second parameter is declared
+ * `struct uio *' but this cdevsw slot conventionally carries the
+ * open/close flags -- confirm against the cdevsw entry.
+ */
+int
+mmclose(dev, uio, flags)
+	dev_t dev;
+	struct uio *uio;
+	int flags;
+{
+	struct trapframe *fp;
+
+	switch (minor(dev)) {
+	case 14:
+		fp = (struct trapframe *)curproc->p_md.md_regs;
+		fp->tf_eflags &= ~PSL_IOPL;
+		break;
+	default:
+		break;
+	}
+	return(0);
+}
+/*ARGSUSED*/
+/*
+ * mmopen -- open a memory special file.  For minor 14 (the I/O port
+ * device) grant the process I/O privilege by setting IOPL in its
+ * saved eflags.  NOTE(review): no privilege check is visible here;
+ * presumably access control relies on the device node's permissions
+ * -- confirm.
+ */
+int
+mmopen(dev, uio, flags)
+	dev_t dev;
+	struct uio *uio;
+	int flags;
+{
+	struct trapframe *fp;
+
+	switch (minor(dev)) {
+	case 14:
+		fp = (struct trapframe *)curproc->p_md.md_regs;
+		fp->tf_eflags |= PSL_IOPL;
+		break;
+	default:
+		break;
+	}
+	return(0);
+}
+/*ARGSUSED*/
+/*
+ * mmrw -- read/write for the memory special files:
+ *  minor 0  /dev/mem   physical memory (each page mapped transiently
+ *                      at ptvmmap)
+ *  minor 1  /dev/kmem  kernel virtual memory (bounds-checked with
+ *                      kernacc)
+ *  minor 2  rathole    EOF on read, discard on write
+ *  minor 12 /dev/zero  nulls on read, discard on write
+ *  minors 14-16 (/dev/ioport[bwl]) are compiled out under "notyet".
+ */
+int
+mmrw(dev, uio, flags)
+	dev_t dev;
+	struct uio *uio;
+	int flags;
+{
+	register int o;
+	register u_int c, v;
+	register struct iovec *iov;
+	int error = 0;
+	caddr_t zbuf = NULL;
+
+	while (uio->uio_resid > 0 && error == 0) {
+		iov = uio->uio_iov;
+		if (iov->iov_len == 0) {
+			uio->uio_iov++;
+			uio->uio_iovcnt--;
+			if (uio->uio_iovcnt < 0)
+				panic("mmrw");
+			continue;
+		}
+		switch (minor(dev)) {
+
+/* minor device 0 is physical memory */
+		case 0:
+			v = uio->uio_offset;
+			pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
+				uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
+				TRUE);
+			/* clamp the copy to one page at the mapped window */
+			o = (int)uio->uio_offset & PGOFSET;
+			c = (u_int)(NBPG - ((int)iov->iov_base & PGOFSET));
+			c = min(c, (u_int)(NBPG - o));
+			c = min(c, (u_int)iov->iov_len);
+			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
+			pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
+				(vm_offset_t)&ptvmmap[NBPG]);
+			continue;
+
+/* minor device 1 is kernel memory */
+		case 1:
+			c = iov->iov_len;
+			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
+			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
+				return(EFAULT);
+			error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
+			continue;
+
+/* minor device 2 is EOF/RATHOLE */
+		case 2:
+			if (uio->uio_rw == UIO_READ)
+				return (0);
+			c = iov->iov_len;
+			break;
+
+/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
+		case 12:
+			if (uio->uio_rw == UIO_WRITE) {
+				c = iov->iov_len;
+				break;
+			}
+			/* lazily allocate one zero-filled buffer, reused per call */
+			if (zbuf == NULL) {
+				zbuf = (caddr_t)
+				    malloc(CLBYTES, M_TEMP, M_WAITOK);
+				bzero(zbuf, CLBYTES);
+			}
+			c = min(iov->iov_len, CLBYTES);
+			error = uiomove(zbuf, (int)c, uio);
+			continue;
+
+#ifdef notyet
+/* 386 I/O address space (/dev/ioport[bwl]) is a read/write access to separate
+   i/o device address bus, different than memory bus. Semantics here are
+   very different than ordinary read/write, as if iov_len is a multiple
+   an implied string move from a single port will be done. Note that lseek
+   must be used to set the port number reliably. */
+		case 14:
+			if (iov->iov_len == 1) {
+				u_char tmp;
+				tmp = inb(uio->uio_offset);
+				error = uiomove (&tmp, iov->iov_len, uio);
+			} else {
+				if (!useracc((caddr_t)iov->iov_base,
+					iov->iov_len, uio->uio_rw))
+					return (EFAULT);
+				insb(uio->uio_offset, iov->iov_base,
+					iov->iov_len);
+			}
+			break;
+		case 15:
+			if (iov->iov_len == sizeof (short)) {
+				u_short tmp;
+				tmp = inw(uio->uio_offset);
+				error = uiomove (&tmp, iov->iov_len, uio);
+			} else {
+				if (!useracc((caddr_t)iov->iov_base,
+					iov->iov_len, uio->uio_rw))
+					return (EFAULT);
+				insw(uio->uio_offset, iov->iov_base,
+					iov->iov_len/ sizeof (short));
+			}
+			break;
+		case 16:
+			if (iov->iov_len == sizeof (long)) {
+				u_long tmp;
+				tmp = inl(uio->uio_offset);
+				error = uiomove (&tmp, iov->iov_len, uio);
+			} else {
+				if (!useracc((caddr_t)iov->iov_base,
+					iov->iov_len, uio->uio_rw))
+					return (EFAULT);
+				insl(uio->uio_offset, iov->iov_base,
+					iov->iov_len/ sizeof (long));
+			}
+			break;
+#endif
+
+		default:
+			return (ENXIO);
+		}
+		if (error)
+			break;
+		/* advance the iovec past the `c' bytes consumed above */
+		iov->iov_base += c;
+		iov->iov_len -= c;
+		uio->uio_offset += c;
+		uio->uio_resid -= c;
+	}
+	if (zbuf)
+		free(zbuf, M_TEMP);
+	return (error);
+}
+
+
+
+
+/*
+ * memmmap -- allow user processes to mmap() the memory special files
+ * directly instead of going through read/write.  Returns the physical
+ * page frame number backing `offset', or -1 for unsupported minors.
+ */
+int
+memmmap(dev_t dev, int offset, int nprot)
+{
+	switch (minor(dev)) {
+	case 0:		/* /dev/mem: physical memory */
+		return i386_btop(offset);
+	case 1:		/* /dev/kmem: kernel virtual memory */
+		return i386_btop(vtophys(offset));
+	default:
+		return -1;
+	}
+}
+
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
new file mode 100644
index 0000000..88db9dd
--- /dev/null
+++ b/sys/amd64/amd64/pmap.c
@@ -0,0 +1,1944 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ * Copyright (c) 1994 David Greenman
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
+ * $Id: pmap.c,v 1.24 1994/04/20 07:06:14 davidg Exp $
+ */
+
+/*
+ * Derived from hp300 version by Mike Hibler, this version by William
+ * Jolitz uses a recursive map [a pde points to the page directory] to
+ * map the page tables using the pagetables themselves. This is done to
+ * reduce the impact on kernel virtual memory for lots of sparse address
+ * space, and to reduce the cost of memory to each process.
+ *
+ * Derived from: hp300/@(#)pmap.c 7.1 (Berkeley) 12/5/90
+ */
+/*
+ * Major modifications by John S. Dyson primarily to support
+ * pageable page tables, eliminating pmap_attributes,
+ * discontiguous memory pages, and using more efficient string
+ * instructions. Jan 13, 1994. Further modifications on Mar 2, 1994,
+ * general clean-up and efficiency mods.
+ */
+
+/*
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/user.h>
+
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
+#include <i386/include/cpufunc.h>
+#include <i386/include/cputypes.h>
+
+#include <i386/isa/isa.h>
+
+/*
+ * Allocate various and sundry SYSMAPs used in the days of old VM
+ * and not yet converted. XXX.
+ */
+#define BSDVM_COMPAT 1
+
+/*
+ * Get PDEs and PTEs for user/kernel address space
+ */
+#define pmap_pde(m, v) (&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
+#define pdir_pde(m, v) (m[((vm_offset_t)(v) >> PD_SHIFT)&1023])
+
+#define pmap_pte_pa(pte) (*(int *)(pte) & PG_FRAME)
+
+#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0)
+#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0)
+#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0)
+#define pmap_pte_u(pte) ((*(int *)pte & PG_U) != 0)
+#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0)
+
+#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
+#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
+
+/*
+ * Given a map and a machine independent protection code,
+ * convert to a vax protection code.
+ */
+#define pte_prot(m, p) (protection_codes[p])
+int protection_codes[8];
+
+struct pmap kernel_pmap_store;
+pmap_t kernel_pmap;
+
+vm_offset_t phys_avail[6]; /* 2 entries + 1 null */
+vm_offset_t avail_start; /* PA of first available physical page */
+vm_offset_t avail_end; /* PA of last available physical page */
+vm_size_t mem_size; /* memory size in bytes */
+vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
+vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+int i386pagesperpage; /* PAGE_SIZE / I386_PAGE_SIZE */
+boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
+vm_offset_t vm_first_phys, vm_last_phys;
+
+static inline boolean_t pmap_testbit();
+static inline void pmap_changebit();
+static inline int pmap_is_managed();
+static inline void *vm_get_pmap();
+static inline void vm_put_pmap();
+inline void pmap_use_pt();
+inline void pmap_unuse_pt();
+inline pt_entry_t * pmap_pte();
+static inline pv_entry_t get_pv_entry();
+void pmap_alloc_pv_entry();
+void pmap_clear_modify();
+void i386_protection_init();
+extern vm_offset_t clean_sva, clean_eva;
+extern int cpu_class;
+
+#if BSDVM_COMPAT
+#include "msgbuf.h"
+
+/*
+ * All those kernel PT submaps that BSD is so fond of
+ */
+pt_entry_t *CMAP1, *CMAP2, *ptmmap;
+caddr_t CADDR1, CADDR2, ptvmmap;
+pt_entry_t *msgbufmap;
+struct msgbuf *msgbufp;
+#endif
+
+void init_pv_entries(int) ;
+
+/*
+ * Routine: pmap_pte
+ * Function:
+ * Extract the page table entry associated
+ * with the given map/virtual_address pair.
+ * [ what about induced faults -wfj]
+ */
+
+inline pt_entry_t *
+pmap_pte(pmap, va)
+ pmap_t pmap;
+ vm_offset_t va;
+{
+
+ /* only proceed if the page-directory entry for va is valid */
+ if (pmap && *pmap_pde(pmap, va)) {
+ vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
+ /* are we current address space or kernel? */
+ if ( (pmap == kernel_pmap) || (frame == ((int) PTDpde & PG_FRAME)))
+ return ((pt_entry_t *) vtopte(va));
+ /* otherwise, we are alternate address space */
+ else {
+ if ( frame != ((int) APTDpde & PG_FRAME) ) {
+ /* repoint the alternate recursive mapping at this
+ * pmap's directory and flush stale TLB entries */
+ APTDpde = pmap->pm_pdir[PTDPTDI];
+ tlbflush();
+ }
+ return((pt_entry_t *) avtopte(va));
+ }
+ }
+ return(0); /* no valid PDE -> no PTE for va */
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+
+vm_offset_t
+pmap_extract(pmap, va)
+ register pmap_t pmap;
+ vm_offset_t va;
+{
+ pd_entry_t save; /* NOTE(review): unused local */
+ vm_offset_t pa;
+ int s; /* NOTE(review): unused local */
+
+ /* valid PDE required before dereferencing the page table */
+ if (pmap && *pmap_pde(pmap, va)) {
+ vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
+ /* are we current address space or kernel? */
+ if ( (pmap == kernel_pmap)
+ || (frame == ((int) PTDpde & PG_FRAME)) ) {
+ pa = *(int *) vtopte(va);
+ /* otherwise, we are alternate address space */
+ } else {
+ if ( frame != ((int) APTDpde & PG_FRAME)) {
+ /* switch the alternate map to this pmap, flush TLB */
+ APTDpde = pmap->pm_pdir[PTDPTDI];
+ tlbflush();
+ }
+ pa = *(int *) avtopte(va);
+ }
+ /* combine page frame from the PTE with the offset within the page */
+ pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
+ return pa;
+ }
+ return 0; /* no mapping */
+
+}
+
+/*
+ * determine if a page is managed (memory vs. device)
+ */
+static inline int
+pmap_is_managed(pa)
+ vm_offset_t pa;
+{
+ int i;
+
+ /* before pmap_init completes, no pages are considered managed */
+ if (!pmap_initialized)
+ return 0;
+
+ /* phys_avail[] holds [start,end) pairs; a zero end terminates the list */
+ for (i = 0; phys_avail[i + 1]; i += 2) {
+ if (pa >= phys_avail[i] && pa < phys_avail[i + 1])
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * find the vm_page_t of a pte (only) given va of pte and pmap
+ */
+inline vm_page_t
+pmap_pte_vm_page(pmap, pt)
+ pmap_t pmap;
+ vm_offset_t pt;
+{
+ pt = i386_trunc_page( pt); /* page-align the pte address */
+ pt = (pt - UPT_MIN_ADDRESS) / NBPG; /* index of the PT page in the user PT area */
+ pt = ((vm_offset_t) pmap->pm_pdir[pt]) & PG_FRAME; /* physical addr from the PDE */
+ return PHYS_TO_VM_PAGE(pt);
+}
+
+/*
+ * Wire a page table page
+ */
+inline void
+pmap_use_pt(pmap, va)
+ pmap_t pmap;
+ vm_offset_t va;
+{
+ vm_offset_t pt;
+
+ /* kernel addresses and pre-init mappings have permanently wired PT pages */
+ if (va >= VM_MAX_ADDRESS || !pmap_initialized)
+ return;
+
+ pt = (vm_offset_t) vtopte(va);
+ vm_page_hold( pmap_pte_vm_page(pmap, pt)); /* bump hold count on the PT page */
+}
+
+/*
+ * Unwire a page table page
+ */
+inline void
+pmap_unuse_pt(pmap, va)
+ pmap_t pmap;
+ vm_offset_t va;
+{
+ vm_offset_t pt;
+
+ /* mirror of pmap_use_pt: nothing to release for kernel/pre-init mappings */
+ if (va >= VM_MAX_ADDRESS || !pmap_initialized)
+ return;
+
+ pt = (vm_offset_t) vtopte(va);
+ vm_page_unhold( pmap_pte_vm_page(pmap, pt)); /* drop hold count on the PT page */
+}
+
+/* [ macro again?, should I force kstack into user map here? -wfj ] */
+void
+pmap_activate(pmap, pcbp)
+ register pmap_t pmap;
+ struct pcb *pcbp;
+{
+ PMAP_ACTIVATE(pmap, pcbp);
+}
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ * Map the kernel's code and data, and allocate the system page table.
+ *
+ * On the I386 this is called after mapping has already been enabled
+ * and just syncs the pmap module with what has already been done.
+ * [We can't call it easily with mapping off since the kernel is not
+ * mapped with PA == VA, hence we would have to relocate every address
+ * from the linked base (virtual) address "KERNBASE" to the actual
+ * (physical) address starting relative to 0]
+ */
+
+#define DMAPAGES 8
+void
+pmap_bootstrap(firstaddr, loadaddr)
+ vm_offset_t firstaddr;
+ vm_offset_t loadaddr;
+{
+#if BSDVM_COMPAT
+ vm_offset_t va;
+ pt_entry_t *pte;
+#endif
+ extern int IdlePTD;
+
+ /* first DMAPAGES pages past firstaddr are reserved below for bus dma */
+ avail_start = firstaddr + DMAPAGES*NBPG;
+
+ virtual_avail = (vm_offset_t) KERNBASE + avail_start;
+ virtual_end = VM_MAX_KERNEL_ADDRESS;
+ i386pagesperpage = PAGE_SIZE / NBPG;
+
+ /*
+ * Initialize protection array.
+ */
+ i386_protection_init();
+
+ /*
+ * The kernel's pmap is statically allocated so we don't
+ * have to use pmap_create, which is unlikely to work
+ * correctly at this part of the boot sequence.
+ */
+ kernel_pmap = &kernel_pmap_store;
+
+ /* page directory built by locore lives at physical IdlePTD */
+ kernel_pmap->pm_pdir = (pd_entry_t *)(KERNBASE + IdlePTD);
+
+ simple_lock_init(&kernel_pmap->pm_lock);
+ kernel_pmap->pm_count = 1;
+
+#if BSDVM_COMPAT
+ /*
+ * Allocate all the submaps we need
+ */
+#define SYSMAP(c, p, v, n) \
+ v = (c)va; va += ((n)*NBPG); p = pte; pte += (n);
+
+ va = virtual_avail;
+ pte = pmap_pte(kernel_pmap, va);
+
+ SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
+ SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )
+ SYSMAP(caddr_t ,ptmmap ,ptvmmap ,1 )
+ SYSMAP(struct msgbuf * ,msgbufmap ,msgbufp ,1 )
+ virtual_avail = va;
+#endif
+ /*
+ * reserve special hunk of memory for use by bus dma as a bounce
+ * buffer (contiguous virtual *and* physical memory). for now,
+ * assume vm does not use memory beneath hole, and we know that
+ * the bootstrap uses top 32k of base memory. -wfj
+ */
+ {
+ extern vm_offset_t isaphysmem;
+ /* NOTE(review): 'va' is declared only under BSDVM_COMPAT but is
+ * used here unconditionally -- confirm BSDVM_COMPAT 0 still builds */
+ isaphysmem = va;
+
+ virtual_avail = pmap_map(va, firstaddr,
+ firstaddr + DMAPAGES*NBPG, VM_PROT_ALL);
+ }
+
+ /* zap the identity mapping left by locore, then flush the TLB */
+ *(int *)PTD = 0;
+ tlbflush();
+
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ * pmap_init has been enhanced to support discontiguous physical
+ * memory in a fairly consistent way.
+ */
+void
+pmap_init(phys_start, phys_end)
+ vm_offset_t phys_start, phys_end;
+{
+ vm_offset_t addr, addr2; /* NOTE(review): addr2 is unused */
+ vm_size_t npg, s;
+ int rv; /* NOTE(review): unused */
+ int i;
+ extern int KPTphys; /* NOTE(review): unused here */
+ extern int IdlePTD;
+
+ /*
+ * Now that kernel map has been allocated, we can mark as
+ * unavailable regions which we have mapped in locore.
+ */
+ /* reserve the ISA device-memory hole (0xa0000-0x100000) */
+ addr = atdevbase;
+ (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
+ &addr, (0x100000-0xa0000), FALSE);
+
+ /* reserve the page directory + kernel page tables set up by locore */
+ addr = (vm_offset_t) KERNBASE + IdlePTD;
+ vm_object_reference(kernel_object);
+ (void) vm_map_find(kernel_map, kernel_object, addr,
+ &addr, (4 + NKPT) * NBPG, FALSE);
+
+
+ /*
+ * calculate the number of pv_entries needed
+ */
+ vm_first_phys = phys_avail[0];
+ for (i = 0; phys_avail[i + 1]; i += 2) ; /* find end of phys_avail pairs */
+ npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / NBPG;
+
+ /*
+ * Allocate memory for random pmap data structures. Includes the
+ * pv_head_table.
+ */
+ s = (vm_size_t) (sizeof(struct pv_entry) * npg);
+ s = i386_round_page(s);
+ addr = (vm_offset_t) kmem_alloc(kernel_map, s);
+ pv_table = (pv_entry_t) addr;
+
+ /*
+ * init the pv free list
+ */
+ init_pv_entries(npg);
+ /*
+ * Now it is safe to enable pv_table recording.
+ */
+ pmap_initialized = TRUE;
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * For now, VM is already on, we only need to map the
+ * specified memory.
+ */
+vm_offset_t
+pmap_map(virt, start, end, prot)
+ vm_offset_t virt;
+ vm_offset_t start;
+ vm_offset_t end;
+ int prot;
+{
+ /* map [start,end) physical, one page at a time, starting at virt;
+ * returns the first virtual address past the new mappings */
+ while (start < end) {
+ pmap_enter(kernel_pmap, virt, start, prot, FALSE);
+ virt += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+ return(virt);
+}
+
+/*
+ * Create and return a physical map.
+ *
+ * If the size specified for the map
+ * is zero, the map is an actual physical
+ * map, and may be referenced by the
+ * hardware.
+ *
+ * If the size specified is non-zero,
+ * the map will be used in software only, and
+ * is bounded by that size.
+ *
+ * [ just allocate a ptd and mark it uninitialized -- should we track
+ * with a table which process has which ptd? -wfj ]
+ */
+
+pmap_t
+pmap_create(size)
+ vm_size_t size;
+{
+ register pmap_t pmap;
+
+ /*
+ * Software use map does not need a pmap
+ */
+ if (size)
+ return(NULL);
+
+ /* allocate, zero, and initialize a fresh hardware pmap */
+ pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
+ bzero(pmap, sizeof(*pmap));
+ pmap_pinit(pmap);
+ return (pmap);
+}
+
+
+/* overlay for a page-sized page-directory allocation; the 'next' link is
+ * not used in the code visible here -- presumably for a freelist. */
+struct pmaplist {
+ struct pmaplist *next;
+};
+
+/* Allocate one zeroed, wired kernel page to hold a page directory. */
+static inline void *
+vm_get_pmap()
+{
+ struct pmaplist *rtval;
+
+ rtval = (struct pmaplist *)kmem_alloc(kernel_map, ctob(1));
+ bzero(rtval, ctob(1));
+ return rtval;
+}
+
+/* Free a page-directory page obtained from vm_get_pmap(). */
+static inline void
+vm_put_pmap(up)
+ struct pmaplist *up;
+{
+ kmem_free(kernel_map, (vm_offset_t)up, ctob(1));
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ */
+void
+pmap_pinit(pmap)
+ register struct pmap *pmap;
+{
+ /*
+ * No need to allocate page table space yet but we do need a
+ * valid page directory table.
+ */
+ pmap->pm_pdir = (pd_entry_t *) vm_get_pmap();
+
+ /* wire in kernel global address entries */
+ bcopy(PTD+KPTDI, pmap->pm_pdir+KPTDI, NKPT*PTESIZE);
+
+ /* install self-referential address mapping entry */
+ /* the PDE at PTDPTDI points back at the directory itself, making all
+ * page tables visible through the recursive PTmap window */
+ *(int *)(pmap->pm_pdir+PTDPTDI) =
+ ((int)pmap_kextract((vm_offset_t)pmap->pm_pdir)) | PG_V | PG_KW;
+
+ pmap->pm_count = 1;
+ simple_lock_init(&pmap->pm_lock);
+}
+
+/*
+ * Retire the given physical map from service.
+ * Should only be called if the map contains
+ * no valid mappings.
+ */
+void
+pmap_destroy(pmap)
+ register pmap_t pmap;
+{
+ int count;
+
+ if (pmap == NULL)
+ return;
+
+ /* drop a reference; free the pmap when the last one goes away */
+ simple_lock(&pmap->pm_lock);
+ count = --pmap->pm_count;
+ simple_unlock(&pmap->pm_lock);
+ if (count == 0) {
+ pmap_release(pmap);
+ free((caddr_t)pmap, M_VMPMAP);
+ }
+}
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap)
+ register struct pmap *pmap;
+{
+ /* only the page directory page needs releasing */
+ vm_put_pmap((struct pmaplist *) pmap->pm_pdir);
+}
+
+/*
+ * Add a reference to the specified pmap.
+ */
+void
+pmap_reference(pmap)
+ pmap_t pmap;
+{
+ if (pmap != NULL) {
+ /* bump the reference count under the pmap lock */
+ simple_lock(&pmap->pm_lock);
+ pmap->pm_count++;
+ simple_unlock(&pmap->pm_lock);
+ }
+}
+
+#define PV_FREELIST_MIN ((NBPG / sizeof (struct pv_entry)) / 2)
+
+/*
+ * Data for the pv entry allocation mechanism
+ */
+int pv_freelistcnt;
+pv_entry_t pv_freelist;
+vm_offset_t pvva;
+int npvvapg;
+
+/*
+ * free the pv_entry back to the free list
+ */
+inline static void
+free_pv_entry(pv)
+ pv_entry_t pv;
+{
+ if (!pv) return; /* tolerate NULL */
+ /* push onto the head of the singly-linked freelist */
+ ++pv_freelistcnt;
+ pv->pv_next = pv_freelist;
+ pv_freelist = pv;
+}
+
+/*
+ * get a new pv_entry, allocating a block from the system
+ * when needed.
+ * the memory allocation is performed bypassing the malloc code
+ * because of the possibility of allocations at interrupt time.
+ */
+static inline pv_entry_t
+get_pv_entry()
+{
+ pv_entry_t tmp;
+
+ /*
+ * get more pv_entry pages if needed
+ */
+ /* keep a reserve so allocations at interrupt time cannot run dry;
+ * pmap_alloc_pv_entry panics if it cannot replenish the list */
+ while (pv_freelistcnt < PV_FREELIST_MIN || pv_freelist == 0) {
+ pmap_alloc_pv_entry();
+ }
+
+ /*
+ * get a pv_entry off of the free list
+ */
+ --pv_freelistcnt;
+ tmp = pv_freelist;
+ pv_freelist = tmp->pv_next;
+ /* return the entry zeroed */
+ tmp->pv_pmap = 0;
+ tmp->pv_va = 0;
+ tmp->pv_next = 0;
+ return tmp;
+}
+
+/*
+ * this *strange* allocation routine *statistically* eliminates the
+ * *possibility* of a malloc failure (*FATAL*) for a pv_entry_t data structure.
+ * also -- this code is MUCH MUCH faster than the malloc equiv...
+ */
+void
+pmap_alloc_pv_entry()
+{
+ /*
+ * do we have any pre-allocated map-pages left?
+ */
+ if (npvvapg) {
+ vm_page_t m;
+ /*
+ * we do this to keep recursion away
+ */
+ /* temporarily inflate the count so the pmap_enter below does not
+ * re-enter get_pv_entry/pmap_alloc_pv_entry */
+ pv_freelistcnt += PV_FREELIST_MIN;
+ /*
+ * allocate a physical page out of the vm system
+ */
+ /* assignment in condition is intentional: proceed only on success */
+ if (m = vm_page_alloc(kernel_object, pvva-vm_map_min(kernel_map))) {
+ int newentries;
+ int i;
+ pv_entry_t entry;
+ newentries = (NBPG/sizeof (struct pv_entry));
+ /*
+ * wire the page
+ */
+ vm_page_wire(m);
+ m->flags &= ~PG_BUSY;
+ /*
+ * let the kernel see it
+ */
+ pmap_enter(vm_map_pmap(kernel_map), pvva,
+ VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT,1);
+
+ entry = (pv_entry_t) pvva;
+ /*
+ * update the allocation pointers
+ */
+ pvva += NBPG;
+ --npvvapg;
+
+ /*
+ * free the entries into the free list
+ */
+ for (i = 0; i < newentries; i++) {
+ free_pv_entry(entry);
+ entry++;
+ }
+ }
+ /* undo the anti-recursion inflation */
+ pv_freelistcnt -= PV_FREELIST_MIN;
+ }
+ if (!pv_freelist)
+ panic("get_pv_entry: cannot get a pv_entry_t");
+}
+
+
+
+/*
+ * init the pv_entry allocation system
+ */
+#define PVSPERPAGE 64
+void
+init_pv_entries(npg)
+ int npg;
+{
+ /*
+ * allocate enough kvm space for PVSPERPAGE entries per page (lots)
+ * kvm space is fairly cheap, be generous!!! (the system can panic
+ * if this is too small.)
+ */
+ npvvapg = ((npg*PVSPERPAGE) * sizeof(struct pv_entry) + NBPG - 1)/NBPG;
+ pvva = kmem_alloc_pageable(kernel_map, npvvapg * NBPG);
+ /*
+ * get the first batch of entries
+ */
+ /* get_pv_entry triggers the first page allocation; the entry itself
+ * is returned to the freelist immediately */
+ free_pv_entry(get_pv_entry());
+}
+
+/* Return the base of the recursive page-table window for the given pmap:
+ * PTmap when it is the current (or kernel) address space, APTmap after
+ * switching the alternate mapping to it. */
+static pt_entry_t *
+get_pt_entry(pmap)
+ pmap_t pmap;
+{
+ pt_entry_t *ptp;
+ vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
+ /* are we current address space or kernel? */
+ if (pmap == kernel_pmap || frame == ((int) PTDpde & PG_FRAME)) {
+ ptp=PTmap;
+ /* otherwise, we are alternate address space */
+ } else {
+ if ( frame != ((int) APTDpde & PG_FRAME)) {
+ APTDpde = pmap->pm_pdir[PTDPTDI];
+ tlbflush();
+ }
+ ptp=APTmap;
+ }
+ return ptp;
+}
+
+/*
+ * If it is the first entry on the list, it is actually
+ * in the header and we must copy the following entry up
+ * to the header. Otherwise we must search the list for
+ * the entry. In either case we free the now unused entry.
+ */
+void
+pmap_remove_entry(pmap, pv, va)
+ struct pmap *pmap;
+ pv_entry_t pv;
+ vm_offset_t va;
+{
+ pv_entry_t npv;
+ int wired; /* NOTE(review): unused local */
+ int s;
+ s = splimp(); /* pv lists are touched at interrupt time */
+ if (pmap == pv->pv_pmap && va == pv->pv_va) {
+ /* match is in the header entry: promote the next entry into it */
+ npv = pv->pv_next;
+ if (npv) {
+ *pv = *npv;
+ free_pv_entry(npv);
+ } else {
+ pv->pv_pmap = NULL; /* list now empty */
+ }
+ } else {
+ /* search the chain for the (pmap, va) pair */
+ for (npv = pv->pv_next; npv; npv = npv->pv_next) {
+ if (pmap == npv->pv_pmap && va == npv->pv_va) {
+ break;
+ }
+ pv = npv;
+ }
+ if (npv) {
+ pv->pv_next = npv->pv_next;
+ free_pv_entry(npv);
+ }
+ }
+ splx(s);
+}
+
+/*
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
+void
+pmap_remove(pmap, sva, eva)
+ struct pmap *pmap;
+ register vm_offset_t sva;
+ register vm_offset_t eva;
+{
+ register pt_entry_t *ptp,*ptq;
+ vm_offset_t pa;
+ register pv_entry_t pv;
+ vm_offset_t va;
+ vm_page_t m;
+ pt_entry_t oldpte;
+
+ if (pmap == NULL)
+ return;
+
+ ptp = get_pt_entry(pmap); /* recursive PT window for this pmap */
+
+/*
+ * special handling of removing one page. a very
+ * common operation and easy to short circuit some
+ * code.
+ */
+ if( (sva + NBPG) == eva) {
+
+ if( *pmap_pde( pmap, sva) == 0)
+ return; /* no page table here */
+
+ ptq = ptp + i386_btop(sva);
+
+ if( !*ptq)
+ return; /* no mapping */
+ /*
+ * Update statistics
+ */
+ if (pmap_pte_w(ptq))
+ pmap->pm_stats.wired_count--;
+ pmap->pm_stats.resident_count--;
+
+ pa = pmap_pte_pa(ptq);
+ oldpte = *ptq;
+ *ptq = 0; /* invalidate the mapping */
+
+ if (pmap_is_managed(pa)) {
+ /* propagate hardware-modified state to the vm_page, except
+ * through the pageout-clean window [clean_sva, clean_eva) */
+ if ((((int) oldpte & PG_M) && (sva < USRSTACK || sva > UPT_MAX_ADDRESS))
+ || (sva >= USRSTACK && sva < USRSTACK+(UPAGES*NBPG))) {
+ if (sva < clean_sva || sva >= clean_eva) {
+ m = PHYS_TO_VM_PAGE(pa);
+ m->flags &= ~PG_CLEAN;
+ }
+ }
+
+ pv = pa_to_pvh(pa);
+ pmap_remove_entry(pmap, pv, sva);
+ pmap_unuse_pt(pmap, sva); /* release hold on the PT page */
+ }
+ tlbflush();
+ return;
+ }
+
+ /* general case: work in units of page indices, not addresses */
+ sva = i386_btop(sva);
+ eva = i386_btop(eva);
+
+ while (sva < eva) {
+ /*
+ * Weed out invalid mappings.
+ * Note: we assume that the page directory table is
+ * always allocated, and in kernel virtual.
+ */
+
+ if ( *pmap_pde(pmap, i386_ptob(sva)) == 0 ) {
+ /* We can race ahead here, straight to next pde.. */
+ nextpde: /* NOTE(review): label only reached by fallthrough */
+ sva = ((sva + NPTEPG) & ~(NPTEPG - 1));
+ continue;
+ }
+
+ ptq = ptp + sva;
+
+ /*
+ * search for page table entries, use string operations
+ * that are much faster than
+ * explicitly scanning when page tables are not fully
+ * populated.
+ */
+ if ( *ptq == 0) {
+ vm_offset_t pdnxt = ((sva + NPTEPG) & ~(NPTEPG - 1));
+ vm_offset_t nscan = pdnxt - sva;
+ int found = 0;
+
+ if ((nscan + sva) > eva)
+ nscan = eva - sva;
+
+ /* repe/scasl skips zero PTEs; found=1 if a non-zero one
+ * was hit before nscan entries were consumed */
+ asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;"
+ :"=D"(ptq),"=a"(found)
+ :"c"(nscan),"0"(ptq)
+ :"cx");
+
+ if( !found) {
+ sva = pdnxt;
+ continue;
+ }
+ ptq -= 1; /* scas stops one past the hit */
+
+ sva = ptq - ptp;
+ }
+
+ /*
+ * Update statistics
+ */
+ oldpte = *ptq;
+ if (((int)oldpte) & PG_W)
+ pmap->pm_stats.wired_count--;
+ pmap->pm_stats.resident_count--;
+
+ /*
+ * Invalidate the PTEs.
+ * XXX: should cluster them up and invalidate as many
+ * as possible at once.
+ */
+ *ptq = 0;
+
+ va = i386_ptob(sva);
+
+ /*
+ * Remove from the PV table (raise IPL since we
+ * may be called at interrupt time).
+ */
+ pa = ((int)oldpte) & PG_FRAME;
+ if (!pmap_is_managed(pa)) {
+ ++sva;
+ continue;
+ }
+
+ /* same dirty-bit carryover logic as the single-page case above */
+ if ((((int) oldpte & PG_M) && (va < USRSTACK || va > UPT_MAX_ADDRESS))
+ || (va >= USRSTACK && va < USRSTACK+(UPAGES*NBPG))) {
+ if (va < clean_sva || va >= clean_eva ) {
+ m = PHYS_TO_VM_PAGE(pa);
+ m->flags &= ~PG_CLEAN;
+ }
+ }
+
+ pv = pa_to_pvh(pa);
+ pmap_remove_entry(pmap, pv, va);
+ pmap_unuse_pt(pmap, va);
+ ++sva;
+ }
+ tlbflush(); /* one flush for the whole range */
+}
+
+/*
+ * Routine: pmap_remove_all
+ * Function:
+ * Removes this physical page from
+ * all physical maps in which it resides.
+ * Reflects back modify bits to the pager.
+ *
+ * Notes:
+ * Original versions of this routine were very
+ * inefficient because they iteratively called
+ * pmap_remove (slow...)
+ */
+void
+pmap_remove_all(pa)
+ vm_offset_t pa;
+{
+ register pv_entry_t pv, npv;
+ register pt_entry_t *pte, *ptp;
+ vm_offset_t va;
+ struct pmap *pmap;
+ struct map *map; /* NOTE(review): unused local */
+ vm_page_t m;
+ int s;
+
+ /*
+ * Not one of ours
+ */
+ if (!pmap_is_managed(pa))
+ return;
+
+ pa = i386_trunc_page(pa);
+ pv = pa_to_pvh(pa);
+ m = PHYS_TO_VM_PAGE(pa);
+
+ s = splimp();
+ /* walk the pv chain, tearing down every mapping of this page */
+ while (pv->pv_pmap != NULL) {
+ pmap = pv->pv_pmap;
+ ptp = get_pt_entry(pmap);
+ va = i386_btop(pv->pv_va);
+ pte = ptp + va;
+ if (pmap_pte_w(pte))
+ pmap->pm_stats.wired_count--;
+ if ( *pte)
+ pmap->pm_stats.resident_count--;
+
+ /*
+ * update the vm_page_t clean bit
+ */
+ /* NOTE(review): precedence here is ((PG_CLEAN && modified-case) ||
+ * stack-case); the PG_CLEAN test does NOT guard the second
+ * disjunct, unlike the parenthesization in pmap_remove --
+ * confirm this asymmetry is intended. */
+ if ( (m->flags & PG_CLEAN) &&
+ ((((int) *pte) & PG_M) && (pv->pv_va < USRSTACK || pv->pv_va > UPT_MAX_ADDRESS))
+ || (pv->pv_va >= USRSTACK && pv->pv_va < USRSTACK+(UPAGES*NBPG))) {
+ if (pv->pv_va < clean_sva || pv->pv_va >= clean_eva) {
+ m->flags &= ~PG_CLEAN;
+ }
+ }
+
+ *pte = 0;
+ pmap_unuse_pt(pmap, pv->pv_va);
+
+ /* unlink this pv entry; promote the next one into the header */
+ npv = pv->pv_next;
+ if (npv) {
+ *pv = *npv;
+ free_pv_entry(npv);
+ } else {
+ pv->pv_pmap = NULL;
+ }
+ }
+ splx(s);
+ tlbflush();
+}
+
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ */
+void
+pmap_protect(pmap, sva, eva, prot)
+ register pmap_t pmap;
+ vm_offset_t sva, eva;
+ vm_prot_t prot;
+{
+ register pt_entry_t *pte;
+ register vm_offset_t va;
+ int i386prot;
+ register pt_entry_t *ptp;
+ int evap = i386_btop(eva);
+ int s;
+
+ if (pmap == NULL)
+ return;
+
+ /* removing read permission on i386 means removing the mapping */
+ if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
+ /* write permission is never revoked-by-grant here; nothing to do */
+ if (prot & VM_PROT_WRITE)
+ return;
+
+ ptp = get_pt_entry(pmap);
+
+ va = sva;
+ while (va < eva) {
+ int found=0;
+ int svap;
+ vm_offset_t nscan;
+ /*
+ * Page table page is not allocated.
+ * Skip it, we don't want to force allocation
+ * of unnecessary PTE pages just to set the protection.
+ */
+ if (! *pmap_pde(pmap, va)) {
+ /* XXX: avoid address wrap around */
+nextpde:
+ if (va >= i386_trunc_pdr((vm_offset_t)-1))
+ break;
+ va = i386_round_pdr(va + PAGE_SIZE);
+ continue;
+ }
+
+ pte = ptp + i386_btop(va);
+
+ if( *pte == 0) {
+ /*
+ * scan for a non-empty pte
+ */
+ /* scan at most to the end of this page table, and never
+ * past the end of the requested range */
+ svap = pte - ptp;
+ nscan = ((svap + NPTEPG) & ~(NPTEPG - 1)) - svap;
+
+ if (nscan + svap > evap)
+ nscan = evap - svap;
+
+ found = 0;
+ if (nscan)
+ /* repe/scasl: skip zero PTEs; found=1 on a hit */
+ asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;"
+ :"=D"(pte),"=a"(found)
+ :"c"(nscan),"0"(pte):"cx");
+
+ if( !found)
+ goto nextpde;
+
+ pte -= 1; /* scas stops one past the hit */
+ svap = pte - ptp;
+
+ va = i386_ptob(svap);
+ }
+
+ i386prot = pte_prot(pmap, prot);
+ if (va < UPT_MAX_ADDRESS) {
+ i386prot |= PG_u; /* user-accessible */
+ if( va >= UPT_MIN_ADDRESS)
+ i386prot |= PG_RW; /* page tables stay writable */
+ }
+ pmap_pte_set_prot(pte, i386prot);
+ va += PAGE_SIZE;
+ }
+ tlbflush();
+}
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+void
+pmap_enter(pmap, va, pa, prot, wired)
+ register pmap_t pmap;
+ vm_offset_t va;
+ register vm_offset_t pa;
+ vm_prot_t prot;
+ boolean_t wired;
+{
+ register pt_entry_t *pte;
+ register pt_entry_t npte;
+ vm_offset_t opa;
+ int cacheable=1;
+
+ if (pmap == NULL)
+ return;
+
+ va = i386_trunc_page(va);
+ pa = i386_trunc_page(pa);
+ if (va > VM_MAX_KERNEL_ADDRESS)panic("pmap_enter: toobig");
+
+ /*
+ * Page Directory table entry not valid, we need a new PT page
+ */
+ /* debug trap: callers are expected to have the PT page resident */
+ if ( *pmap_pde(pmap, va) == 0) {
+ pg("ptdi %x, va %x", pmap->pm_pdir[PTDPTDI], va);
+ }
+
+ pte = pmap_pte(pmap, va);
+ opa = pmap_pte_pa(pte);
+
+ /*
+ * Mapping has not changed, must be protection or wiring change.
+ */
+ if (opa == pa) {
+ /*
+ * Wiring change, just update stats.
+ * We don't worry about wiring PT pages as they remain
+ * resident as long as there are valid mappings in them.
+ * Hence, if a user page is wired, the PT page will be also.
+ */
+ /* i.e. the wiring state differs; && binds tighter than || */
+ if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
+ if (wired)
+ pmap->pm_stats.wired_count++;
+ else
+ pmap->pm_stats.wired_count--;
+ }
+ goto validate;
+ }
+
+ /*
+ * Mapping has changed, invalidate old range and fall through to
+ * handle validating new mapping.
+ */
+ if (opa) {
+ pmap_remove(pmap, va, va + PAGE_SIZE);
+ }
+
+ /*
+ * Enter on the PV list if part of our managed memory
+ * Note that we raise IPL while manipulating pv_table
+ * since pmap_enter can be called at interrupt time.
+ */
+ if (pmap_is_managed(pa)) {
+ register pv_entry_t pv, npv;
+ int s;
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * No entries yet, use header as the first entry
+ */
+ if (pv->pv_pmap == NULL) {
+ pv->pv_va = va;
+ pv->pv_pmap = pmap;
+ pv->pv_next = NULL;
+ }
+ /*
+ * There is at least one other VA mapping this page.
+ * Place this entry after the header.
+ */
+ else {
+ npv = get_pv_entry();
+ npv->pv_va = va;
+ npv->pv_pmap = pmap;
+ npv->pv_next = pv->pv_next;
+ pv->pv_next = npv;
+ }
+ splx(s);
+ cacheable = 1;
+ } else {
+ cacheable = 0; /* device memory: disable caching below */
+ }
+
+ pmap_use_pt(pmap, va); /* hold the PT page for the new mapping */
+
+ /*
+ * Increment counters
+ */
+ pmap->pm_stats.resident_count++;
+ if (wired)
+ pmap->pm_stats.wired_count++;
+
+validate:
+ /*
+ * Now validate mapping with desired protection/wiring.
+ */
+ npte = (pt_entry_t) ( (int) (pa | pte_prot(pmap, prot) | PG_V));
+ /*
+ * for correctness:
+ */
+ if( !cacheable)
+ (int) npte |= PG_N;
+
+ /*
+ * When forking (copy-on-write, etc):
+ * A process will turn off write permissions for any of its writable
+ * pages. If the data (object) is only referred to by one process, the
+ * processes map is modified directly as opposed to using the
+ * object manipulation routine. When using pmap_protect, the
+ * modified bits are not kept in the vm_page_t data structure.
+ * Therefore, when using pmap_enter in vm_fault to bring back
+ * writability of a page, there has been no memory of the
+ * modified or referenced bits except at the pte level.
+ * this clause supports the carryover of the modified and
+ * used (referenced) bits.
+ */
+ if (pa == opa)
+ (int) npte |= (int) *pte & (PG_M|PG_U);
+
+
+ if (wired)
+ (int) npte |= PG_W;
+ if (va < UPT_MIN_ADDRESS)
+ (int) npte |= PG_u; /* ordinary user page */
+ else if (va < UPT_MAX_ADDRESS)
+ (int) npte |= PG_u | PG_RW | PG_NC_PWT; /* user page-table page */
+
+/*
+ printf("mapping: pa: %x, to va: %x, with pte: %x\n", pa, va, npte);
+*/
+
+ /* only write the PTE and pay for a TLB flush if it actually changed */
+ if( *pte != npte) {
+ *pte = npte;
+ tlbflush();
+ }
+}
+
+/*
+ * add a wired page to the kva
+ */
+void
+pmap_kenter(va, pa)
+ vm_offset_t va;
+ register vm_offset_t pa;
+{
+ register pt_entry_t *pte;
+ register pv_entry_t pv, npv;
+ vm_offset_t opa;
+ int s;
+
+ /*
+ * Enter on the PV list if part of our managed memory
+ * Note that we raise IPL while manipulating pv_table
+ * since pmap_enter can be called at interrupt time.
+ */
+
+ pte = vtopte(va); /* kernel mapping: use the current PT window */
+
+ opa = pmap_pte_pa(pte);
+ /*
+ * Mapping has not changed, must be protection or wiring change.
+ */
+ if (opa == pa) {
+ /*
+ * Wiring change, just update stats.
+ * We don't worry about wiring PT pages as they remain
+ * resident as long as there are valid mappings in them.
+ * Hence, if a user page is wired, the PT page will be also.
+ */
+ if (!pmap_pte_w(pte)) {
+ kernel_pmap->pm_stats.wired_count++;
+ }
+ goto validate;
+ }
+
+ if (opa) {
+ /* a different page was mapped here: tear it down first */
+ pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
+ }
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * No entries yet, use header as the first entry
+ */
+ if (pv->pv_pmap == NULL) {
+ pv->pv_va = va;
+ pv->pv_pmap = kernel_pmap;
+ pv->pv_next = NULL;
+ }
+ /*
+ * There is at least one other VA mapping this page.
+ * Place this entry after the header.
+ */
+ else {
+ npv = get_pv_entry();
+ npv->pv_va = va;
+ npv->pv_pmap = kernel_pmap;
+ npv->pv_next = pv->pv_next;
+ pv->pv_next = npv;
+ }
+ splx(s);
+
+ /*
+ * Increment counters
+ */
+ kernel_pmap->pm_stats.resident_count++;
+
+validate:
+
+ /*
+ * Now validate mapping with desired protection/wiring.
+ */
+ /* kernel read/write, valid, wired; caller is responsible for TLB flush */
+ *pte = (pt_entry_t) ( (int) (pa | PG_RW | PG_V | PG_W));
+}
+
+/*
+ * this code makes some *MAJOR* assumptions:
+ * 1. Current pmap & pmap exists.
+ * 2. Not wired.
+ * 3. Read access.
+ * 4. No page table pages.
+ * 5. Tlbflush is deferred to calling procedure.
+ * 6. Page IS managed.
+ * but is *MUCH* faster than pmap_enter...
+ */
+
+static inline void
+pmap_enter_quick(pmap, va, pa)
+ register pmap_t pmap;
+ vm_offset_t va;
+ register vm_offset_t pa;
+{
+ register pt_entry_t *pte;
+ register pv_entry_t pv, npv;
+ int s;
+
+ /*
+ * Enter on the PV list if part of our managed memory
+ * Note that we raise IPL while manipulating pv_table
+ * since pmap_enter can be called at interrupt time.
+ */
+
+ pte = vtopte(va);
+ if (pmap_pte_pa(pte)) {
+ pmap_remove(pmap, va, va + PAGE_SIZE);
+ }
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * No entries yet, use header as the first entry
+ */
+ if (pv->pv_pmap == NULL) {
+ pv->pv_va = va;
+ pv->pv_pmap = pmap;
+ pv->pv_next = NULL;
+ }
+ /*
+ * There is at least one other VA mapping this page.
+ * Place this entry after the header.
+ */
+ else {
+ npv = get_pv_entry();
+ npv->pv_va = va;
+ npv->pv_pmap = pmap;
+ npv->pv_next = pv->pv_next;
+ pv->pv_next = npv;
+ }
+ splx(s);
+
+ pmap_use_pt(pmap, va);
+
+ /*
+ * Increment counters
+ */
+ pmap->pm_stats.resident_count++;
+
+validate:
+
+ /*
+ * Now validate mapping with desired protection/wiring.
+ */
+ *pte = (pt_entry_t) ( (int) (pa | PG_V | PG_u));
+}
+
+/*
+ * pmap_object_init_pt preloads the ptes for a given object
+ * into the specified pmap. This eliminates the blast of soft
+ * faults on process startup and immediately after an mmap.
+ */
+void
+pmap_object_init_pt(pmap, addr, object, offset, size)
+ pmap_t pmap;
+ vm_offset_t addr;
+ vm_object_t object;
+ vm_offset_t offset;
+ vm_offset_t size;
+{
+
+ vm_offset_t tmpoff;
+ vm_page_t p;
+ int s;
+ vm_offset_t v, lastv=0;
+ pt_entry_t pte;
+ extern vm_map_t kernel_map;
+ vm_offset_t objbytes;
+
+ if (!pmap)
+ return;
+
+ /*
+ * if we are processing a major portion of the object, then
+ * scan the entire thing.
+ */
+ if( size > object->size / 2) {
+ objbytes = size;
+ p = object->memq.tqh_first;
+ while ((p != NULL) && (objbytes != 0)) {
+ tmpoff = p->offset;
+ if( tmpoff < offset) {
+ p = p->listq.tqe_next;
+ continue;
+ }
+ tmpoff -= offset;
+ if( tmpoff >= size) {
+ p = p->listq.tqe_next;
+ continue;
+ }
+
+ if ((p->flags & (PG_BUSY|PG_FICTITIOUS)) == 0 ) {
+ vm_page_hold(p);
+ v = i386_trunc_page(((vm_offset_t)vtopte( addr+tmpoff)));
+ /* a fault might occur here */
+ *(volatile char *)v += 0;
+ vm_page_unhold(p);
+ pmap_enter_quick(pmap, addr+tmpoff, VM_PAGE_TO_PHYS(p));
+ }
+ p = p->listq.tqe_next;
+ objbytes -= NBPG;
+ }
+ } else {
+ /*
+ * else lookup the pages one-by-one.
+ */
+ for(tmpoff = 0; tmpoff < size; tmpoff += NBPG) {
+ if( p = vm_page_lookup(object, tmpoff + offset)) {
+ if( (p->flags & (PG_BUSY|PG_FICTITIOUS)) == 0) {
+ vm_page_hold(p);
+ v = i386_trunc_page(((vm_offset_t)vtopte( addr+tmpoff)));
+ /* a fault might occur here */
+ *(volatile char *)v += 0;
+ vm_page_unhold(p);
+ pmap_enter_quick(pmap, addr+tmpoff, VM_PAGE_TO_PHYS(p));
+ }
+ }
+ }
+ }
+
+ tlbflush();
+}
+
+/*
+ * Routine: pmap_change_wiring
+ * Function: Change the wiring attribute for a map/virtual-address
+ * pair.
+ * In/out conditions:
+ * The mapping must already exist in the pmap.
+ */
+void
+pmap_change_wiring(pmap, va, wired)
+ register pmap_t pmap;
+ vm_offset_t va;
+ boolean_t wired;
+{
+ register pt_entry_t *pte;
+
+ if (pmap == NULL)
+ return;
+
+ pte = pmap_pte(pmap, va);
+ if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
+ if (wired)
+ pmap->pm_stats.wired_count++;
+ else
+ pmap->pm_stats.wired_count--;
+ }
+ /*
+ * Wiring is not a hardware characteristic so there is no need
+ * to invalidate TLB.
+ */
+ pmap_pte_set_w(pte, wired);
+ /*
+ * When unwiring, set the modified bit in the pte -- could have
+ * been changed by the kernel
+ */
+ if (!wired)
+ (int) *pte |= PG_M;
+}
+
+
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+void
+pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
+ pmap_t dst_pmap, src_pmap;
+ vm_offset_t dst_addr;
+ vm_size_t len;
+ vm_offset_t src_addr;
+{
+}
+/*
+ * Require that all active physical maps contain no
+ * incorrect entries NOW. [This update includes
+ * forcing updates of any address map caching.]
+ *
+ * Generally used to insure that a thread about
+ * to run will see a semantically correct world.
+ */
+void
+pmap_update()
+{
+ tlbflush();
+}
+
+/*
+ * Routine: pmap_kernel
+ * Function:
+ * Returns the physical map handle for the kernel.
+ */
+pmap_t
+pmap_kernel()
+{
+ return (kernel_pmap);
+}
+
+/*
+ * pmap_zero_page zeros the specified (machine independent)
+ * page by mapping the page into virtual memory and using
+ * bzero to clear its contents, one machine dependent page
+ * at a time.
+ */
+void
+pmap_zero_page(phys)
+ vm_offset_t phys;
+{
+ *(int *)CMAP2 = PG_V | PG_KW | i386_trunc_page(phys);
+ tlbflush();
+ bzero(CADDR2,NBPG);
+}
+
+/*
+ * pmap_copy_page copies the specified (machine independent)
+ * page by mapping the page into virtual memory and using
+ * bcopy to copy the page, one machine dependent page at a
+ * time.
+ */
+void
+pmap_copy_page(src, dst)
+ vm_offset_t src;
+ vm_offset_t dst;
+{
+ *(int *)CMAP1 = PG_V | PG_KW | i386_trunc_page(src);
+ *(int *)CMAP2 = PG_V | PG_KW | i386_trunc_page(dst);
+ tlbflush();
+
+#if __GNUC__ > 1
+ memcpy(CADDR2, CADDR1, NBPG);
+#else
+ bcopy(CADDR1, CADDR2, NBPG);
+#endif
+}
+
+
+/*
+ * Routine: pmap_pageable
+ * Function:
+ * Make the specified pages (by pmap, offset)
+ * pageable (or not) as requested.
+ *
+ * A page which is not pageable may not take
+ * a fault; therefore, its page table entry
+ * must remain valid for the duration.
+ *
+ * This routine is merely advisory; pmap_enter
+ * will specify that these pages are to be wired
+ * down (or not) as appropriate.
+ */
+void
+pmap_pageable(pmap, sva, eva, pageable)
+ pmap_t pmap;
+ vm_offset_t sva, eva;
+ boolean_t pageable;
+{
+}
+
+/*
+ * this routine returns true if a physical page resides
+ * in the given pmap.
+ */
+boolean_t
+pmap_page_exists(pmap, pa)
+ pmap_t pmap;
+ vm_offset_t pa;
+{
+ register pv_entry_t pv;
+ int s;
+
+ if (!pmap_is_managed(pa))
+ return FALSE;
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+
+ /*
+ * Not found, check current mappings returning
+ * immediately if found.
+ */
+ if (pv->pv_pmap != NULL) {
+ for (; pv; pv = pv->pv_next) {
+ if (pv->pv_pmap == pmap) {
+ splx(s);
+ return TRUE;
+ }
+ }
+ }
+ splx(s);
+ return(FALSE);
+}
+
+/*
+ * pmap_testbit tests bits in pte's
+ * note that the testbit/changebit routines are inline,
+ * and a lot of things compile-time evaluate.
+ */
+static inline boolean_t
+pmap_testbit(pa, bit)
+ register vm_offset_t pa;
+ int bit;
+{
+ register pv_entry_t pv;
+ pt_entry_t *pte;
+ int s;
+
+ if (!pmap_is_managed(pa))
+ return FALSE;
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+
+ /*
+ * Not found, check current mappings returning
+ * immediately if found.
+ */
+ if (pv->pv_pmap != NULL) {
+ for (; pv; pv = pv->pv_next) {
+ /*
+ * if the bit being tested is the modified bit,
+ * then mark UPAGES as always modified, and
+ * ptes as never modified.
+ */
+ if (bit & PG_U ) {
+ if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) {
+ continue;
+ }
+ }
+ if (bit & PG_M ) {
+ if (pv->pv_va >= USRSTACK) {
+ if (pv->pv_va >= clean_sva && pv->pv_va < clean_eva) {
+ continue;
+ }
+ if (pv->pv_va < USRSTACK+(UPAGES*NBPG)) {
+ splx(s);
+ return TRUE;
+ }
+ else if (pv->pv_va < UPT_MAX_ADDRESS) {
+ splx(s);
+ return FALSE;
+ }
+ }
+ }
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ if ((int) *pte & bit) {
+ splx(s);
+ return TRUE;
+ }
+ }
+ }
+ splx(s);
+ return(FALSE);
+}
+
+/*
+ * this routine is used to modify bits in ptes
+ */
+static inline void
+pmap_changebit(pa, bit, setem)
+ vm_offset_t pa;
+ int bit;
+ boolean_t setem;
+{
+ register pv_entry_t pv;
+ register pt_entry_t *pte, npte;
+ vm_offset_t va;
+ int s;
+
+ if (!pmap_is_managed(pa))
+ return;
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+
+ /*
+ * Loop over all current mappings setting/clearing as appropos
+ * If setting RO do we need to clear the VAC?
+ */
+ if (pv->pv_pmap != NULL) {
+ for (; pv; pv = pv->pv_next) {
+ va = pv->pv_va;
+
+ /*
+ * don't write protect pager mappings
+ */
+ if (!setem && (bit == PG_RW)) {
+ if (va >= clean_sva && va < clean_eva)
+ continue;
+ }
+
+ pte = pmap_pte(pv->pv_pmap, va);
+ if (setem)
+ (int) npte = (int) *pte | bit;
+ else
+ (int) npte = (int) *pte & ~bit;
+ *pte = npte;
+ }
+ }
+ splx(s);
+ tlbflush();
+}
+
+/*
+ * pmap_page_protect:
+ *
+ * Lower the permission for all mappings to a given page.
+ */
+void
+pmap_page_protect(phys, prot)
+ vm_offset_t phys;
+ vm_prot_t prot;
+{
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
+ pmap_changebit(phys, PG_RW, FALSE);
+ else
+ pmap_remove_all(phys);
+ }
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+void
+pmap_clear_modify(pa)
+ vm_offset_t pa;
+{
+ pmap_changebit(pa, PG_M, FALSE);
+}
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+void
+pmap_clear_reference(pa)
+ vm_offset_t pa;
+{
+ pmap_changebit(pa, PG_U, FALSE);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page is referenced
+ * by any physical maps.
+ */
+
+boolean_t
+pmap_is_referenced(pa)
+ vm_offset_t pa;
+{
+ return(pmap_testbit(pa, PG_U));
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page is modified
+ * by any physical maps.
+ */
+
+boolean_t
+pmap_is_modified(pa)
+ vm_offset_t pa;
+{
+ return(pmap_testbit(pa, PG_M));
+}
+
+/*
+ * Routine: pmap_copy_on_write
+ * Function:
+ * Remove write privileges from all
+ * physical maps for this physical page.
+ */
+void
+pmap_copy_on_write(pa)
+ vm_offset_t pa;
+{
+ pmap_changebit(pa, PG_RW, FALSE);
+}
+
+
+vm_offset_t
+pmap_phys_address(ppn)
+ int ppn;
+{
+ return(i386_ptob(ppn));
+}
+
+/*
+ * Miscellaneous support routines follow
+ */
+/*
+ * This really just builds a table for page write enable
+ * translation.
+ */
+
+void
+i386_protection_init()
+{
+ register int *kp, prot;
+
+ kp = protection_codes;
+ for (prot = 0; prot < 8; prot++) {
+ switch (prot) {
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
+ *kp++ = 0;
+ break;
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ *kp++ = PG_RW;
+ break;
+ }
+ }
+}
+
+#ifdef DEBUG
+void
+pmap_pvdump(pa)
+ vm_offset_t pa;
+{
+ register pv_entry_t pv;
+
+ printf("pa %x", pa);
+ for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
+ printf(" -> pmap %x, va %x, flags %x",
+ pv->pv_pmap, pv->pv_va, pv->pv_flags);
+ pads(pv->pv_pmap);
+ }
+ printf(" ");
+}
+
+/* print address space of pmap*/
+void
+pads(pm)
+ pmap_t pm;
+{
+ unsigned va, i, j;
+ pt_entry_t *ptep;
+
+ if (pm == kernel_pmap) return;
+ for (i = 0; i < 1024; i++)
+ if (pm->pm_pdir[i])
+ for (j = 0; j < 1024 ; j++) {
+ va = (i<<PD_SHIFT)+(j<<PG_SHIFT);
+ if (pm == kernel_pmap && va < KERNBASE)
+ continue;
+ if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
+ continue;
+ ptep = pmap_pte(pm, va);
+ if (pmap_pte_v(ptep))
+ printf("%x:%x ", va, *(int *)ptep);
+ } ;
+
+}
+#endif
diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
new file mode 100644
index 0000000..c4e37df
--- /dev/null
+++ b/sys/amd64/amd64/support.S
@@ -0,0 +1,1221 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: support.s,v 1.10 1994/06/06 14:23:49 davidg Exp $
+ */
+
+#include "assym.s" /* system definitions */
+#include "errno.h" /* error return codes */
+#include "machine/asmacros.h" /* miscellaneous asm macros */
+#include "machine/cputypes.h" /* types of CPUs */
+
+#define KDSEL 0x10 /* kernel data selector */
+#define IDXSHIFT 10
+
+/*
+ * Support routines for GCC, general C-callable functions
+ */
+ENTRY(__udivsi3)
+ movl 4(%esp),%eax
+ xorl %edx,%edx
+ divl 8(%esp)
+ ret
+
+ENTRY(__divsi3)
+ movl 4(%esp),%eax
+ cltd
+ idivl 8(%esp)
+ ret
+
+ /*
+ * I/O bus instructions via C
+ */
+ENTRY(inb) /* val = inb(port) */
+ movl 4(%esp),%edx
+ subl %eax,%eax
+ inb %dx,%al
+ NOP
+ ret
+
+ENTRY(inw) /* val = inw(port) */
+ movl 4(%esp),%edx
+ subl %eax,%eax
+ inw %dx,%ax
+ NOP
+ ret
+
+ENTRY(insb) /* insb(port, addr, cnt) */
+ pushl %edi
+ movl 8(%esp),%edx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ insb
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ENTRY(insw) /* insw(port, addr, cnt) */
+ pushl %edi
+ movl 8(%esp),%edx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ insw
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ENTRY(insl) /* insl(port, addr, cnt) */
+ pushl %edi
+ movl 8(%esp),%edx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ insl
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ENTRY(rtcin) /* rtcin(val) */
+ movl 4(%esp),%eax
+ outb %al,$0x70
+ NOP
+ xorl %eax,%eax
+ inb $0x71,%al
+ NOP
+ ret
+
+ENTRY(outb) /* outb(port, val) */
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ outb %al,%dx
+ NOP
+ ret
+
+ENTRY(outw) /* outw(port, val) */
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ outw %ax,%dx
+ NOP
+ ret
+
+ENTRY(outsb) /* outsb(port, addr, cnt) */
+ pushl %esi
+ movl 8(%esp),%edx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ outsb
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+ENTRY(outsw) /* outsw(port, addr, cnt) */
+ pushl %esi
+ movl 8(%esp),%edx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ outsw
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+ENTRY(outsl) /* outsl(port, addr, cnt) */
+ pushl %esi
+ movl 8(%esp),%edx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ outsl
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+/*
+ * bcopy family
+ */
+
+/*
+ * void bzero(void *base, u_int cnt)
+ * Special code for I486 because stosl uses lots
+ * of clocks. Makes little or no difference on DX2 type
+ * machines, but stosl is about 1/2 as fast as
+ * memory moves on a standard DX !!!!!
+ */
+ALTENTRY(blkclr)
+ENTRY(bzero)
+#if defined(I486_CPU)
+ cmpl $CPUCLASS_486,_cpu_class
+ jz 1f
+#endif
+
+ pushl %edi
+ movl 8(%esp),%edi
+ movl 12(%esp),%ecx
+ xorl %eax,%eax
+ shrl $2,%ecx
+ cld
+ rep
+ stosl
+ movl 12(%esp),%ecx
+ andl $3,%ecx
+ rep
+ stosb
+ popl %edi
+ ret
+
+#if defined(I486_CPU)
+ SUPERALIGN_TEXT
+1:
+ movl 4(%esp),%edx
+ movl 8(%esp),%ecx
+ xorl %eax,%eax
+/
+/ do 64 byte chunks first
+/
+/ XXX this is probably over-unrolled at least for DX2's
+/
+2:
+ cmpl $64,%ecx
+ jb 3f
+ movl %eax,(%edx)
+ movl %eax,4(%edx)
+ movl %eax,8(%edx)
+ movl %eax,12(%edx)
+ movl %eax,16(%edx)
+ movl %eax,20(%edx)
+ movl %eax,24(%edx)
+ movl %eax,28(%edx)
+ movl %eax,32(%edx)
+ movl %eax,36(%edx)
+ movl %eax,40(%edx)
+ movl %eax,44(%edx)
+ movl %eax,48(%edx)
+ movl %eax,52(%edx)
+ movl %eax,56(%edx)
+ movl %eax,60(%edx)
+ addl $64,%edx
+ subl $64,%ecx
+ jnz 2b
+ ret
+
+/
+/ do 16 byte chunks
+/
+ SUPERALIGN_TEXT
+3:
+ cmpl $16,%ecx
+ jb 4f
+ movl %eax,(%edx)
+ movl %eax,4(%edx)
+ movl %eax,8(%edx)
+ movl %eax,12(%edx)
+ addl $16,%edx
+ subl $16,%ecx
+ jnz 3b
+ ret
+
+/
+/ do 4 byte chunks
+/
+ SUPERALIGN_TEXT
+4:
+ cmpl $4,%ecx
+ jb 5f
+ movl %eax,(%edx)
+ addl $4,%edx
+ subl $4,%ecx
+ jnz 4b
+ ret
+
+/
+/ do 1 byte chunks
+/ a jump table seems to be faster than a loop or more range reductions
+/
+/ XXX need a const section for non-text
+/
+ SUPERALIGN_TEXT
+jtab:
+ .long do0
+ .long do1
+ .long do2
+ .long do3
+
+ SUPERALIGN_TEXT
+5:
+ jmp jtab(,%ecx,4)
+
+ SUPERALIGN_TEXT
+do3:
+ movw %ax,(%edx)
+ movb %al,2(%edx)
+ ret
+
+ SUPERALIGN_TEXT
+do2:
+ movw %ax,(%edx)
+ ret
+
+ SUPERALIGN_TEXT
+do1:
+ movb %al,(%edx)
+
+ SUPERALIGN_TEXT
+do0:
+ ret
+#endif /* I486_CPU */
+
+/* fillw(pat, base, cnt) */
+ENTRY(fillw)
+ pushl %edi
+ movl 8(%esp),%eax
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ stosw
+ popl %edi
+ ret
+
+/* filli(pat, base, cnt) */
+ENTRY(filli)
+ pushl %edi
+ movl 8(%esp),%eax
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ stosl
+ popl %edi
+ ret
+
+ENTRY(bcopyb)
+bcopyb:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards. */
+ addl %ecx,%esi
+ std
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ENTRY(bcopyw)
+bcopyw:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ shrl $1,%ecx /* copy by 16-bit words */
+ cld /* nope, copy forwards */
+ rep
+ movsw
+ adc %ecx,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ andl $1,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ std
+ rep
+ movsb
+ movl 20(%esp),%ecx /* copy remainder by 16-bit words */
+ shrl $1,%ecx
+ decl %esi
+ decl %edi
+ rep
+ movsw
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ENTRY(bcopyx)
+ movl 16(%esp),%eax
+ cmpl $2,%eax
+ je bcopyw /* not _bcopyw, to avoid multiple mcounts */
+ cmpl $4,%eax
+ je bcopy /* XXX the shared ret's break mexitcount */
+ jmp bcopyb
+
+/*
+ * (ov)bcopy(src, dst, cnt)
+ * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
+ */
+ALTENTRY(ovbcopy)
+ENTRY(bcopy)
+bcopy:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ shrl $2,%ecx /* copy by 32-bit words */
+ cld /* nope, copy forwards */
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ andl $3,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ std
+ rep
+ movsb
+ movl 20(%esp),%ecx /* copy remainder by 32-bit words */
+ shrl $2,%ecx
+ subl $3,%esi
+ subl $3,%edi
+ rep
+ movsl
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ALTENTRY(ntohl)
+ENTRY(htonl)
+ movl 4(%esp),%eax
+#ifdef i486
+/* XXX */
+/* Since Gas 1.38 does not grok bswap this has been coded as the
+ * equivalent bytes. This can be changed back to bswap when we
+ * upgrade to a newer version of Gas
+ */
+ /* bswap %eax */
+ .byte 0x0f
+ .byte 0xc8
+#else
+ xchgb %al,%ah
+ roll $16,%eax
+ xchgb %al,%ah
+#endif
+ ret
+
+ALTENTRY(ntohs)
+ENTRY(htons)
+ movzwl 4(%esp),%eax
+ xchgb %al,%ah
+ ret
+
+/*****************************************************************************/
+/* copyout and fubyte family */
+/*****************************************************************************/
+/*
+ * Access user memory from inside the kernel. These routines and possibly
+ * the math- and DOS emulators should be the only places that do this.
+ *
+ * We have to access the memory with user's permissions, so use a segment
+ * selector with RPL 3. For writes to user space we have to additionally
+ * check the PTE for write permission, because the 386 does not check
+ * write permissions when we are executing with EPL 0. The 486 does check
+ * this if the WP bit is set in CR0, so we can use a simpler version here.
+ *
+ * These routines set curpcb->onfault for the time they execute. When a
+ * protection violation occurs inside the functions, the trap handler
+ * returns to *curpcb->onfault instead of the function.
+ */
+
+
+ENTRY(copyout) /* copyout(from_kernel, to_user, len) */
+ movl _curpcb,%eax
+ movl $copyout_fault,PCB_ONFAULT(%eax)
+ pushl %esi
+ pushl %edi
+ pushl %ebx
+ movl 16(%esp),%esi
+ movl 20(%esp),%edi
+ movl 24(%esp),%ebx
+ orl %ebx,%ebx /* anything to do? */
+ jz done_copyout
+
+ /*
+ * Check explicitly for non-user addresses. If 486 write protection
+ * is being used, this check is essential because we are in kernel
+ * mode so the h/w does not provide any protection against writing
+ * kernel addresses.
+ *
+ * Otherwise, it saves having to load and restore %es to get the
+ * usual segment-based protection (the destination segment for movs
+ * is always %es). The other explicit checks for user-writablility
+ * are not quite sufficient. They fail for the user area because
+ * we mapped the user area read/write to avoid having an #ifdef in
+ * vm_machdep.c. They fail for user PTEs and/or PTDs! (107
+ * addresses including 0xff800000 and 0xfc000000). I'm not sure if
+ * this can be fixed. Marking the PTEs supervisor mode and the
+ * PDE's user mode would almost work, but there may be a problem
+ * with the self-referential PDE.
+ */
+ movl %edi,%eax
+ addl %ebx,%eax
+ jc copyout_fault
+/*
+ * XXX STOP USING VM_MAXUSER_ADDRESS.
+ * It is an end address, not a max, so every time it is used correctly it
+ * looks like there is an off by one error, and of course it caused an off
+ * by one error in several places.
+ */
+ cmpl $VM_MAXUSER_ADDRESS,%eax
+ ja copyout_fault
+
+#if defined(I386_CPU)
+
+#if defined(I486_CPU) || defined(I586_CPU)
+ cmpl $CPUCLASS_386,_cpu_class
+ jne 3f
+#endif
+/*
+ * We have to check each PTE for user write permission.
+ * The checking may cause a page fault, so it is important to set
+ * up everything for return via copyout_fault before here.
+ */
+ /* compute number of pages */
+ movl %edi,%ecx
+ andl $NBPG-1,%ecx
+ addl %ebx,%ecx
+ decl %ecx
+ shrl $IDXSHIFT+2,%ecx
+ incl %ecx
+
+ /* compute PTE offset for start address */
+ movl %edi,%edx
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+
+1: /* check PTE for each page */
+ movb _PTmap(%edx),%al
+ andb $0x07,%al /* Pages must be VALID + USERACC + WRITABLE */
+ cmpb $0x07,%al
+ je 2f
+
+ /* simulate a trap */
+ pushl %edx
+ pushl %ecx
+ shll $IDXSHIFT,%edx
+ pushl %edx
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+ popl %ecx
+ popl %edx
+
+ orl %eax,%eax /* if not ok, return EFAULT */
+ jnz copyout_fault
+
+2:
+ addl $4,%edx
+ decl %ecx
+ jnz 1b /* check next page */
+#endif /* I386_CPU */
+
+ /* bcopy(%esi, %edi, %ebx) */
+3:
+ movl %ebx,%ecx
+ shrl $2,%ecx
+ cld
+ rep
+ movsl
+ movb %bl,%cl
+ andb $3,%cl
+ rep
+ movsb
+
+done_copyout:
+ popl %ebx
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+copyout_fault:
+ popl %ebx
+ popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $EFAULT,%eax
+ ret
+
+/* copyin(from_user, to_kernel, len) */
+ENTRY(copyin)
+ movl _curpcb,%eax
+ movl $copyin_fault,PCB_ONFAULT(%eax)
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi /* caddr_t from */
+ movl 16(%esp),%edi /* caddr_t to */
+ movl 20(%esp),%ecx /* size_t len */
+
+ /*
+ * make sure address is valid
+ */
+ movl %esi,%edx
+ addl %ecx,%edx
+ jc copyin_fault
+ cmpl $VM_MAXUSER_ADDRESS,%edx
+ ja copyin_fault
+
+ movb %cl,%al
+ shrl $2,%ecx /* copy longword-wise */
+ cld
+ rep
+ movsl
+ movb %al,%cl
+ andb $3,%cl /* copy remaining bytes */
+ rep
+ movsb
+
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+copyin_fault:
+ popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $EFAULT,%eax
+ ret
+
+/*
+ * fu{byte,sword,word} : fetch a byte (sword, word) from user memory
+ */
+ALTENTRY(fuiword)
+ENTRY(fuword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx /* from */
+
+ cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address is valid */
+ ja fusufault
+
+ movl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+/*
+ * These two routines are called from the profiling code, potentially
+ * at interrupt time. If they fail, that's okay, good things will
+ * happen later. Fail all the time for now - until the trap code is
+ * able to deal with this.
+ */
+ALTENTRY(suswintr)
+ENTRY(fuswintr)
+ movl $-1,%eax
+ ret
+
+ENTRY(fusword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+
+ cmpl $VM_MAXUSER_ADDRESS-2,%edx
+ ja fusufault
+
+ movzwl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(fuibyte)
+ENTRY(fubyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+
+ cmpl $VM_MAXUSER_ADDRESS-1,%edx
+ ja fusufault
+
+ movzbl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ ALIGN_TEXT
+fusufault:
+ movl _curpcb,%ecx
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ decl %eax
+ ret
+
+/*
+ * su{byte,sword,word}: write a byte (word, longword) to user memory
+ */
+ALTENTRY(suiword)
+ENTRY(suword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+
+#if defined(I386_CPU)
+
+#if defined(I486_CPU) || defined(I586_CPU)
+ cmpl $CPUCLASS_386,_cpu_class
+ jne 2f /* we only have to set the right segment selector */
+#endif /* I486_CPU || I586_CPU */
+
+ /* XXX - page boundary crossing is still not handled */
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx /* remove junk parameter from stack */
+ movl _curpcb,%ecx /* restore trashed register */
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+#endif
+
+2:
+ cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address validity */
+ ja fusufault
+
+ movl 8(%esp),%eax
+ movl %eax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ENTRY(susword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+
+#if defined(I386_CPU)
+
+#if defined(I486_CPU) || defined(I586_CPU)
+ cmpl $CPUCLASS_386,_cpu_class
+ jne 2f
+#endif /* I486_CPU || I586_CPU */
+
+ /* XXX - page boundary crossing is still not handled */
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx /* remove junk parameter from stack */
+ movl _curpcb,%ecx /* restore trashed register */
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+#endif
+
+2:
+ cmpl $VM_MAXUSER_ADDRESS-2,%edx /* verify address validity */
+ ja fusufault
+
+ movw 8(%esp),%ax
+ movw %ax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(suibyte)
+ENTRY(subyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+
+#if defined(I386_CPU)
+
+#if defined(I486_CPU) || defined(I586_CPU)
+ cmpl $CPUCLASS_386,_cpu_class
+ jne 2f
+#endif /* I486_CPU || I586_CPU */
+
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx /* remove junk parameter from stack */
+ movl _curpcb,%ecx /* restore trashed register */
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+#endif
+
+2:
+ cmpl $VM_MAXUSER_ADDRESS-1,%edx /* verify address validity */
+ ja fusufault
+
+ movb 8(%esp),%al
+ movb %al,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+/*
+ * copyoutstr(from, to, maxlen, int *lencopied)
+ * copy a string from from to to, stop when a 0 character is reached.
+ * return ENAMETOOLONG if string is longer than maxlen, and
+ * EFAULT on protection violations. If lencopied is non-zero,
+ * return the actual length in *lencopied.
+ */
+ENTRY(copyoutstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $cpystrflt,PCB_ONFAULT(%ecx) /* XXX rename copyoutstr_fault */
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ cld
+
+#if defined(I386_CPU)
+
+#if defined(I486_CPU) || defined(I586_CPU)
+ cmpl $CPUCLASS_386,_cpu_class
+ jne 5f
+#endif /* I486_CPU || I586_CPU */
+
+1:
+ /*
+ * It suffices to check that the first byte is in user space, because
+ * we look at a page at a time and the end address is on a page
+ * boundary.
+ */
+ cmpl $VM_MAXUSER_ADDRESS-1,%edi
+ ja cpystrflt
+
+ movl %edi,%eax
+ shrl $IDXSHIFT,%eax
+ andb $0xfc,%al
+ movb _PTmap(%eax),%al
+ andb $7,%al
+ cmpb $7,%al
+ je 2f
+
+ /* simulate trap */
+ pushl %edx
+ pushl %edi
+ call _trapwrite
+ cld
+ popl %edi
+ popl %edx
+ orl %eax,%eax
+ jnz cpystrflt
+
+2: /* copy up to end of this page */
+ movl %edi,%eax
+ andl $NBPG-1,%eax
+ movl $NBPG,%ecx
+ subl %eax,%ecx /* ecx = NBPG - (src % NBPG) */
+ cmpl %ecx,%edx
+ jae 3f
+ movl %edx,%ecx /* ecx = min(ecx, edx) */
+3:
+ orl %ecx,%ecx
+ jz 4f
+ decl %ecx
+ decl %edx
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 3b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+
+4: /* next page */
+ orl %edx,%edx
+ jnz 1b
+
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp cpystrflt_x
+#endif /* I386_CPU */
+
+#if defined(I486_CPU) || defined(I586_CPU)
+5:
+ incl %edx
+1:
+ decl %edx
+ jz 2f
+ /*
+ * XXX - would be faster to rewrite this function to use
+ * strlen() and copyout().
+ */
+ cmpl $VM_MAXUSER_ADDRESS-1,%edi
+ ja cpystrflt
+
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp cpystrflt_x
+2:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp cpystrflt_x
+
+#endif /* I486_CPU || I586_CPU */
+
+/*
+ * This was split from copyinstr_fault mainly because pushing gs changes the
+ * stack offsets. It's better to have it separate for mcounting too.
+ */
+cpystrflt:
+ movl $EFAULT,%eax
+cpystrflt_x:
+ /* set *lencopied and return %eax */
+ movl _curpcb,%ecx
+ movl $0,PCB_ONFAULT(%ecx)
+ movl 20(%esp),%ecx
+ subl %edx,%ecx
+ movl 24(%esp),%edx
+ orl %edx,%edx
+ jz 1f
+ movl %ecx,(%edx)
+1:
+ popl %edi
+ popl %esi
+ ret
+
+
+/*
+ * copyinstr(from, to, maxlen, int *lencopied)
+ * copy a string from from to to, stop when a 0 character is reached.
+ * return ENAMETOOLONG if string is longer than maxlen, and
+ * EFAULT on protection violations. If lencopied is non-zero,
+ * return the actual length in *lencopied.
+ */
+ENTRY(copyinstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $copyinstr_fault,PCB_ONFAULT(%ecx)
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ /*
+ * XXX should avoid touching gs. Either copy the string in and
+ * check the bounds later or get its length and check the bounds
+ * and then use copyin().
+ */
+ pushl %gs
+ movl __udatasel,%eax
+ movl %ax,%gs
+ incl %edx
+ cld
+1:
+ decl %edx
+ jz 2f
+ gs
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 3f
+2:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp 3f
+
+ ALIGN_TEXT
+copyinstr_fault:
+ movl $EFAULT,%eax
+3:
+ /* set *lencopied and return %eax */
+ movl _curpcb,%ecx
+ movl $0,PCB_ONFAULT(%ecx)
+ movl 24(%esp),%ecx
+ subl %edx,%ecx
+ movl 28(%esp),%edx
+ orl %edx,%edx
+ jz 4f
+ movl %ecx,(%edx)
+4:
+ popl %gs
+ popl %edi
+ popl %esi
+ ret
+
+
+/*
+ * copystr(from, to, maxlen, int *lencopied)
+ */
+ENTRY(copystr)
+ pushl %esi
+ pushl %edi
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ incl %edx
+ cld
+1:
+ decl %edx
+ jz 4f
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+4:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+
+6:
+ /* set *lencopied and return %eax */
+ movl 20(%esp),%ecx
+ subl %edx,%ecx
+ movl 24(%esp),%edx
+ orl %edx,%edx
+ jz 7f
+ movl %ecx,(%edx)
+7:
+ popl %edi
+ popl %esi
+ ret
+
+/*
+ * Handling of special 386 registers and descriptor tables etc
+ */
+/* void lgdt(struct region_descriptor *rdp); */
+ENTRY(lgdt)
+ /* reload the descriptor table */
+ movl 4(%esp),%eax
+ lgdt (%eax)
+
+ /* flush the prefetch q */
+ jmp 1f
+ nop
+1:
+ /* reload "stale" selectors */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ movl %ax,%ss
+
+ /* reload code selector by turning return into intersegmental return */
+ movl (%esp),%eax
+ pushl %eax
+# movl $KCSEL,4(%esp)
+ movl $8,4(%esp)
+ lret
+
+/*
+ * void lidt(struct region_descriptor *rdp);
+ */
+ENTRY(lidt)
+ movl 4(%esp),%eax
+ lidt (%eax)
+ ret
+
+/*
+ * void lldt(u_short sel)
+ */
+ENTRY(lldt)
+ lldt 4(%esp)
+ ret
+
+/*
+ * void ltr(u_short sel)
+ */
+ENTRY(ltr)
+ ltr 4(%esp)
+ ret
+
+/* ssdtosd(*ssdp,*sdp) */
+ENTRY(ssdtosd)
+ pushl %ebx
+ movl 8(%esp),%ecx
+ movl 8(%ecx),%ebx
+ shll $16,%ebx
+ movl (%ecx),%edx
+ roll $16,%edx
+ movb %dh,%bl
+ movb %dl,%bh
+ rorl $8,%ebx
+ movl 4(%ecx),%eax
+ movw %ax,%dx
+ andl $0xf0000,%eax
+ orl %eax,%ebx
+ movl 12(%esp),%ecx
+ movl %edx,(%ecx)
+ movl %ebx,4(%ecx)
+ popl %ebx
+ ret
+
+/* load_cr0(cr0) */
+ENTRY(load_cr0)
+ movl 4(%esp),%eax
+ movl %eax,%cr0
+ ret
+
+/* rcr0() */
+ENTRY(rcr0)
+ movl %cr0,%eax
+ ret
+
+/* rcr3() */
+ENTRY(rcr3)
+ movl %cr3,%eax
+ ret
+
+/* void load_cr3(caddr_t cr3) */
+ENTRY(load_cr3)
+ movl 4(%esp),%eax
+ orl $I386_CR3PAT,%eax
+ movl %eax,%cr3
+ ret
+
+
+/*****************************************************************************/
+/* setjump, longjump */
+/*****************************************************************************/
+
+ENTRY(setjmp)
+ movl 4(%esp),%eax
+ movl %ebx,(%eax) /* save ebx */
+ movl %esp,4(%eax) /* save esp */
+ movl %ebp,8(%eax) /* save ebp */
+ movl %esi,12(%eax) /* save esi */
+ movl %edi,16(%eax) /* save edi */
+ movl (%esp),%edx /* get rta */
+ movl %edx,20(%eax) /* save eip */
+ xorl %eax,%eax /* return(0); */
+ ret
+
+ENTRY(longjmp)
+ movl 4(%esp),%eax
+ movl (%eax),%ebx /* restore ebx */
+ movl 4(%eax),%esp /* restore esp */
+ movl 8(%eax),%ebp /* restore ebp */
+ movl 12(%eax),%esi /* restore esi */
+ movl 16(%eax),%edi /* restore edi */
+ movl 20(%eax),%edx /* get rta */
+ movl %edx,(%esp) /* put in return frame */
+ xorl %eax,%eax /* return(1); */
+ incl %eax
+ ret
diff --git a/sys/amd64/amd64/support.s b/sys/amd64/amd64/support.s
new file mode 100644
index 0000000..c4e37df
--- /dev/null
+++ b/sys/amd64/amd64/support.s
@@ -0,0 +1,1221 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: support.s,v 1.10 1994/06/06 14:23:49 davidg Exp $
+ */
+
+#include "assym.s" /* system definitions */
+#include "errno.h" /* error return codes */
+#include "machine/asmacros.h" /* miscellaneous asm macros */
+#include "machine/cputypes.h" /* types of CPUs */
+
+#define KDSEL 0x10 /* kernel data selector */
+#define IDXSHIFT 10
+
+/*
+ * Support routines for GCC, general C-callable functions
+ */
+/*
+ * Unsigned 32-bit divide helper emitted by GCC: %edx must be zeroed
+ * first since divl divides %edx:%eax.
+ */
+ENTRY(__udivsi3)
+ movl 4(%esp),%eax
+ xorl %edx,%edx
+ divl 8(%esp)
+ ret
+
+/*
+ * Signed 32-bit divide helper: cltd sign-extends %eax into %edx:%eax
+ * before idivl.
+ */
+ENTRY(__divsi3)
+ movl 4(%esp),%eax
+ cltd
+ idivl 8(%esp)
+ ret
+
+ /*
+ * I/O bus instructions via C
+ *
+ * The NOP after each in/out is presumably an I/O recovery delay macro
+ * from asmacros.h -- confirm against its definition.
+ */
+ENTRY(inb) /* val = inb(port) */
+ movl 4(%esp),%edx
+ subl %eax,%eax /* zero %eax so only %al carries data */
+ inb %dx,%al
+ NOP
+ ret
+
+ENTRY(inw) /* val = inw(port) */
+ movl 4(%esp),%edx
+ subl %eax,%eax
+ inw %dx,%ax
+ NOP
+ ret
+
+ENTRY(insb) /* insb(port, addr, cnt) */
+ pushl %edi
+ movl 8(%esp),%edx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ insb
+ NOP
+ movl %edi,%eax /* return updated buffer pointer */
+ popl %edi
+ ret
+
+ENTRY(insw) /* insw(port, addr, cnt) */
+ pushl %edi
+ movl 8(%esp),%edx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ insw
+ NOP
+ movl %edi,%eax /* return updated buffer pointer */
+ popl %edi
+ ret
+
+ENTRY(insl) /* insl(port, addr, cnt) */
+ pushl %edi
+ movl 8(%esp),%edx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ insl
+ NOP
+ movl %edi,%eax /* return updated buffer pointer */
+ popl %edi
+ ret
+
+ENTRY(rtcin) /* rtcin(val) -- 0x70/0x71 are the AT RTC index/data ports */
+ movl 4(%esp),%eax
+ outb %al,$0x70 /* select RTC register */
+ NOP
+ xorl %eax,%eax
+ inb $0x71,%al /* read its value */
+ NOP
+ ret
+
+ENTRY(outb) /* outb(port, val) */
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ outb %al,%dx
+ NOP
+ ret
+
+ENTRY(outw) /* outw(port, val) */
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ outw %ax,%dx
+ NOP
+ ret
+
+ENTRY(outsb) /* outsb(port, addr, cnt) */
+ pushl %esi
+ movl 8(%esp),%edx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ outsb
+ NOP
+ movl %esi,%eax /* return updated buffer pointer */
+ popl %esi
+ ret
+
+ENTRY(outsw) /* outsw(port, addr, cnt) */
+ pushl %esi
+ movl 8(%esp),%edx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ outsw
+ NOP
+ movl %esi,%eax /* return updated buffer pointer */
+ popl %esi
+ ret
+
+ENTRY(outsl) /* outsl(port, addr, cnt) */
+ pushl %esi
+ movl 8(%esp),%edx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ outsl
+ NOP
+ movl %esi,%eax /* return updated buffer pointer */
+ popl %esi
+ ret
+
+/*
+ * bcopy family
+ */
+
+/*
+ * void bzero(void *base, u_int cnt)
+ * Special code for I486 because stosl uses lots
+ * of clocks. Makes little or no difference on DX2 type
+ * machines, but stosl is about 1/2 as fast as
+ * memory moves on a standard DX !!!!!
+ */
+ALTENTRY(blkclr)
+ENTRY(bzero)
+#if defined(I486_CPU)
+ cmpl $CPUCLASS_486,_cpu_class
+ jz 1f /* 486: take the unrolled move path below */
+#endif
+
+ pushl %edi
+ movl 8(%esp),%edi
+ movl 12(%esp),%ecx
+ xorl %eax,%eax
+ shrl $2,%ecx /* store cnt/4 longwords... */
+ cld
+ rep
+ stosl
+ movl 12(%esp),%ecx
+ andl $3,%ecx /* ...then the remaining 0-3 bytes */
+ rep
+ stosb
+ popl %edi
+ ret
+
+#if defined(I486_CPU)
+ SUPERALIGN_TEXT
+1:
+ movl 4(%esp),%edx
+ movl 8(%esp),%ecx
+ xorl %eax,%eax
+/
+/ do 64 byte chunks first
+/
+/ XXX this is probably over-unrolled at least for DX2's
+/
+2:
+ cmpl $64,%ecx
+ jb 3f
+ movl %eax,(%edx)
+ movl %eax,4(%edx)
+ movl %eax,8(%edx)
+ movl %eax,12(%edx)
+ movl %eax,16(%edx)
+ movl %eax,20(%edx)
+ movl %eax,24(%edx)
+ movl %eax,28(%edx)
+ movl %eax,32(%edx)
+ movl %eax,36(%edx)
+ movl %eax,40(%edx)
+ movl %eax,44(%edx)
+ movl %eax,48(%edx)
+ movl %eax,52(%edx)
+ movl %eax,56(%edx)
+ movl %eax,60(%edx)
+ addl $64,%edx
+ subl $64,%ecx
+ jnz 2b
+ ret
+
+/
+/ do 16 byte chunks
+/
+ SUPERALIGN_TEXT
+3:
+ cmpl $16,%ecx
+ jb 4f
+ movl %eax,(%edx)
+ movl %eax,4(%edx)
+ movl %eax,8(%edx)
+ movl %eax,12(%edx)
+ addl $16,%edx
+ subl $16,%ecx
+ jnz 3b
+ ret
+
+/
+/ do 4 byte chunks
+/
+ SUPERALIGN_TEXT
+4:
+ cmpl $4,%ecx
+ jb 5f
+ movl %eax,(%edx)
+ addl $4,%edx
+ subl $4,%ecx
+ jnz 4b
+ ret
+
+/
+/ do 1 byte chunks
+/ a jump table seems to be faster than a loop or more range reductions
+/
+/ XXX need a const section for non-text
+/
+ SUPERALIGN_TEXT
+jtab:
+ .long do0
+ .long do1
+ .long do2
+ .long do3
+
+/ dispatch on the 0..3 bytes that remain in %ecx
+ SUPERALIGN_TEXT
+5:
+ jmp jtab(,%ecx,4)
+
+ SUPERALIGN_TEXT
+do3:
+ movw %ax,(%edx)
+ movb %al,2(%edx)
+ ret
+
+ SUPERALIGN_TEXT
+do2:
+ movw %ax,(%edx)
+ ret
+
+ SUPERALIGN_TEXT
+do1:
+ movb %al,(%edx)
+
+ SUPERALIGN_TEXT
+do0:
+ ret
+#endif /* I486_CPU */
+
+/* fillw(pat, base, cnt) -- store cnt 16-bit copies of pat at base */
+ENTRY(fillw)
+ pushl %edi
+ movl 8(%esp),%eax
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ stosw
+ popl %edi
+ ret
+
+/* filli(pat, base, cnt) -- store cnt 32-bit copies of pat at base */
+ENTRY(filli)
+ pushl %edi
+ movl 8(%esp),%eax
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ stosl
+ popl %edi
+ ret
+
+/*
+ * void bcopyb(from, to, len) -- overlap-safe byte-by-byte copy.
+ */
+ENTRY(bcopyb)
+bcopyb:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards. */
+ addl %ecx,%esi
+ std
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ cld /* leave direction flag clear, as callers expect */
+ ret
+
+/*
+ * void bcopyw(from, to, len) -- overlap-safe copy, 16 bits at a time
+ * where possible.
+ */
+ENTRY(bcopyw)
+bcopyw:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ shrl $1,%ecx /* copy by 16-bit words */
+ cld /* nope, copy forwards */
+ rep
+ movsw
+ adc %ecx,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ andl $1,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ std
+ rep
+ movsb /* copy the odd trailing byte (if any) first */
+ movl 20(%esp),%ecx /* copy remainder by 16-bit words */
+ shrl $1,%ecx
+ decl %esi
+ decl %edi
+ rep
+ movsw
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+/*
+ * void bcopyx(from, to, len, size) -- dispatch to the 1-, 2- or
+ * 4-byte-wide copy according to size.
+ */
+ENTRY(bcopyx)
+ movl 16(%esp),%eax
+ cmpl $2,%eax
+ je bcopyw /* not _bcopyw, to avoid multiple mcounts */
+ cmpl $4,%eax
+ je bcopy /* XXX the shared ret's break mexitcount */
+ jmp bcopyb
+
+/*
+ * (ov)bcopy(src, dst, cnt)
+ * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
+ *
+ * Overlap-safe copy, 32 bits at a time where possible.
+ */
+ALTENTRY(ovbcopy)
+ENTRY(bcopy)
+bcopy:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ shrl $2,%ecx /* copy by 32-bit words */
+ cld /* nope, copy forwards */
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ andl $3,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ std
+ rep
+ movsb /* trailing 0-3 bytes first */
+ movl 20(%esp),%ecx /* copy remainder by 32-bit words */
+ shrl $2,%ecx
+ subl $3,%esi
+ subl $3,%edi
+ rep
+ movsl
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+/* 32-bit byte swap: htonl(x) == ntohl(x) on a little-endian machine */
+ALTENTRY(ntohl)
+ENTRY(htonl)
+ movl 4(%esp),%eax
+#ifdef i486
+/* XXX */
+/* Since Gas 1.38 does not grok bswap this has been coded as the
+ * equivalent bytes. This can be changed back to bswap when we
+ * upgrade to a newer version of Gas
+ */
+ /* bswap %eax */
+ .byte 0x0f
+ .byte 0xc8
+#else
+ xchgb %al,%ah
+ roll $16,%eax
+ xchgb %al,%ah
+#endif
+ ret
+
+/* 16-bit byte swap: htons(x) == ntohs(x) on a little-endian machine */
+ALTENTRY(ntohs)
+ENTRY(htons)
+ movzwl 4(%esp),%eax
+ xchgb %al,%ah
+ ret
+
+/*****************************************************************************/
+/* copyout and fubyte family */
+/*****************************************************************************/
+/*
+ * Access user memory from inside the kernel. These routines and possibly
+ * the math- and DOS emulators should be the only places that do this.
+ *
+ * We have to access the memory with user's permissions, so use a segment
+ * selector with RPL 3. For writes to user space we have to additionally
+ * check the PTE for write permission, because the 386 does not check
+ * write permissions when we are executing with EPL 0. The 486 does check
+ * this if the WP bit is set in CR0, so we can use a simpler version here.
+ *
+ * These routines set curpcb->onfault for the time they execute. When a
+ * protection violation occurs inside the functions, the trap handler
+ * returns to *curpcb->onfault instead of the function.
+ */
+
+
+/* Returns 0 on success, EFAULT on a bad or faulting user address. */
+ENTRY(copyout) /* copyout(from_kernel, to_user, len) */
+ movl _curpcb,%eax
+ movl $copyout_fault,PCB_ONFAULT(%eax)
+ pushl %esi
+ pushl %edi
+ pushl %ebx
+ movl 16(%esp),%esi
+ movl 20(%esp),%edi
+ movl 24(%esp),%ebx
+ orl %ebx,%ebx /* anything to do? */
+ jz done_copyout
+
+ /*
+ * Check explicitly for non-user addresses. If 486 write protection
+ * is being used, this check is essential because we are in kernel
+ * mode so the h/w does not provide any protection against writing
+ * kernel addresses.
+ *
+ * Otherwise, it saves having to load and restore %es to get the
+ * usual segment-based protection (the destination segment for movs
+ * is always %es). The other explicit checks for user-writability
+ * are not quite sufficient. They fail for the user area because
+ * we mapped the user area read/write to avoid having an #ifdef in
+ * vm_machdep.c. They fail for user PTEs and/or PTDs! (107
+ * addresses including 0xff800000 and 0xfc000000). I'm not sure if
+ * this can be fixed. Marking the PTEs supervisor mode and the
+ * PDE's user mode would almost work, but there may be a problem
+ * with the self-referential PDE.
+ */
+ movl %edi,%eax
+ addl %ebx,%eax
+ jc copyout_fault /* end address wrapped around */
+/*
+ * XXX STOP USING VM_MAXUSER_ADDRESS.
+ * It is an end address, not a max, so every time it is used correctly it
+ * looks like there is an off by one error, and of course it caused an off
+ * by one error in several places.
+ */
+ cmpl $VM_MAXUSER_ADDRESS,%eax
+ ja copyout_fault
+
+#if defined(I386_CPU)
+
+#if defined(I486_CPU) || defined(I586_CPU)
+ cmpl $CPUCLASS_386,_cpu_class
+ jne 3f /* non-386: skip the manual PTE walk */
+#endif
+/*
+ * We have to check each PTE for user write permission.
+ * The checking may cause a page fault, so it is important to set
+ * up everything for return via copyout_fault before here.
+ */
+ /* compute number of pages */
+ movl %edi,%ecx
+ andl $NBPG-1,%ecx
+ addl %ebx,%ecx
+ decl %ecx
+ shrl $IDXSHIFT+2,%ecx
+ incl %ecx
+
+ /* compute PTE offset for start address */
+ movl %edi,%edx
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+
+1: /* check PTE for each page */
+ movb _PTmap(%edx),%al
+ andb $0x07,%al /* Pages must be VALID + USERACC + WRITABLE */
+ cmpb $0x07,%al
+ je 2f
+
+ /* simulate a trap */
+ pushl %edx
+ pushl %ecx
+ shll $IDXSHIFT,%edx
+ pushl %edx
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+ popl %ecx
+ popl %edx
+
+ orl %eax,%eax /* if not ok, return EFAULT */
+ jnz copyout_fault
+
+2:
+ addl $4,%edx
+ decl %ecx
+ jnz 1b /* check next page */
+#endif /* I386_CPU */
+
+ /* bcopy(%esi, %edi, %ebx) */
+3: /* common fast path: longword copy plus 0-3 trailing bytes */
+ movl %ebx,%ecx
+ shrl $2,%ecx
+ cld
+ rep
+ movsl
+ movb %bl,%cl
+ andb $3,%cl
+ rep
+ movsb
+
+done_copyout:
+ popl %ebx
+ popl %edi
+ popl %esi
+ xorl %eax,%eax /* success */
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+copyout_fault: /* reached directly or via onfault from the trap handler */
+ popl %ebx
+ popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $EFAULT,%eax
+ ret
+
+/* copyin(from_user, to_kernel, len) -- returns 0 or EFAULT */
+ENTRY(copyin)
+ movl _curpcb,%eax
+ movl $copyin_fault,PCB_ONFAULT(%eax)
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi /* caddr_t from */
+ movl 16(%esp),%edi /* caddr_t to */
+ movl 20(%esp),%ecx /* size_t len */
+
+ /*
+ * make sure address is valid
+ */
+ movl %esi,%edx
+ addl %ecx,%edx
+ jc copyin_fault /* end address wrapped around */
+ cmpl $VM_MAXUSER_ADDRESS,%edx
+ ja copyin_fault
+
+ movb %cl,%al /* stash low byte of len for the tail copy */
+ shrl $2,%ecx /* copy longword-wise */
+ cld
+ rep
+ movsl
+ movb %al,%cl
+ andb $3,%cl /* copy remaining bytes */
+ rep
+ movsb
+
+ popl %edi
+ popl %esi
+ xorl %eax,%eax /* success */
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+copyin_fault:
+ popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $EFAULT,%eax
+ ret
+
+/*
+ * fu{byte,sword,word} : fetch a byte (sword, word) from user memory
+ * All return -1 on fault (via fusufault below).
+ */
+ALTENTRY(fuiword)
+ENTRY(fuword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx /* from */
+
+ cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address is valid */
+ ja fusufault
+
+ movl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+/*
+ * These two routines are called from the profiling code, potentially
+ * at interrupt time. If they fail, that's okay, good things will
+ * happen later. Fail all the time for now - until the trap code is
+ * able to deal with this.
+ */
+ALTENTRY(suswintr)
+ENTRY(fuswintr)
+ movl $-1,%eax
+ ret
+
+/* fetch a 16-bit short from user memory, zero-extended */
+ENTRY(fusword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+
+ cmpl $VM_MAXUSER_ADDRESS-2,%edx
+ ja fusufault
+
+ movzwl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+/* fetch a byte from user memory, zero-extended */
+ALTENTRY(fuibyte)
+ENTRY(fubyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+
+ cmpl $VM_MAXUSER_ADDRESS-1,%edx
+ ja fusufault
+
+ movzbl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ ALIGN_TEXT
+/* common fault handler: clear onfault and return -1 */
+fusufault:
+ movl _curpcb,%ecx
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ decl %eax /* %eax = -1 */
+ ret
+
+/*
+ * su{byte,sword,word}: write a byte (word, longword) to user memory
+ * Return 0 on success, -1 on fault.
+ */
+ALTENTRY(suiword)
+ENTRY(suword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+
+#if defined(I386_CPU)
+
+#if defined(I486_CPU) || defined(I586_CPU)
+ cmpl $CPUCLASS_386,_cpu_class
+ jne 2f /* we only have to set the right segment selector */
+#endif /* I486_CPU || I586_CPU */
+
+ /* XXX - page boundary crossing is still not handled */
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx /* remove junk parameter from stack */
+ movl _curpcb,%ecx /* restore trashed register */
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+#endif
+
+2:
+ cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address validity */
+ ja fusufault
+
+ movl 8(%esp),%eax
+ movl %eax,(%edx)
+ xorl %eax,%eax /* success */
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+/* store a 16-bit short to user memory */
+ENTRY(susword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+
+#if defined(I386_CPU)
+
+#if defined(I486_CPU) || defined(I586_CPU)
+ cmpl $CPUCLASS_386,_cpu_class
+ jne 2f
+#endif /* I486_CPU || I586_CPU */
+
+ /* XXX - page boundary crossing is still not handled */
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx /* remove junk parameter from stack */
+ movl _curpcb,%ecx /* restore trashed register */
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+#endif
+
+2:
+ cmpl $VM_MAXUSER_ADDRESS-2,%edx /* verify address validity */
+ ja fusufault
+
+ movw 8(%esp),%ax
+ movw %ax,(%edx)
+ xorl %eax,%eax /* success */
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+/* store a byte to user memory */
+ALTENTRY(suibyte)
+ENTRY(subyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+
+#if defined(I386_CPU)
+
+#if defined(I486_CPU) || defined(I586_CPU)
+ cmpl $CPUCLASS_386,_cpu_class
+ jne 2f
+#endif /* I486_CPU || I586_CPU */
+
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx /* remove junk parameter from stack */
+ movl _curpcb,%ecx /* restore trashed register */
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+#endif
+
+2:
+ cmpl $VM_MAXUSER_ADDRESS-1,%edx /* verify address validity */
+ ja fusufault
+
+ movb 8(%esp),%al
+ movb %al,(%edx)
+ xorl %eax,%eax /* success */
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+/*
+ * copyoutstr(from, to, maxlen, int *lencopied)
+ * copy a string from from to to, stop when a 0 character is reached.
+ * return ENAMETOOLONG if string is longer than maxlen, and
+ * EFAULT on protection violations. If lencopied is non-zero,
+ * return the actual length in *lencopied.
+ *
+ * %edx holds the remaining byte budget throughout.
+ */
+ENTRY(copyoutstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $cpystrflt,PCB_ONFAULT(%ecx) /* XXX rename copyoutstr_fault */
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ cld
+
+#if defined(I386_CPU)
+
+#if defined(I486_CPU) || defined(I586_CPU)
+ cmpl $CPUCLASS_386,_cpu_class
+ jne 5f /* non-386: no manual PTE check needed */
+#endif /* I486_CPU || I586_CPU */
+
+1:
+ /*
+ * It suffices to check that the first byte is in user space, because
+ * we look at a page at a time and the end address is on a page
+ * boundary.
+ */
+ cmpl $VM_MAXUSER_ADDRESS-1,%edi
+ ja cpystrflt
+
+ movl %edi,%eax
+ shrl $IDXSHIFT,%eax
+ andb $0xfc,%al
+ movb _PTmap(%eax),%al
+ andb $7,%al /* VALID + USERACC + WRITABLE */
+ cmpb $7,%al
+ je 2f
+
+ /* simulate trap */
+ pushl %edx
+ pushl %edi
+ call _trapwrite
+ cld /* _trapwrite may have clobbered the direction flag */
+ popl %edi
+ popl %edx
+ orl %eax,%eax
+ jnz cpystrflt
+
+2: /* copy up to end of this page */
+ movl %edi,%eax
+ andl $NBPG-1,%eax
+ movl $NBPG,%ecx
+ subl %eax,%ecx /* ecx = NBPG - (src % NBPG) */
+ cmpl %ecx,%edx
+ jae 3f
+ movl %edx,%ecx /* ecx = min(ecx, edx) */
+3:
+ orl %ecx,%ecx
+ jz 4f
+ decl %ecx
+ decl %edx
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 3b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+
+4: /* next page */
+ orl %edx,%edx
+ jnz 1b
+
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp cpystrflt_x
+#endif /* I386_CPU */
+
+#if defined(I486_CPU) || defined(I586_CPU)
+5: /* 486+: h/w checks write permission, copy a byte at a time */
+ incl %edx
+1:
+ decl %edx
+ jz 2f
+ /*
+ * XXX - would be faster to rewrite this function to use
+ * strlen() and copyout().
+ */
+ cmpl $VM_MAXUSER_ADDRESS-1,%edi
+ ja cpystrflt
+
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp cpystrflt_x
+2:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp cpystrflt_x
+
+#endif /* I486_CPU || I586_CPU */
+
+/*
+ * This was split from copyinstr_fault mainly because pushing gs changes the
+ * stack offsets. It's better to have it separate for mcounting too.
+ */
+cpystrflt:
+ movl $EFAULT,%eax
+cpystrflt_x:
+ /* set *lencopied and return %eax */
+ movl _curpcb,%ecx
+ movl $0,PCB_ONFAULT(%ecx)
+ movl 20(%esp),%ecx
+ subl %edx,%ecx /* bytes copied = maxlen - remaining */
+ movl 24(%esp),%edx
+ orl %edx,%edx /* lencopied may be NULL */
+ jz 1f
+ movl %ecx,(%edx)
+1:
+ popl %edi
+ popl %esi
+ ret
+
+
+/*
+ * copyinstr(from, to, maxlen, int *lencopied)
+ * copy a string from from to to, stop when a 0 character is reached.
+ * return ENAMETOOLONG if string is longer than maxlen, and
+ * EFAULT on protection violations. If lencopied is non-zero,
+ * return the actual length in *lencopied.
+ */
+ENTRY(copyinstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $copyinstr_fault,PCB_ONFAULT(%ecx)
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ /*
+ * XXX should avoid touching gs. Either copy the string in and
+ * check the bounds later or get its length and check the bounds
+ * and then use copyin().
+ */
+ pushl %gs
+ movl __udatasel,%eax
+ movl %ax,%gs /* user reads go through %gs (RPL 3 selector) */
+ incl %edx
+ cld
+1:
+ decl %edx
+ jz 2f
+ gs
+ lodsb /* gs-prefixed load from user space */
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 3f
+2:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp 3f
+
+ ALIGN_TEXT
+copyinstr_fault:
+ movl $EFAULT,%eax
+3:
+ /* set *lencopied and return %eax */
+ movl _curpcb,%ecx
+ movl $0,PCB_ONFAULT(%ecx)
+ movl 24(%esp),%ecx /* offsets are +4 because %gs is pushed */
+ subl %edx,%ecx
+ movl 28(%esp),%edx
+ orl %edx,%edx /* lencopied may be NULL */
+ jz 4f
+ movl %ecx,(%edx)
+4:
+ popl %gs
+ popl %edi
+ popl %esi
+ ret
+
+
+/*
+ * copystr(from, to, maxlen, int *lencopied)
+ *
+ * Kernel-to-kernel string copy: no onfault handler is installed.
+ */
+ENTRY(copystr)
+ pushl %esi
+ pushl %edi
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ incl %edx
+ cld
+1:
+ decl %edx
+ jz 4f
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+4:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+
+6:
+ /* set *lencopied and return %eax */
+ movl 20(%esp),%ecx
+ subl %edx,%ecx
+ movl 24(%esp),%edx
+ orl %edx,%edx /* lencopied may be NULL */
+ jz 7f
+ movl %ecx,(%edx)
+7:
+ popl %edi
+ popl %esi
+ ret
+
+/*
+ * Handling of special 386 registers and descriptor tables etc
+ */
+/* void lgdt(struct region_descriptor *rdp); */
+ENTRY(lgdt)
+ /* reload the descriptor table */
+ movl 4(%esp),%eax
+ lgdt (%eax)
+
+ /* flush the prefetch q */
+ jmp 1f
+ nop
+1:
+ /* reload "stale" selectors */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ movl %ax,%ss
+
+ /* reload code selector by turning return into intersegmental return */
+ movl (%esp),%eax
+ pushl %eax
+# movl $KCSEL,4(%esp)
+ movl $8,4(%esp) /* 8 = kernel code selector (KCSEL) */
+ lret
+
+/*
+ * void lidt(struct region_descriptor *rdp);
+ *
+ * Load the interrupt descriptor table register from *rdp.
+ */
+ENTRY(lidt)
+ movl 4(%esp),%eax
+ lidt (%eax)
+ ret
+
+/*
+ * void lldt(u_short sel)
+ *
+ * Load the local descriptor table register; the selector argument is
+ * used directly as a 16-bit memory operand on the stack.
+ */
+ENTRY(lldt)
+ lldt 4(%esp)
+ ret
+
+/*
+ * void ltr(u_short sel)
+ *
+ * Load the task register with the given TSS selector.
+ */
+ENTRY(ltr)
+ ltr 4(%esp)
+ ret
+
+/*
+ * ssdtosd(*ssdp,*sdp)
+ *
+ * Shuffle a machine-independent "software" segment descriptor (layout
+ * defined elsewhere) into the split base/limit format the i386 hardware
+ * expects, storing the two result longwords at *sdp.
+ */
+ENTRY(ssdtosd)
+ pushl %ebx
+ movl 8(%esp),%ecx
+ movl 8(%ecx),%ebx
+ shll $16,%ebx
+ movl (%ecx),%edx
+ roll $16,%edx
+ movb %dh,%bl
+ movb %dl,%bh
+ rorl $8,%ebx
+ movl 4(%ecx),%eax
+ movw %ax,%dx
+ andl $0xf0000,%eax /* keep limit bits 16..19 */
+ orl %eax,%ebx
+ movl 12(%esp),%ecx
+ movl %edx,(%ecx)
+ movl %ebx,4(%ecx)
+ popl %ebx
+ ret
+
+/* load_cr0(cr0) -- write the machine control register %cr0 */
+ENTRY(load_cr0)
+ movl 4(%esp),%eax
+ movl %eax,%cr0
+ ret
+
+/* rcr0() -- read %cr0 */
+ENTRY(rcr0)
+ movl %cr0,%eax
+ ret
+
+/* rcr3() -- read %cr3 (page directory base register) */
+ENTRY(rcr3)
+ movl %cr3,%eax
+ ret
+
+/* void load_cr3(caddr_t cr3) */
+ENTRY(load_cr3)
+ movl 4(%esp),%eax
+ orl $I386_CR3PAT,%eax /* merge fixed CR3 attribute bits (defined elsewhere) */
+ movl %eax,%cr3
+ ret
+
+
+/*****************************************************************************/
+/* setjump, longjump */
+/*****************************************************************************/
+
+ENTRY(setjmp)
+ movl 4(%esp),%eax
+ movl %ebx,(%eax) /* save ebx */
+ movl %esp,4(%eax) /* save esp */
+ movl %ebp,8(%eax) /* save ebp */
+ movl %esi,12(%eax) /* save esi */
+ movl %edi,16(%eax) /* save edi */
+ movl (%esp),%edx /* get rta */
+ movl %edx,20(%eax) /* save eip */
+ xorl %eax,%eax /* return(0); */
+ ret
+
+ENTRY(longjmp)
+ movl 4(%esp),%eax
+ movl (%eax),%ebx /* restore ebx */
+ movl 4(%eax),%esp /* restore esp */
+ movl 8(%eax),%ebp /* restore ebp */
+ movl 12(%eax),%esi /* restore esi */
+ movl 16(%eax),%edi /* restore edi */
+ movl 20(%eax),%edx /* get rta */
+ movl %edx,(%esp) /* put in return frame */
+ xorl %eax,%eax /* return(1); */
+ incl %eax
+ ret
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
new file mode 100644
index 0000000..aa8b5ba
--- /dev/null
+++ b/sys/amd64/amd64/swtch.s
@@ -0,0 +1,458 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: swtch.s,v 1.5 1994/04/02 07:00:30 davidg Exp $
+ */
+
+#include "npx.h" /* for NNPX */
+#include "assym.s" /* for preprocessor defines */
+#include "errno.h" /* for error codes */
+
+#include "machine/asmacros.h" /* for miscellaneous assembly macros */
+#define LOCORE /* XXX inhibit C declarations */
+#include "machine/spl.h" /* for SWI_AST_MASK ... */
+
+
+/*****************************************************************************/
+/* Scheduling */
+/*****************************************************************************/
+
+/*
+ * The following primitives manipulate the run queues.
+ * _whichqs tells which of the 32 queues _qs
+ * have processes in them. setrunqueue puts processes into queues, Remrq
+ * removes them from queues. The running process is on no queue,
+ * other processes are on a queue related to p->p_priority, divided by 4
+ * actually to shrink the 0-127 range of priorities into the 32 available
+ * queues.
+ */
+ .data
+ .globl _curpcb, _whichqs
+_curpcb: .long 0 /* pointer to curproc's PCB area */
+_whichqs: .long 0 /* which run queues have data */
+
+ .globl _qs,_cnt,_panic
+ .comm _noproc,4
+ .comm _runrun,4
+
+ .globl _want_resched
+_want_resched: .long 0 /* we need to re-run the scheduler */
+
+ .text
+/*
+ * setrunqueue(p)
+ *
+ * Call should be made at spl6(), and p->p_stat should be SRUN
+ *
+ * Links p onto the tail of the queue for its priority class and marks
+ * that queue non-empty in _whichqs. Panics if p is already on a queue
+ * (p_rlink != 0).
+ */
+ENTRY(setrunqueue)
+ movl 4(%esp),%eax
+ cmpl $0,P_RLINK(%eax) /* should not be on q already */
+ je set1
+ pushl $set2
+ call _panic
+set1:
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx /* priority 0-127 -> queue index 0-31 */
+ btsl %edx,_whichqs /* set q full bit */
+ shll $3,%edx /* *8: each q header is two 4-byte links */
+ addl $_qs,%edx /* locate q hdr */
+ movl %edx,P_LINK(%eax) /* link process on tail of q */
+ movl P_RLINK(%edx),%ecx
+ movl %ecx,P_RLINK(%eax)
+ movl %eax,P_RLINK(%edx)
+ movl %eax,P_LINK(%ecx)
+ ret
+
+set2: .asciz "setrunqueue"
+
+/*
+ * Remrq(p)
+ *
+ * Call should be made at spl6().
+ *
+ * Unlinks p from its run queue, clearing the queue's bit in _whichqs
+ * if the queue becomes empty; panics if that bit was already clear.
+ * p->p_rlink is zeroed to mark the process as off-queue.
+ */
+ENTRY(remrq)
+ movl 4(%esp),%eax
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btrl %edx,_whichqs /* clear full bit, panic if clear already */
+ jb rem1
+ pushl $rem3
+ call _panic
+rem1:
+ pushl %edx /* save queue index across the unlink */
+ movl P_LINK(%eax),%ecx /* unlink process */
+ movl P_RLINK(%eax),%edx
+ movl %edx,P_RLINK(%ecx)
+ movl P_RLINK(%eax),%ecx
+ movl P_LINK(%eax),%edx
+ movl %edx,P_LINK(%ecx)
+ popl %edx
+ movl $_qs,%ecx
+ shll $3,%edx
+ addl %edx,%ecx
+ cmpl P_LINK(%ecx),%ecx /* q still has something? */
+ je rem2
+ shrl $3,%edx /* yes, set bit as still full */
+ btsl %edx,_whichqs
+rem2:
+ movl $0,P_RLINK(%eax) /* zap reverse link to indicate off list */
+ ret
+
+rem3: .asciz "remrq"
+sw0: .asciz "cpu_switch"
+
+/*
+ * When no processes are on the runq, cpu_switch() branches to _idle
+ * to wait for something to come ready.
+ */
+ ALIGN_TEXT
+_idle:
+ MCOUNT /* profiling hook */
+ movl _IdlePTD,%ecx
+ movl %ecx,%cr3 /* idle on the kernel-only page tables */
+ movl $tmpstk-4,%esp
+ sti
+
+ /*
+ * XXX callers of cpu_switch() do a bogus splclock(). Locking should
+ * be left to cpu_switch().
+ */
+ movl $SWI_AST_MASK,_cpl
+ testl $~SWI_AST_MASK,_ipending
+ je idle_loop
+ call _splz
+
+ ALIGN_TEXT
+idle_loop:
+ cli /* close the test-vs-halt race window */
+ cmpl $0,_whichqs
+ jne sw1a
+ sti /* sti;hlt: interrupts can wake the halt */
+ hlt /* wait for interrupt */
+ jmp idle_loop
+
+badsw:
+ pushl $sw0
+ call _panic
+ /*NOTREACHED*/
+
+/*
+ * cpu_switch()
+ *
+ * Save the outgoing process's context (if any) into its pcb, then pick
+ * the next runnable process from the lowest-numbered non-empty run
+ * queue and resume it. Branches to _idle when all queues are empty.
+ */
+ENTRY(cpu_switch)
+ incl _cnt+V_SWTCH
+
+ /* switch to new process. first, save context as needed */
+
+ movl _curproc,%ecx
+
+ /* if no process to save, don't bother */
+ testl %ecx,%ecx
+ je sw1
+
+ movl P_ADDR(%ecx),%ecx
+
+ movl (%esp),%eax /* Hardware registers */
+ movl %eax,PCB_EIP(%ecx)
+ movl %ebx,PCB_EBX(%ecx)
+ movl %esp,PCB_ESP(%ecx)
+ movl %ebp,PCB_EBP(%ecx)
+ movl %esi,PCB_ESI(%ecx)
+ movl %edi,PCB_EDI(%ecx)
+
+#if NNPX > 0
+ /* have we used fp, and need a save? */
+ mov _curproc,%eax
+ cmp %eax,_npxproc
+ jne 1f
+ pushl %ecx /* h/w bugs make saving complicated */
+ leal PCB_SAVEFPU(%ecx),%eax
+ pushl %eax
+ call _npxsave /* do it in a big C function */
+ popl %eax
+ popl %ecx
+1:
+#endif /* NNPX > 0 */
+
+ movl _CMAP2,%eax /* save temporary map PTE */
+ movl %eax,PCB_CMAP2(%ecx) /* in our context */
+ movl $0,_curproc /* out of process */
+
+# movw _cpl,%ax
+# movw %ax,PCB_IML(%ecx) /* save ipl */
+
+ /* save is done, now choose a new process or idle */
+sw1:
+ cli
+sw1a:
+ movl _whichqs,%edi
+2:
+ /* XXX - bsf is sloow */
+ bsfl %edi,%eax /* find a full q (ZF set iff %edi == 0) */
+ je _idle /* if none, idle */
+
+ /* XX update whichqs? */
+ btrl %eax,%edi /* clear q full status */
+ jnb 2b /* if it was clear, look for another */
+ movl %eax,%ebx /* save which one we are using */
+
+ shll $3,%eax /* *8: q header is two 4-byte links */
+ addl $_qs,%eax /* select q */
+ movl %eax,%esi
+
+#ifdef DIAGNOSTIC
+ cmpl P_LINK(%eax),%eax /* linked to self? (e.g. not on list) */
+ je badsw /* not possible */
+#endif
+
+ movl P_LINK(%eax),%ecx /* unlink from front of process q */
+ movl P_LINK(%ecx),%edx
+ movl %edx,P_LINK(%eax)
+ movl P_RLINK(%ecx),%eax
+ movl %eax,P_RLINK(%edx)
+
+ cmpl P_LINK(%ecx),%esi /* q empty */
+ je 3f
+ btsl %ebx,%edi /* nope, set to indicate full */
+3:
+ movl %edi,_whichqs /* update q status */
+
+ movl $0,%eax
+ movl %eax,_want_resched
+
+#ifdef DIAGNOSTIC
+ cmpl %eax,P_WCHAN(%ecx) /* new process must not be sleeping... */
+ jne badsw
+ cmpb $SRUN,P_STAT(%ecx) /* ...and must be runnable */
+ jne badsw
+#endif
+
+ movl %eax,P_RLINK(%ecx) /* isolate process to run */
+ movl P_ADDR(%ecx),%edx
+ movl PCB_CR3(%edx),%ebx
+
+ /* switch address space */
+ movl %ebx,%cr3
+
+ /* restore context */
+ movl PCB_EBX(%edx),%ebx
+ movl PCB_ESP(%edx),%esp
+ movl PCB_EBP(%edx),%ebp
+ movl PCB_ESI(%edx),%esi
+ movl PCB_EDI(%edx),%edi
+ movl PCB_EIP(%edx),%eax
+ movl %eax,(%esp) /* resume at the saved eip on ret */
+
+ movl PCB_CMAP2(%edx),%eax /* get temporary map */
+ movl %eax,_CMAP2 /* reload temporary map PTE */
+
+ movl %ecx,_curproc /* into next process */
+ movl %edx,_curpcb
+
+#ifdef USER_LDT
+ cmpl $0, PCB_USERLDT(%edx)
+ jnz 1f
+ movl __default_ldt,%eax
+ cmpl _currentldt,%eax
+ je 2f
+ lldt __default_ldt
+ movl %eax,_currentldt
+ jmp 2f
+1: pushl %edx
+ call _set_user_ldt
+ popl %edx
+2:
+#endif
+
+ pushl %edx /* save p to return */
+/*
+ * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
+ * I think restoring the cpl is unnecessary, but we must turn off the cli
+ * now that spl*() don't do it as a side affect.
+ */
+ pushl PCB_IML(%edx)
+ sti
+#if 0
+ call _splx
+#endif
+ addl $4,%esp
+/*
+ * XXX - 0.0 gets here via swtch_to_inactive(). I think 0.1 gets here in the
+ * same way. Better return a value.
+ */
+ popl %eax /* return(p); */
+ ret
+
+/*
+ * mvesp() - return the current stack pointer in %eax.
+ * No arguments, no clobbers beyond %eax.
+ */
+ENTRY(mvesp)
+	movl	%esp,%eax
+	ret
+/*
+ * struct proc *swtch_to_inactive(struct proc *p);
+ *
+ * At exit of a process, move off the address space of the
+ * process and onto a "safe" one. Then, on a temporary stack
+ * return and run code that disposes of the old state.
+ * Since this code requires a parameter from the "old" stack,
+ * pass it back as a return value.
+ *
+ * Pops its own return address and argument off the dying stack before
+ * switching, then jumps back to the caller's remainder on tmpstk.
+ */
+ENTRY(swtch_to_inactive)
+	popl	%edx			/* old pc */
+	popl	%eax			/* arg, our return value */
+	movl	_IdlePTD,%ecx		/* kernel-only page directory */
+	movl	%ecx,%cr3		/* good bye address space */
+ #write buffer?
+	movl	$tmpstk-4,%esp		/* temporary stack, compensated for call */
+	MEXITCOUNT
+	jmp	%edx			/* return, execute remainder of cleanup */
+					/* (old-gas indirect jump; modern gas spells it jmp *%edx) */
+
+/*
+ * savectx(pcb, altreturn)
+ * Update pcb, saving current processor state and arranging
+ * for alternate return ala longjmp in cpu_switch if altreturn is true.
+ *
+ * In:  4(%esp) = pcb pointer, 8(%esp) = altreturn flag.
+ * Out: %eax = 0 (the longjmp-style alternate return resumes with the
+ *      caller's saved state instead).
+ */
+ENTRY(savectx)
+	movl	4(%esp),%ecx		/* %ecx = target pcb */
+	movw	_cpl,%ax
+	movw	%ax,PCB_IML(%ecx)	/* save current interrupt mask */
+	movl	(%esp),%eax		/* our return address ... */
+	movl	%eax,PCB_EIP(%ecx)	/* ... becomes the resume pc */
+	movl	%ebx,PCB_EBX(%ecx)
+	movl	%esp,PCB_ESP(%ecx)
+	movl	%ebp,PCB_EBP(%ecx)
+	movl	%esi,PCB_ESI(%ecx)
+	movl	%edi,PCB_EDI(%ecx)
+
+#if NNPX > 0
+	/*
+	 * If npxproc == NULL, then the npx h/w state is irrelevant and the
+	 * state had better already be in the pcb.  This is true for forks
+	 * but not for dumps (the old book-keeping with FP flags in the pcb
+	 * always lost for dumps because the dump pcb has 0 flags).
+	 *
+	 * If npxproc != NULL, then we have to save the npx h/w state to
+	 * npxproc's pcb and copy it to the requested pcb, or save to the
+	 * requested pcb and reload.  Copying is easier because we would
+	 * have to handle h/w bugs for reloading.  We used to lose the
+	 * parent's npx state for forks by forgetting to reload.
+	 */
+	mov	_npxproc,%eax
+	testl	%eax,%eax
+	je	1f
+
+	pushl	%ecx			/* preserve target pcb across calls */
+	movl	P_ADDR(%eax),%eax
+	leal	PCB_SAVEFPU(%eax),%eax	/* %eax = npxproc's FPU save area */
+	pushl	%eax			/* pushed twice: npxsave() arg ... */
+	pushl	%eax			/* ... plus a copy kept for bcopy below */
+	call	_npxsave
+	popl	%eax
+	popl	%eax			/* %eax = npxproc save area again */
+	popl	%ecx			/* restore target pcb */
+
+	pushl	%ecx
+	pushl	$108+8*2	/* XXX h/w state size + padding */
+	leal	PCB_SAVEFPU(%ecx),%ecx
+	pushl	%ecx			/* bcopy dst = target pcb save area */
+	pushl	%eax			/* bcopy src = npxproc save area */
+	call	_bcopy
+	addl	$12,%esp
+	popl	%ecx
+1:
+#endif	/* NNPX > 0 */
+
+	movl	_CMAP2,%eax		/* save temporary map PTE */
+	movl	%eax,PCB_CMAP2(%ecx)	/* in our context */
+
+	cmpl	$0,8(%esp)		/* altreturn requested? */
+	je	1f
+	movl	%esp,%edx		/* relocate current sp relative to pcb */
+	subl	$_kstack,%edx		/*   (sp is relative to kstack): */
+	addl	%edx,%ecx		/*   pcb += sp - kstack; */
+	movl	%eax,(%ecx)		/* write return pc at (relocated) sp@ */
+
+/* this mess deals with replicating register state gcc hides */
+	movl	12(%esp),%eax
+	movl	%eax,12(%ecx)
+	movl	16(%esp),%eax
+	movl	%eax,16(%ecx)
+	movl	20(%esp),%eax
+	movl	%eax,20(%ecx)
+	movl	24(%esp),%eax
+	movl	%eax,24(%ecx)
+1:
+	xorl	%eax,%eax		/* return 0 */
+	ret
+
+/*
+ * addupc(int pc, struct uprof *up, int ticks):
+ * update profiling information for the user process.
+ *
+ * Computes the profiling bucket from pc via the fixed-point pr_scale,
+ * then adds ticks to it.  A fault while touching the (user) buffer
+ * lands in proffault below and disables profiling.
+ */
+ENTRY(addupc)
+	pushl %ebp
+	movl %esp,%ebp
+	movl 12(%ebp),%edx		/* up */
+	movl 8(%ebp),%eax		/* pc */
+
+	subl PR_OFF(%edx),%eax		/* pc -= up->pr_off */
+	jb L1				/* if (pc was < off) return */
+
+	shrl $1,%eax			/* praddr = pc >> 1 */
+	imull PR_SCALE(%edx),%eax	/* praddr *= up->pr_scale */
+	shrl $15,%eax			/* praddr >>= 15 (drop fixed-point fraction) */
+	andl $-2,%eax			/* praddr &= ~1 */
+
+	cmpl PR_SIZE(%edx),%eax		/* if (praddr > up->pr_size) return */
+	ja L1
+
+/*	addl %eax,%eax		-- praddr -> word offset (disabled) */
+	addl PR_BASE(%edx),%eax		/* praddr += up-> pr_base */
+	movl 16(%ebp),%ecx		/* ticks */
+
+	movl _curpcb,%edx
+	movl $proffault,PCB_ONFAULT(%edx)	/* arm fault recovery */
+	addl %ecx,(%eax)		/* storage location += ticks */
+	movl $0,PCB_ONFAULT(%edx)
+L1:
+	leave
+	ret
+
+	ALIGN_TEXT
+proffault:
+	/* if we get a fault, then kill profiling all together */
+	/* reached via PCB_ONFAULT set in addupc; %ebp still valid there */
+	movl $0,PCB_ONFAULT(%edx)	/* squish the fault handler */
+	movl 12(%ebp),%ecx
+	movl $0,PR_SCALE(%ecx)		/* up->pr_scale = 0 */
+	leave
+	ret
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
new file mode 100644
index 0000000..92758ad
--- /dev/null
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -0,0 +1,328 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
+ * $Id: sys_machdep.c,v 1.3 1993/10/16 14:15:10 rgrimes Exp $
+ */
+
+#include "param.h"
+#include "systm.h"
+#include "ioctl.h"
+#include "file.h"
+#include "time.h"
+#include "proc.h"
+#include "uio.h"
+#include "kernel.h"
+#include "mtio.h"
+#include "buf.h"
+#include "trace.h"
+
+#ifdef USER_LDT
+#include "user.h"
+#include "machine/cpu.h"
+#include "machine/sysarch.h"
+#include "vm/vm_kern.h" /* for kernel_map */
+#endif
+
+#ifdef TRACE
+int	nvualarm;		/* count of outstanding VTR_UALARM timeouts (max 5) */
+
+struct vtrace_args {
+	int	request;
+	int	value;
+};
+
+/*
+ * vtrace: kernel trace-point control syscall.
+ * Enables/disables/reads trace flags, or schedules a short user alarm.
+ * Returns 0 or EINVAL; previous flag value via *retval where relevant.
+ * (K&R style: implicit int return.)
+ */
+vtrace(p, uap, retval)
+	struct proc *p;
+	register struct vtrace_args *uap;
+	int *retval;
+{
+	int vdoualarm();
+
+	switch (uap->request) {
+
+	case VTR_DISABLE:		/* disable a trace point */
+	case VTR_ENABLE:		/* enable a trace point */
+		if (uap->value < 0 || uap->value >= TR_NFLAGS)
+			return (EINVAL);
+		*retval = traceflags[uap->value];	/* hand back old setting */
+		traceflags[uap->value] = uap->request;
+		break;
+
+	case VTR_VALUE:		/* return a trace point setting */
+		if (uap->value < 0 || uap->value >= TR_NFLAGS)
+			return (EINVAL);
+		*retval = traceflags[uap->value];
+		break;
+
+	case VTR_UALARM:	/* set a real-time ualarm, less than 1 min */
+		if (uap->value <= 0 || uap->value > 60 * hz || nvualarm > 5)
+			return (EINVAL);
+		nvualarm++;
+		timeout(vdoualarm, (caddr_t)p->p_pid, uap->value);
+		break;
+
+	case VTR_STAMP:
+		trace(TR_STAMP, uap->value, p->p_pid);
+		break;
+	}
+	return (0);
+}
+
+/*
+ * vdoualarm: timeout callback for VTR_UALARM; signals the process
+ * (looked up by pid, which may have exited meanwhile) and drops the count.
+ */
+vdoualarm(arg)
+	int arg;
+{
+	register struct proc *p;
+
+	p = pfind(arg);
+	if (p)
+		psignal(p, 16);	/* hard-coded signal 16 (SIGURG on 4.xBSD) -- verify */
+	nvualarm--;
+}
+#endif
+
+#ifdef USER_LDT
+/*
+ * set_user_ldt: install the per-process LDT described by pcb->pcb_ldt
+ * into the GDT's GUSERLDT slot and activate it with lldt.
+ * Caller must ensure pcb_ldt is valid; updates the currentldt cache.
+ */
+void
+set_user_ldt(struct pcb *pcb)
+{
+	gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)pcb->pcb_ldt;
+	gdt_segs[GUSERLDT_SEL].ssd_limit = (pcb->pcb_ldt_len * sizeof(union descriptor)) - 1;
+	ssdtosd(gdt_segs+GUSERLDT_SEL, gdt+GUSERLDT_SEL);
+	lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
+	currentldt = GSEL(GUSERLDT_SEL, SEL_KPL);	/* cache for cpu_switch */
+}
+
+struct i386_get_ldt_args {
+	int	start;			/* first descriptor index */
+	union descriptor *desc;		/* user buffer to fill */
+	int	num;			/* number of descriptors wanted */
+};
+
+/*
+ * i386_get_ldt: copy out up to 'num' LDT descriptors starting at 'start'
+ * from the process's private LDT (or the default kernel ldt[] if none).
+ * *retval receives the count copied.  Returns 0 or errno.
+ */
+int
+i386_get_ldt(p, args, retval)
+	struct proc *p;
+	char *args;
+	int *retval;
+{
+	int error = 0;
+	struct pcb *pcb = &p->p_addr->u_pcb;
+	int nldt, num;
+	union descriptor *lp;
+	int s;
+	struct i386_get_ldt_args ua, *uap;
+
+	/* NOTE(review): copyin returns 0 or a positive errno, so the < 0
+	 * test never fires and copyin failures are silently ignored --
+	 * should be != 0.  Left as-is (patch artifact). */
+	if ((error = copyin(args, &ua, sizeof(struct i386_get_ldt_args))) < 0)
+		return(error);
+
+	uap = &ua;
+#ifdef	DEBUG
+	printf("i386_get_ldt: start=%d num=%d descs=%x\n", uap->start, uap->num, uap->desc);
+#endif
+
+	if (uap->start < 0 || uap->num < 0)
+		return(EINVAL);
+
+	s = splhigh();		/* keep LDT stable against switches */
+
+	if (pcb->pcb_ldt) {
+		nldt = pcb->pcb_ldt_len;
+		num = min(uap->num, nldt);
+		lp = &((union descriptor *)(pcb->pcb_ldt))[uap->start];
+	} else {
+		nldt = sizeof(ldt)/sizeof(ldt[0]);
+		num = min(uap->num, nldt);
+		lp = &ldt[uap->start];
+	}
+	/* NOTE(review): only 'start' is bounded; start+num may still run
+	 * past the table, so the copyout below can over-read -- verify. */
+	if (uap->start > nldt) {
+		splx(s);
+		return(EINVAL);
+	}
+
+	error = copyout(lp, uap->desc, num * sizeof(union descriptor));
+	if (!error)
+		*retval = num;
+
+	splx(s);
+	return(error);
+}
+
+struct i386_set_ldt_args {
+	int	start;			/* first descriptor index to set */
+	union descriptor *desc;		/* user array of new descriptors */
+	int	num;			/* number of descriptors */
+};
+
+/*
+ * i386_set_ldt: install user-supplied LDT descriptors [start, start+num)
+ * into the process's private LDT, allocating one (512 entries, seeded
+ * from the default ldt[]) on first use.  Only ring-3, present, memory or
+ * call-gate descriptors are accepted.  *retval = start on success.
+ */
+int
+i386_set_ldt(p, args, retval)
+	struct proc *p;
+	char *args;
+	int *retval;
+{
+	int error = 0, i, n;
+	struct pcb *pcb = &p->p_addr->u_pcb;
+	union descriptor *lp;
+	int s;
+	struct i386_set_ldt_args ua, *uap;
+
+	/* NOTE(review): copyin returns 0/positive errno; '< 0' never fires
+	 * (should be != 0).  Left as-is (patch artifact). */
+	if ((error = copyin(args, &ua, sizeof(struct i386_set_ldt_args))) < 0)
+		return(error);
+
+	uap = &ua;
+
+#ifdef	DEBUG
+	printf("i386_set_ldt: start=%d num=%d descs=%x\n", uap->start, uap->num, uap->desc);
+#endif
+
+	if (uap->start < 0 || uap->num < 0)
+		return(EINVAL);
+
+	/* XXX Should be 8192 ! */
+	if (uap->start > 512 ||
+	    (uap->start + uap->num) > 512)
+		return(EINVAL);
+
+	/* allocate user ldt */
+	if (!pcb->pcb_ldt) {
+		/* NOTE(review): kmem_alloc result is not checked for NULL */
+		union descriptor *new_ldt =
+			(union descriptor *)kmem_alloc(kernel_map, 512*sizeof(union descriptor));
+		bzero(new_ldt, 512*sizeof(union descriptor));
+		bcopy(ldt, new_ldt, sizeof(ldt));	/* seed with default entries */
+		pcb->pcb_ldt = (caddr_t)new_ldt;
+		pcb->pcb_ldt_len = 512;		/* XXX need to grow */
+#ifdef	DEBUG
+		printf("i386_set_ldt(%d): new_ldt=%x\n", p->p_pid, new_ldt);
+#endif
+	}
+
+	/* Check descriptors for access violations */
+	/* NOTE(review): descriptors are copied in twice (validate here, fill
+	 * below), so another thread sharing the address space could swap the
+	 * contents between passes (TOCTOU) -- verify against callers. */
+	for (i = 0, n = uap->start; i < uap->num; i++, n++) {
+		union descriptor desc, *dp;
+		dp = &uap->desc[i];
+		error = copyin(dp, &desc, sizeof(union descriptor));
+		if (error)
+			return(error);
+
+		/* Only user (ring-3) descriptors */
+		if (desc.sd.sd_dpl != SEL_UPL)
+			return(EACCES);
+
+		/* Must be "present" */
+		if (desc.sd.sd_p == 0)
+			return(EACCES);
+
+		switch (desc.sd.sd_type) {
+		case SDT_SYSNULL:	/* system null */
+		case SDT_SYS286CGT:	/* system 286 call gate */
+		case SDT_SYS386CGT:	/* system 386 call gate */
+			break;
+		case SDT_MEMRO:
+		case SDT_MEMROA:
+		case SDT_MEMRW:
+		case SDT_MEMRWA:
+		case SDT_MEMROD:
+		case SDT_MEMRODA:
+		case SDT_MEME:
+		case SDT_MEMEA:
+		case SDT_MEMER:
+		case SDT_MEMERA:
+		case SDT_MEMEC:
+		case SDT_MEMEAC:
+		case SDT_MEMERC:
+		case SDT_MEMERAC: {	/* memory segment types: accepted */
+#if 0
+			unsigned long base = (desc.sd.sd_hibase << 24)&0xFF000000;
+			base |= (desc.sd.sd_lobase&0x00FFFFFF);
+			if (base >= KERNBASE)
+				return(EACCES);
+#endif
+			break;
+		}
+		default:
+			return(EACCES);
+			/*NOTREACHED*/
+		}
+	}
+
+	s = splhigh();		/* keep LDT stable while rewriting it */
+
+	/* Fill in range */
+	for (i = 0, n = uap->start; i < uap->num && !error; i++, n++) {
+		union descriptor desc, *dp;
+		dp = &uap->desc[i];
+		lp = &((union descriptor *)(pcb->pcb_ldt))[n];
+#ifdef	DEBUG
+		printf("i386_set_ldt(%d): ldtp=%x\n", p->p_pid, lp);
+#endif
+		error = copyin(dp, lp, sizeof(union descriptor));
+	}
+	if (!error) {
+		*retval = uap->start;
+/*		need_resched(); */
+	}
+
+	splx(s);
+	return(error);
+}
+#endif /* USER_LDT */
+
+struct sysarch_args {
+	int	op;		/* I386_GET_LDT / I386_SET_LDT */
+	char	*parms;		/* op-specific argument block (user space) */
+};
+
+/*
+ * sysarch: machine-dependent syscall multiplexor.
+ * Dispatches on uap->op; only the LDT ops exist (and only with USER_LDT).
+ * Returns errno; EINVAL for unknown ops.
+ */
+int
+sysarch(p, uap, retval)
+	struct proc *p;
+	register struct sysarch_args *uap;
+	int *retval;
+{
+	int error = 0;
+
+	switch(uap->op) {
+#ifdef	USER_LDT
+	case I386_GET_LDT:
+		error = i386_get_ldt(p, uap->parms, retval);
+		break;
+
+	case I386_SET_LDT:
+		error = i386_set_ldt(p, uap->parms, retval);
+		break;
+#endif
+	default:
+		error = EINVAL;
+		break;
+	}
+	return(error);
+}
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
new file mode 100644
index 0000000..7084791
--- /dev/null
+++ b/sys/amd64/amd64/trap.c
@@ -0,0 +1,692 @@
+/*-
+ * Copyright (C) 1994, David Greenman
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the University of Utah, and William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
+ * $Id: trap.c,v 1.26 1994/06/11 05:13:33 davidg Exp $
+ */
+
+/*
+ * 386 Trap and System call handling
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/acct.h>
+#include <sys/kernel.h>
+#include <sys/syscall.h>
+#ifdef KTRACE
+#include <sys/ktrace.h>
+#endif
+
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+
+#include <machine/cpu.h>
+#include <machine/psl.h>
+#include <machine/reg.h>
+#include <machine/eflags.h>
+
+#include <machine/trap.h>
+
+#include "isa.h"
+#include "npx.h"
+#include "ddb.h"
+
+int	trap_pfault __P((struct trapframe *, int));
+void	trap_fatal __P((struct trapframe *));
+
+extern int grow(struct proc *,u_int);
+
+struct	sysent sysent[];	/* syscall dispatch table (defined elsewhere) */
+int	nsysent;		/* number of entries in sysent[] */
+
+/* trap_msg[] is indexed by trapframe tf_trapno; keep in sync with trap.h */
+#define MAX_TRAP_MSG		27
+char *trap_msg[] = {
+	"reserved addressing fault",		/*  0 T_RESADFLT */
+	"privileged instruction fault",		/*  1 T_PRIVINFLT */
+	"reserved operand fault",		/*  2 T_RESOPFLT */
+	"breakpoint instruction fault",		/*  3 T_BPTFLT */
+	"",					/*  4 unused */
+	"system call trap",			/*  5 T_SYSCALL */
+	"arithmetic trap",			/*  6 T_ARITHTRAP */
+	"system forced exception",		/*  7 T_ASTFLT */
+	"segmentation (limit) fault",		/*  8 T_SEGFLT */
+	"general protection fault",		/*  9 T_PROTFLT */
+	"trace trap",				/* 10 T_TRCTRAP */
+	"",					/* 11 unused */
+	"page fault",				/* 12 T_PAGEFLT */
+	"page table fault",			/* 13 T_TABLEFLT */
+	"alignment fault",			/* 14 T_ALIGNFLT */
+	"kernel stack pointer not valid",	/* 15 T_KSPNOTVAL */
+	"bus error",				/* 16 T_BUSERR */
+	"kernel debugger fault",		/* 17 T_KDBTRAP */
+	"integer divide fault",			/* 18 T_DIVIDE */
+	"non-maskable interrupt trap",		/* 19 T_NMI */
+	"overflow trap",			/* 20 T_OFLOW */
+	"FPU bounds check fault",		/* 21 T_BOUND */
+	"FPU device not available",		/* 22 T_DNA */
+	"double fault",				/* 23 T_DOUBLEFLT */
+	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
+	"invalid TSS fault",			/* 25 T_TSSFLT */
+	"segment not present fault",		/* 26 T_SEGNPFLT */
+	"stack fault",				/* 27 T_STKFLT */
+};
+
+/*
+ * userret: common return-to-user-mode processing.
+ * Delivers pending signals, honors a requested reschedule (placing the
+ * process back on the run queue and switching), charges profiling ticks
+ * accumulated since 'oticks', and records the new scheduling priority.
+ */
+static inline void
+userret(p, frame, oticks)
+	struct proc *p;
+	struct trapframe *frame;
+	u_quad_t oticks;
+{
+	int sig, s;
+
+	/* (assignment-in-condition is intentional: drain all pending signals) */
+	while (sig = CURSIG(p))
+		postsig(sig);
+	p->p_priority = p->p_usrpri;
+	if (want_resched) {
+		/*
+		 * Since we are curproc, clock will normally just change
+		 * our priority without moving us from one queue to another
+		 * (since the running process is not on a queue.)
+		 * If that happened after we setrunqueue ourselves but before we
+		 * mi_switch()'ed, we might not be on the queue indicated by
+		 * our priority.
+		 */
+		s = splclock();
+		setrunqueue(p);
+		p->p_stats->p_ru.ru_nivcsw++;
+		mi_switch();
+		splx(s);
+		/* signals may have arrived while we were switched out */
+		while (sig = CURSIG(p))
+			postsig(sig);
+	}
+	if (p->p_stats->p_prof.pr_scale) {
+		u_quad_t ticks = p->p_sticks - oticks;
+
+		if (ticks) {
+#ifdef PROFTIMER
+			extern int profscale;
+			addupc(frame->tf_eip, &p->p_stats->p_prof,
+			    ticks * profscale);
+#else
+			addupc(frame->tf_eip, &p->p_stats->p_prof, ticks);
+#endif
+		}
+	}
+	curpriority = p->p_priority;
+}
+
+/*
+ * trap(frame):
+ *	Exception, fault, and trap interface to the FreeBSD kernel.
+ * This common code is called from assembly language IDT gate entry
+ * routines that prepare a suitable stack frame, and restore this
+ * frame after the exception has been processed.
+ *
+ * User-mode traps are converted to signals (via trapsignal) or handled
+ * and resumed; kernel-mode traps either recover through pcb_onfault,
+ * are absorbed (page fault, trace, NMI), or are fatal.
+ */
+
+/*ARGSUSED*/
+void
+trap(frame)
+	struct trapframe frame;
+{
+	struct proc *p = curproc;
+	u_quad_t sticks = 0;
+	/* NOTE(review): fault_type is never used in this function */
+	int i = 0, ucode = 0, type, code, eva, fault_type;
+
+	frame.tf_eflags &= ~PSL_NT;	/* clear nested trap XXX */
+	type = frame.tf_trapno;
+	code = frame.tf_err;
+
+	if (ISPL(frame.tf_cs) == SEL_UPL) {
+		/* user trap */
+
+		sticks = p->p_sticks;
+		p->p_md.md_regs = (int *)&frame;
+
+		switch (type) {
+		case T_RESADFLT:	/* reserved addressing fault */
+		case T_PRIVINFLT:	/* privileged instruction fault */
+		case T_RESOPFLT:	/* reserved operand fault */
+			ucode = type;
+			i = SIGILL;
+			break;
+
+		case T_BPTFLT:		/* bpt instruction fault */
+		case T_TRCTRAP:		/* trace trap */
+			frame.tf_eflags &= ~PSL_T;	/* stop single stepping */
+			i = SIGTRAP;
+			break;
+
+		case T_ARITHTRAP:	/* arithmetic trap */
+			ucode = code;
+			i = SIGFPE;
+			break;
+
+		case T_ASTFLT:		/* Allow process switch */
+			astoff();
+			cnt.v_soft++;
+			if ((p->p_flag & P_OWEUPC) && p->p_stats->p_prof.pr_scale) {
+				addupc(frame.tf_eip, &p->p_stats->p_prof, 1);
+				p->p_flag &= ~P_OWEUPC;
+			}
+			goto out;	/* no signal; just userret() */
+
+		case T_PROTFLT:		/* general protection fault */
+		case T_SEGNPFLT:	/* segment not present fault */
+		case T_STKFLT:		/* stack fault */
+			ucode = code + BUS_SEGM_FAULT ;
+			i = SIGBUS;
+			break;
+
+		case T_PAGEFLT:		/* page fault */
+			i = trap_pfault(&frame, TRUE);
+			if (i == 0)
+				goto out;	/* resolved; resume */
+
+			ucode = T_PAGEFLT;
+			break;
+
+		case T_DIVIDE:		/* integer divide fault */
+			ucode = FPE_INTDIV_TRAP;
+			i = SIGFPE;
+			break;
+
+#if NISA > 0
+		case T_NMI:
+#if NDDB > 0
+			/* NMI can be hooked up to a pushbutton for debugging */
+			printf ("NMI ... going to debugger\n");
+			if (kdb_trap (type, 0, &frame))
+				return;
+#endif
+			/* machine/parity/power fail/"kitchen sink" faults */
+			if (isa_nmi(code) == 0) return;
+			/* FALL THROUGH */
+#endif
+
+		case T_OFLOW:		/* integer overflow fault */
+			ucode = FPE_INTOVF_TRAP;
+			i = SIGFPE;
+			break;
+
+		case T_BOUND:		/* bounds check fault */
+			ucode = FPE_SUBRNG_TRAP;
+			i = SIGFPE;
+			break;
+
+		case T_DNA:
+#if NNPX > 0
+			/* if a transparent fault (due to context switch "late") */
+			if (npxdna())
+				return;
+#endif	/* NNPX > 0 */
+
+#if defined(MATH_EMULATE) || defined(GPL_MATH_EMULATE)
+			i = math_emulate(&frame);
+			if (i == 0) return;
+#else	/* MATH_EMULATE || GPL_MATH_EMULATE */
+			panic("trap: math emulation necessary!");
+#endif	/* MATH_EMULATE || GPL_MATH_EMULATE */
+			ucode = FPE_FPU_NP_TRAP;
+			break;
+
+		case T_FPOPFLT:		/* FPU operand fetch fault */
+			ucode = T_FPOPFLT;
+			i = SIGILL;
+			break;
+
+		default:
+			/* NOTE(review): if trap_fatal() returns (debugger
+			 * resumed), control falls through to trapsignal()
+			 * with i == 0 -- verify that is intended */
+			trap_fatal(&frame);
+		}
+	} else {
+		/* kernel trap */
+
+		switch (type) {
+		case T_PAGEFLT:			/* page fault */
+			(void) trap_pfault(&frame, FALSE);
+			return;
+
+		case T_PROTFLT:		/* general protection fault */
+		case T_SEGNPFLT:	/* segment not present fault */
+			/* copyin/copyout style recovery via pcb_onfault */
+			if (curpcb && curpcb->pcb_onfault) {
+				frame.tf_eip = (int)curpcb->pcb_onfault;
+				return;
+			}
+			break;
+
+#if NDDB > 0
+		case T_BPTFLT:
+		case T_TRCTRAP:
+			if (kdb_trap (type, 0, &frame))
+				return;
+			break;
+#else
+		case T_TRCTRAP:	 /* trace trap -- someone single stepping lcall's */
+			/* Q: how do we turn it on again? */
+			frame.tf_eflags &= ~PSL_T;
+			return;
+#endif
+
+#if NISA > 0
+		case T_NMI:
+#if NDDB > 0
+			/* NMI can be hooked up to a pushbutton for debugging */
+			printf ("NMI ... going to debugger\n");
+			if (kdb_trap (type, 0, &frame))
+				return;
+#endif
+			/* machine/parity/power fail/"kitchen sink" faults */
+			if (isa_nmi(code) == 0) return;
+			/* FALL THROUGH */
+#endif
+		}
+
+		trap_fatal(&frame);
+	}
+
+	trapsignal(p, i, ucode);
+
+#ifdef DIAGNOSTIC
+	eva = rcr2();	/* NOTE(review): %cr2 may be stale for non-page faults */
+	if (type <= MAX_TRAP_MSG) {
+		uprintf("fatal process exception: %s",
+			trap_msg[type]);
+		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
+			uprintf(", fault VA = 0x%x", eva);
+		uprintf("\n");
+	}
+#endif
+
+out:
+	userret(p, &frame, sticks);
+}
+
+/*
+ * trap_pfault: resolve a page fault (faulting address read from %cr2).
+ * usermode selects the user-fault policy (no kernel-space addresses,
+ * signal instead of panic on failure).  Returns 0 on success, else the
+ * signal number to deliver (SIGBUS/SIGSEGV); for kernel faults, failure
+ * recovers through pcb_onfault or calls trap_fatal().
+ */
+int
+trap_pfault(frame, usermode)
+	struct trapframe *frame;
+	int usermode;
+{
+	vm_offset_t va;
+	struct vmspace *vm;
+	vm_map_t map = 0;
+	/* NOTE(review): oldflags is never used in this function */
+	int rv = 0, oldflags;
+	vm_prot_t ftype;
+	extern vm_map_t kernel_map;
+	int eva;
+	struct proc *p = curproc;
+
+	eva = rcr2();
+	va = trunc_page((vm_offset_t)eva);
+
+	/*
+	 * Don't allow user-mode faults in kernel address space
+	 */
+	if (usermode && (va >= KERNBASE)) {
+		goto nogo;
+	}
+
+	/* pick the map: kernel faults (or no curproc) use kernel_map */
+	if ((p == 0) || (va >= KERNBASE)) {
+		vm = 0;
+		map = kernel_map;
+	} else {
+		vm = p->p_vmspace;
+		map = &vm->vm_map;
+	}
+
+	if (frame->tf_err & PGEX_W)
+		ftype = VM_PROT_READ | VM_PROT_WRITE;
+	else
+		ftype = VM_PROT_READ;
+
+	if (map != kernel_map) {
+		/* NOTE(review): pa is never used in this block */
+		vm_offset_t pa;
+		vm_offset_t v = (vm_offset_t) vtopte(va);
+		vm_page_t ptepg;
+
+		/*
+		 * Keep swapout from messing with us during this
+		 *	critical time.
+		 */
+		++p->p_lock;
+
+		/*
+		 * Grow the stack if necessary
+		 */
+		if ((caddr_t)va > vm->vm_maxsaddr
+		    && (caddr_t)va < (caddr_t)USRSTACK) {
+			if (!grow(p, va)) {
+				rv = KERN_FAILURE;
+				--p->p_lock;
+				goto nogo;
+			}
+		}
+
+		/*
+		 * Check if page table is mapped, if not,
+		 *	fault it first
+		 */
+
+		/* Fault the pte only if needed: */
+		/* (dummy read-modify-write touches the pte page so a
+		 *  recursive kernel fault maps it in if absent) */
+		*(volatile char *)v += 0;
+
+		/* hold the pte page so vm_fault can't free it under us */
+		ptepg = (vm_page_t) pmap_pte_vm_page(vm_map_pmap(map), v);
+		if( ptepg->hold_count == 0)
+			ptepg->act_count += 3;
+		vm_page_hold(ptepg);
+
+		/* Fault in the user page: */
+		rv = vm_fault(map, va, ftype, FALSE);
+
+		vm_page_unhold(ptepg);
+
+		/*
+		 * page table pages don't need to be kept if they
+		 * are not held
+		 */
+		if( ptepg->hold_count == 0 && ptepg->wire_count == 0) {
+			pmap_page_protect( VM_PAGE_TO_PHYS(ptepg),
+				VM_PROT_NONE);
+			vm_page_free(ptepg);
+		}
+
+		--p->p_lock;
+	} else {
+		/*
+		 * Since we know that kernel virtual address addresses
+		 * always have pte pages mapped, we just have to fault
+		 * the page.
+		 */
+		rv = vm_fault(map, va, ftype, FALSE);
+	}
+
+	if (rv == KERN_SUCCESS)
+		return (0);
+nogo:
+	if (!usermode) {
+		if (curpcb->pcb_onfault) {
+			frame->tf_eip = (int)curpcb->pcb_onfault;
+			return (0);
+		}
+		trap_fatal(frame);
+	}
+
+	/* kludge to pass faulting virtual address to sendsig */
+	frame->tf_err = eva;
+
+	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
+}
+
+/*
+ * trap_fatal: print a register/fault-state dump for an unrecoverable
+ * trap, give the debugger a chance to take over, then panic.
+ * May return (instead of panicking) only if a debugger handled the trap.
+ */
+void
+trap_fatal(frame)
+	struct trapframe *frame;
+{
+	int code, type, eva;
+
+	code = frame->tf_err;
+	type = frame->tf_trapno;
+	eva = rcr2();	/* meaningful only for page faults */
+
+	if (type <= MAX_TRAP_MSG)
+		printf("\n\nFatal trap %d: %s while in %s mode\n",
+			type, trap_msg[type],
+			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
+	if (type == T_PAGEFLT) {
+		printf("fault virtual address	= 0x%x\n", eva);
+		printf("fault code		= %s %s, %s\n",
+			code & PGEX_U ? "user" : "supervisor",
+			code & PGEX_W ? "write" : "read",
+			code & PGEX_P ? "protection violation" : "page not present");
+	}
+	printf("instruction pointer	= 0x%x\n", frame->tf_eip);
+	printf("processor eflags	= ");
+	if (frame->tf_eflags & EFL_TF)
+		printf("trace/trap, ");
+	if (frame->tf_eflags & EFL_IF)
+		printf("interrupt enabled, ");
+	if (frame->tf_eflags & EFL_NT)
+		printf("nested task, ");
+	if (frame->tf_eflags & EFL_RF)
+		printf("resume, ");
+	if (frame->tf_eflags & EFL_VM)
+		printf("vm86, ");
+	printf("IOPL = %d\n", (frame->tf_eflags & EFL_IOPL) >> 12);
+	printf("current process		= ");
+	if (curproc) {
+		printf("%d (%s)\n",
+		    curproc->p_pid, curproc->p_comm ?
+		    curproc->p_comm : "");
+	} else {
+		printf("Idle\n");
+	}
+	printf("interrupt mask		= ");
+	if ((cpl & net_imask) == net_imask)
+		printf("net ");
+	if ((cpl & tty_imask) == tty_imask)
+		printf("tty ");
+	if ((cpl & bio_imask) == bio_imask)
+		printf("bio ");
+	if (cpl == 0)
+		printf("none");
+	printf("\n");
+
+#ifdef KDB
+	/* NOTE(review): 'psl' is not declared in this file -- this KDB
+	 * branch looks like it would not compile; verify before enabling */
+	if (kdb_trap(&psl))
+		return;
+#endif
+#if NDDB > 0
+	if (kdb_trap (type, 0, frame))
+		return;
+#endif
+	if (type <= MAX_TRAP_MSG)
+		panic(trap_msg[type]);
+	else
+		panic("unknown/reserved trap");
+}
+
+/*
+ * Compensate for 386 brain damage (missing URKR).
+ * This is a little simpler than the pagefault handler in trap() because
+ * it the page tables have already been faulted in and high addresses
+ * are thrown out early for other reasons.
+ *
+ * trapwrite(addr): fault in the user page at addr for writing (used by
+ * copy-on-write paths where the 386 CPU ignores page-level write
+ * protection in supervisor mode).  Returns 0 on success, 1 on failure.
+ */
+int trapwrite(addr)
+	unsigned addr;
+{
+	struct proc *p;
+	vm_offset_t va, v;
+	struct vmspace *vm;
+	/* NOTE(review): oldflags is never used in this function */
+	int oldflags;
+	int rv;
+
+	va = trunc_page((vm_offset_t)addr);
+	/*
+	 * XXX - MAX is END.  Changed > to >= for temp. fix.
+	 */
+	if (va >= VM_MAXUSER_ADDRESS)
+		return (1);
+
+	p = curproc;
+	vm = p->p_vmspace;
+
+	++p->p_lock;	/* keep swapout away while we work on the map */
+
+	/* grow the stack first if the address lies in the stack region */
+	if ((caddr_t)va >= vm->vm_maxsaddr
+	    && (caddr_t)va < (caddr_t)USRSTACK) {
+		if (!grow(p, va)) {
+			--p->p_lock;
+			return (1);
+		}
+	}
+
+	v = trunc_page(vtopte(va));
+
+	/*
+	 * wire the pte page
+	 */
+	if (va < USRSTACK) {
+		vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
+	}
+
+	/*
+	 * fault the data page
+	 */
+	rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
+
+	/*
+	 * unwire the pte page
+	 */
+	if (va < USRSTACK) {
+		vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
+	}
+
+	--p->p_lock;
+
+	if (rv != KERN_SUCCESS)
+		return 1;
+
+	return (0);
+}
+
+/*
+ * syscall(frame):
+ *	System call request from POSIX system call gate interface to kernel.
+ * Like trap(), argument is call by reference.
+ *
+ * Decodes the syscall number from %eax (handling the SYS_syscall /
+ * SYS___syscall indirect forms), copies in the arguments from the user
+ * stack, dispatches through sysent[], and stores the result / errno back
+ * into the frame (carry flag signals error to libc).
+ */
+/*ARGSUSED*/
+void
+syscall(frame)
+	struct trapframe frame;
+{
+	caddr_t params;
+	int i;
+	struct sysent *callp;
+	struct proc *p = curproc;
+	u_quad_t sticks;
+	int error, opc;
+	int args[8], rval[2];
+	u_int code;
+
+	sticks = p->p_sticks;
+	if (ISPL(frame.tf_cs) != SEL_UPL)
+		panic("syscall");	/* gate must only be reachable from user mode */
+
+	code = frame.tf_eax;
+	p->p_md.md_regs = (int *)&frame;
+	params = (caddr_t)frame.tf_esp + sizeof (int) ;	/* skip return addr */
+
+	/*
+	 * Reconstruct pc, assuming lcall $X,y is 7 bytes, as it is always.
+	 */
+	opc = frame.tf_eip - 7;
+	/*
+	 * Handle the indirect syscall forms (int-sized vs quad-sized
+	 * syscall-number argument).
+	 */
+	if (code == SYS_syscall) {
+		/*
+		 * Code is first argument, followed by actual args.
+		 */
+		code = fuword(params);
+		params += sizeof (int);
+	} else if (code == SYS___syscall) {
+		/*
+		 * Like syscall, but code is a quad, so as to maintain
+		 * quad alignment for the rest of the arguments.
+		 */
+		code = fuword(params + _QUAD_LOWWORD * sizeof(int));
+		params += sizeof(quad_t);
+	}
+
+	/* out-of-range numbers dispatch to sysent[0] (nosys) */
+	if (code >= nsysent)
+		callp = &sysent[0];
+	else
+		callp = &sysent[code];
+
+	if ((i = callp->sy_narg * sizeof (int)) &&
+	    (error = copyin(params, (caddr_t)args, (u_int)i))) {
+#ifdef KTRACE
+		if (KTRPOINT(p, KTR_SYSCALL))
+			ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
+#endif
+		goto bad;
+	}
+#ifdef KTRACE
+	if (KTRPOINT(p, KTR_SYSCALL))
+		ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
+#endif
+	rval[0] = 0;
+	rval[1] = frame.tf_edx;	/* preserved unless the syscall sets it */
+
+	error = (*callp->sy_call)(p, args, rval);
+
+	switch (error) {
+
+	case 0:
+		/*
+		 * Reinitialize proc pointer `p' as it may be different
+		 * if this is a child returning from fork syscall.
+		 */
+		p = curproc;
+		frame.tf_eax = rval[0];
+		frame.tf_edx = rval[1];
+		frame.tf_eflags &= ~PSL_C;	/* carry bit */
+		break;
+
+	case ERESTART:
+		frame.tf_eip = opc;	/* back up to re-execute the lcall */
+		break;
+
+	case EJUSTRETURN:
+		break;		/* frame already set up (e.g. sigreturn) */
+
+	default:
+	bad:
+		frame.tf_eax = error;
+		frame.tf_eflags |= PSL_C;	/* carry bit */
+		break;
+	}
+
+	userret(p, &frame, sticks);
+
+#ifdef KTRACE
+	if (KTRPOINT(p, KTR_SYSRET))
+		ktrsysret(p->p_tracep, code, error, rval[0]);
+#endif
+}
diff --git a/sys/amd64/amd64/tsc.c b/sys/amd64/amd64/tsc.c
new file mode 100644
index 0000000..e40079a
--- /dev/null
+++ b/sys/amd64/amd64/tsc.c
@@ -0,0 +1,442 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz and Don Ahn.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)clock.c 7.2 (Berkeley) 5/12/91
+ * $Id: clock.c,v 1.6 1994/02/06 22:48:13 davidg Exp $
+ */
+
+/*
+ * Primitive clock interrupt routines.
+ */
+#include "param.h"
+#include "systm.h"
+#include "time.h"
+#include "kernel.h"
+#include "machine/segments.h"
+#include "machine/frame.h"
+#include "i386/isa/icu.h"
+#include "i386/isa/isa.h"
+#include "i386/isa/rtc.h"
+#include "i386/isa/timerreg.h"
+#include <machine/cpu.h>
+
+/* X-tals being what they are, it's nice to be able to fudge this one... */
+/* Note, the name changed here from XTALSPEED to TIMER_FREQ rgrimes 4/26/93 */
+#ifndef TIMER_FREQ
+#define TIMER_FREQ 1193182 /* XXX - should be in isa.h */
+#endif
+#define TIMER_DIV(x) ((TIMER_FREQ+(x)/2)/(x))
+
+void hardclock();
+static int beeping;
+int timer0_divisor = TIMER_DIV(100); /* XXX should be hz */
+u_int timer0_prescale;
+static char timer0_state = 0, timer2_state = 0;
+static char timer0_reprogram = 0;
+static void (*timer_func)() = hardclock;
+static void (*new_function)();
+static u_int new_rate;
+static u_int hardclock_divisor;
+
+
+/*
+ * Timer 0 (system clock) interrupt handler: invoked via the Vclk stub
+ * wired in by enablertclock(); simply forwards the frame to hardclock().
+ */
+void
+clkintr(frame)
+	struct clockframe frame;
+{
+	hardclock(&frame);
+}
+
+#if 0
+/*
+ * Disabled variable-rate timer 0 interrupt handler.
+ *
+ * Dispatches the current timer_func() on every tick and, while timer 0 is
+ * "borrowed" (timer0_state 1), emulates hardclock() at its original rate
+ * by accumulating elapsed divisor counts in timer0_prescale.  States 2/3
+ * are transition states set by acquire_timer0()/release_timer0(): they
+ * reprogram the 8253 counter to the new (or original hz) rate at a safe
+ * point, i.e. from within the interrupt itself.
+ */
+void
+timerintr(struct clockframe frame)
+{
+	timer_func(&frame);
+	switch (timer0_state) {
+	case 0:			/* normal: timer_func == hardclock */
+		break;
+	case 1:			/* borrowed: fake hardclock at original rate */
+		if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
+			hardclock(&frame);
+			timer0_prescale = 0;
+		}
+		break;
+	case 2:			/* switch to the requested rate */
+		disable_intr();
+		outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
+		outb(TIMER_CNTR0, TIMER_DIV(new_rate)%256);
+		outb(TIMER_CNTR0, TIMER_DIV(new_rate)/256);
+		enable_intr();
+		timer0_divisor = TIMER_DIV(new_rate);
+		timer0_prescale = 0;
+		timer_func = new_function;
+		timer0_state = 1;
+		break;
+	case 3:			/* release: restore hz rate at next fake hardclock */
+		if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
+			hardclock(&frame);
+			disable_intr();
+			outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
+			outb(TIMER_CNTR0, TIMER_DIV(hz)%256);
+			outb(TIMER_CNTR0, TIMER_DIV(hz)/256);
+			enable_intr();
+			timer0_divisor = TIMER_DIV(hz);
+			timer0_prescale = 0;
+			timer_func = hardclock;;	/* NOTE(review): stray second ';' (harmless) */
+			timer0_state = 0;
+		}
+		break;
+	}
+}
+
+#endif
+
+/*
+ * Request exclusive use of timer 0 at `rate' Hz, with `function' called
+ * per tick.  Returns 0 on success, -1 if the timer is busy or no
+ * function was given.  Only flags the request (state 2); the actual
+ * reprogramming is done by timerintr().
+ * NOTE(review): timerintr() is compiled out (#if 0 above), so state 2 is
+ * currently never consumed — confirm before relying on this interface.
+ */
+int
+acquire_timer0(int rate, void (*function)() )
+{
+	if (timer0_state || !function)
+		return -1;
+
+	new_function = function;
+	new_rate = rate;
+	timer0_state = 2;
+	return 0;
+}
+
+
+/*
+ * Request exclusive use of timer 2 (speaker timer), programming it with
+ * the caller-supplied 8253 mode bits.  Returns 0 on success, -1 if busy.
+ */
+int
+acquire_timer2(int mode)
+{
+	if (timer2_state)
+		return -1;
+	timer2_state = 1;
+	outb(TIMER_MODE, TIMER_SEL2 | (mode &0x3f));
+	return 0;
+}
+
+
+/*
+ * Give timer 0 back: flag state 3 so timerintr() restores the hz rate.
+ * Returns 0 on success, -1 if the timer was not acquired.
+ */
+int
+release_timer0()
+{
+	if (!timer0_state)
+		return -1;
+	timer0_state = 3;
+	return 0;
+}
+
+
+/*
+ * Give timer 2 back, restoring the default square-wave mode.
+ * Returns 0 on success, -1 if the timer was not acquired.
+ */
+int
+release_timer2()
+{
+	if (!timer2_state)
+		return -1;
+	timer2_state = 0;
+	outb(TIMER_MODE, TIMER_SEL2|TIMER_SQWAVE|TIMER_16BIT);
+	return 0;
+}
+
+
+/*
+ * Latch and read the current 16-bit count of timer 0, with interrupts
+ * masked around the low/high byte-pair read so the two halves are
+ * consistent.  NOTE(review): K&R definition takes no parameters, yet
+ * DELAY() calls it as getit(0, 0); the extra arguments are silently
+ * ignored under K&R linkage.
+ */
+static int
+getit()
+{
+	int high, low;
+
+	disable_intr();
+	/* select timer0 and latch counter value */
+	outb(TIMER_MODE, TIMER_SEL0);
+	low = inb(TIMER_CNTR0);
+	high = inb(TIMER_CNTR0);
+	enable_intr();
+	return ((high << 8) | low);
+}
+
+
+/*
+ * Wait "n" microseconds.
+ * Relies on timer 1 counting down from (TIMER_FREQ / hz)
+ * Note: timer had better have been programmed before this is first used!
+ */
+/*
+ * Busy-wait approximately `n' microseconds by polling the down-counting
+ * timer 0 latch.  Converts n to 8253 ticks without floating point, then
+ * spins accumulating consumed ticks; a read larger than the previous one
+ * means the counter wrapped (reloaded from timer0_divisor) and is
+ * corrected accordingly.  The timer must already be programmed.
+ */
+void
+DELAY(int n)
+{
+	int counter_limit, prev_tick, tick, ticks_left, sec, usec;
+					/* NOTE(review): counter_limit is unused */
+
+#ifdef DELAYDEBUG
+	int getit_calls = 1;
+	int n1;
+	static int state = 0;
+
+	/* one-shot self-calibration pass over a decade of delay lengths */
+	if (state == 0) {
+		state = 1;
+		for (n1 = 1; n1 <= 10000000; n1 *= 10)
+			DELAY(n1);
+		state = 2;
+	}
+	if (state == 1)
+		printf("DELAY(%d)...", n);
+#endif
+	/*
+	 * Read the counter first, so that the rest of the setup overhead is
+	 * counted.  Guess the initial overhead is 20 usec (on most systems it
+	 * takes about 1.5 usec for each of the i/o's in getit().  The loop
+	 * takes about 6 usec on a 486/33 and 13 usec on a 386/20.  The
+	 * multiplications and divisions to scale the count take a while).
+	 */
+	prev_tick = getit(0, 0);
+	n -= 20;
+	/*
+	 * Calculate (n * (TIMER_FREQ / 1e6)) without using floating point
+	 * and without any avoidable overflows.
+	 */
+	sec = n / 1000000;
+	usec = n - sec * 1000000;
+	ticks_left = sec * TIMER_FREQ
+		     + usec * (TIMER_FREQ / 1000000)
+		     + usec * ((TIMER_FREQ % 1000000) / 1000) / 1000
+		     + usec * (TIMER_FREQ % 1000) / 1000000;
+
+	while (ticks_left > 0) {
+		tick = getit(0, 0);
+#ifdef DELAYDEBUG
+		++getit_calls;
+#endif
+		if (tick > prev_tick)
+			/* counter wrapped: it reloaded from timer0_divisor */
+			ticks_left -= prev_tick - (tick - timer0_divisor);
+		else
+			ticks_left -= prev_tick - tick;
+		prev_tick = tick;
+	}
+#ifdef DELAYDEBUG
+	if (state == 1)
+		printf(" %d calls to getit() at %d usec each\n",
+		       getit_calls, (n + 5) / getit_calls);
+#endif
+}
+
+
+/*
+ * timeout() callback: silence the speaker by masking the gate/data bits
+ * in the PPI, then release timer 2 and clear the beeping flag.
+ */
+static void
+sysbeepstop()
+{
+	outb(IO_PPI, inb(IO_PPI)&0xFC);	/* disable counter2 output to speaker */
+	release_timer2();
+	beeping = 0;
+}
+
+
+/*
+ * Sound the speaker: program timer 2 with the 16-bit `pitch' divisor and
+ * schedule sysbeepstop() after `period' ticks.  Returns 0 on success,
+ * -1 if timer 2 is already in use.  If a beep is already in progress the
+ * new pitch takes effect but the running timeout is left alone.
+ */
+int
+sysbeep(int pitch, int period)
+{
+
+	if (acquire_timer2(TIMER_SQWAVE|TIMER_16BIT))
+		return -1;
+	disable_intr();
+	outb(TIMER_CNTR2, pitch);	/* low byte, then high byte */
+	outb(TIMER_CNTR2, (pitch>>8));
+	enable_intr();
+	if (!beeping) {
+		outb(IO_PPI, inb(IO_PPI) | 3);	/* enable counter2 output to speaker */
+		beeping = period;
+		timeout(sysbeepstop, 0, period);
+	}
+	return 0;
+}
+
+
+/*
+ * Program the 8253 timer 0 as the system clock at hz interrupts/second
+ * and initialize the MC146818 RTC status registers; report any RTC BIOS
+ * diagnostic error left by POST.
+ */
+void
+startrtclock()
+{
+	int s;
+
+	/* initialize 8253 clock */
+	outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
+
+	/* Correct rounding will buy us a better precision in timekeeping */
+	outb (IO_TIMER1, TIMER_DIV(hz)%256);
+	outb (IO_TIMER1, TIMER_DIV(hz)/256);
+	timer0_divisor = hardclock_divisor = TIMER_DIV(hz);
+
+	/* initialize brain-dead battery powered clock */
+	outb (IO_RTC, RTC_STATUSA);
+	outb (IO_RTC+1, 0x26);
+	outb (IO_RTC, RTC_STATUSB);
+	outb (IO_RTC+1, 2);
+
+	outb (IO_RTC, RTC_DIAG);
+	if (s = inb (IO_RTC+1))		/* assignment intended: nonzero = error */
+		printf("RTC BIOS diagnostic error %b\n", s, RTCDG_BITS);
+}
+
+
+/* convert 2 digit BCD number to binary */
+int
+bcd(int i)
+{
+	return ((i/16)*10 + (i%16));
+}
+
+
+/*
+ * convert years to seconds (from 1970)
+ * NOTE(review): uses the naive every-fourth-year leap rule (i % 4) with
+ * no century correction; first wrong for 2100, fine for 1970-2099.
+ */
+unsigned long
+ytos(int y)
+{
+	int i;
+	unsigned long ret;
+
+	ret = 0;
+	for(i = 1970; i < y; i++) {
+		if (i % 4) ret += 365*24*60*60;
+		else ret += 366*24*60*60;
+	}
+	return ret;
+}
+
+
+/* convert whole months [1..m-1] to seconds, honoring `leap' for February */
+unsigned long
+mtos(int m, int leap)
+{
+	int i;
+	unsigned long ret;
+
+	ret = 0;
+	for(i=1; i<m; i++) {
+		switch(i){
+		case 1: case 3: case 5: case 7: case 8: case 10: case 12:
+			ret += 31*24*60*60; break;
+		case 4: case 6: case 9: case 11:
+			ret += 30*24*60*60; break;
+		case 2:
+			if (leap) ret += 29*24*60*60;
+			else ret += 28*24*60*60;
+		}
+	}
+	return ret;
+}
+
+
+/*
+ * Initialize the time of day register, based on the time base which is, e.g.
+ * from a filesystem.
+ */
+/*
+ * Set time.tv_sec from the battery-backed MC146818 RTC, adjusted by the
+ * timezone's minuteswest.  NOTE(review): the `base' argument (filesystem
+ * time) is ignored — no plausibility check against it is made — and
+ * local `s' is unused.
+ */
+void
+inittodr(time_t base)
+{
+	unsigned long sec;
+	int leap, day_week, t, yd;
+	int sa,s;
+
+	/* do we have a realtime clock present? (otherwise we loop below) */
+	sa = rtcin(RTC_STATUSA);
+	if (sa == 0xff || sa == 0) return;
+
+	/* ready for a read? (spin while an update is in progress) */
+	while ((sa&RTCSA_TUP) == RTCSA_TUP)
+		sa = rtcin(RTC_STATUSA);
+
+	sec = bcd(rtcin(RTC_YEAR)) + 1900;	/* two-digit year; pre-1970 means 20xx */
+	if (sec < 1970)
+		sec += 100;
+
+	leap = !(sec % 4); sec = ytos(sec);			/* year */
+	yd = mtos(bcd(rtcin(RTC_MONTH)),leap); sec+=yd;		/* month */
+	t = (bcd(rtcin(RTC_DAY))-1) * 24*60*60; sec+=t; yd+=t;	/* date */
+	day_week = rtcin(RTC_WDAY);				/* day */
+	sec += bcd(rtcin(RTC_HRS)) * 60*60;			/* hour */
+	sec += bcd(rtcin(RTC_MIN)) * 60;			/* minutes */
+	sec += bcd(rtcin(RTC_SEC));				/* seconds */
+	sec += tz.tz_minuteswest * 60;				/* RTC keeps local time */
+	time.tv_sec = sec;
+}
+
+
+#ifdef garbage
+/*
+ * Debug-only variant of inittodr(): dumps the raw RTC date/time fields
+ * to the console and sets the clock from `base' instead of the RTC.
+ * Compiled out (#ifdef garbage).
+ */
+test_inittodr(time_t base)
+{
+
+	outb(IO_RTC,9); /* year */
+	printf("%d ",bcd(inb(IO_RTC+1)));
+	outb(IO_RTC,8); /* month */
+	printf("%d ",bcd(inb(IO_RTC+1)));
+	outb(IO_RTC,7); /* day */
+	printf("%d ",bcd(inb(IO_RTC+1)));
+	outb(IO_RTC,4); /* hour */
+	printf("%d ",bcd(inb(IO_RTC+1)));
+	outb(IO_RTC,2); /* minutes */
+	printf("%d ",bcd(inb(IO_RTC+1)));
+	outb(IO_RTC,0); /* seconds */
+	printf("%d\n",bcd(inb(IO_RTC+1)));
+
+	time.tv_sec = base;
+}
+#endif
+
+/*
+ * Wire clock interrupt in.
+ */
+#define V(s)	__CONCAT(V, s)
+extern void V(clk)();		/* assembler interrupt stub for IRQ0 */
+
+
+/*
+ * Install the clock interrupt gate in the IDT and unmask IRQ0 at the ICU.
+ */
+void
+enablertclock()
+{
+	setidt(ICU_OFFSET+0, &V(clk), SDT_SYS386IGT, SEL_KPL);
+	INTREN(IRQ0);
+}
+
+
+/*
+ * Delay for some number of milliseconds (busy-wait via DELAY()).
+ */
+void
+spinwait(int millisecs)
+{
+	DELAY(1000 * millisecs);
+}
+
+/* MI hook: program the hardware clock and enable its interrupt. */
+void
+cpu_initclocks()
+{
+	startrtclock();
+	enablertclock();
+}
+
+/* MI hook: no separate statistics clock on this hardware; no-op. */
+void
+setstatclockrate(int newhz)
+{
+}
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
new file mode 100644
index 0000000..a7c4e59
--- /dev/null
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -0,0 +1,1246 @@
+/*-
+ * Copyright (c) 1982, 1986 The Regents of the University of California.
+ * Copyright (c) 1989, 1990 William Jolitz
+ * Copyright (c) 1994 John Dyson
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
+ * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
+ * $Id: vm_machdep.c,v 1.20 1994/04/20 07:06:20 davidg Exp $
+ */
+
+#include "npx.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/buf.h>
+#include <sys/vnode.h>
+#include <sys/user.h>
+
+#include <machine/cpu.h>
+
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+
+#define b_cylin b_resid
+
+caddr_t bouncememory;
+vm_offset_t bouncepa, bouncepaend;
+int bouncepages, bpwait;
+vm_map_t io_map;
+int bmwait, bmfreeing;
+
+#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
+int bounceallocarraysize;
+unsigned *bounceallocarray;
+int bouncefree;
+
+#define SIXTEENMEG (4096*4096)
+#define MAXBKVA 512
+int maxbkva=MAXBKVA*NBPG;
+
+/* special list that can be used at interrupt time for eventual kva free */
+struct kvasfree {
+ vm_offset_t addr;
+ vm_offset_t size;
+} kvaf[MAXBKVA];
+
+int kvasfreecnt;
+
+vm_offset_t vm_bounce_kva();
+/*
+ * get bounce buffer pages (count physically contiguous)
+ * (only 1 implemented now)
+ */
+/*
+ * Allocate `count' physically contiguous bounce pages (only count == 1
+ * is supported) and return the physical address of the first.  Scans
+ * the allocation bitmap at splbio; if no page is free, sleeps on
+ * bounceallocarray until vm_bounce_page_free() wakes us, then rescans.
+ */
+vm_offset_t
+vm_bounce_page_find(count)
+	int count;
+{
+	int bit;
+	int s,i;
+
+	if (count != 1)
+		panic("vm_bounce_page_find -- no support for > 1 page yet!!!");
+
+	s = splbio();
+retry:
+	for (i = 0; i < bounceallocarraysize; i++) {
+		if (bounceallocarray[i] != 0xffffffff) {
+			if (bit = ffs(~bounceallocarray[i])) {	/* assignment intended */
+				bounceallocarray[i] |= 1 << (bit - 1) ;
+				bouncefree -= count;
+				splx(s);
+				return bouncepa + (i * BITS_IN_UNSIGNED + (bit - 1)) * NBPG;
+			}
+		}
+	}
+	bpwait = 1;
+	tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
+	goto retry;
+}
+
+/*
+ * Queue a kva range for deferred freeing (safe at interrupt time).  If
+ * `now', flush the queue immediately via vm_bounce_kva(0,0) (which also
+ * does wakeups); otherwise just wake anyone waiting on the bounce map.
+ * NOTE(review): kvasfreecnt is incremented without a bound check against
+ * the MAXBKVA-sized kvaf[] array — confirm overflow cannot happen.
+ */
+void
+vm_bounce_kva_free(addr, size, now)
+	vm_offset_t addr;
+	vm_offset_t size;
+	int now;
+{
+	int s = splbio();
+	kvaf[kvasfreecnt].addr = addr;
+	kvaf[kvasfreecnt++].size = size;
+	if( now) {
+		/*
+		 * this will do wakeups
+		 */
+		vm_bounce_kva(0,0);
+	} else {
+		if (bmwait) {
+			/*
+			 * if anyone is waiting on the bounce-map, then wakeup
+			 */
+			wakeup((caddr_t) io_map);
+			bmwait = 0;
+		}
+	}
+	splx(s);
+}
+
+/*
+ * free count bounce buffer pages
+ */
+/*
+ * Free `count' bounce buffer pages (only count == 1 supported): clear
+ * the page's bit in the allocation bitmap and wake any sleeper in
+ * vm_bounce_page_find().
+ */
+void
+vm_bounce_page_free(pa, count)
+	vm_offset_t pa;
+	int count;
+{
+	int allocindex;
+	int index;
+	int bit;
+
+	if (count != 1)
+		panic("vm_bounce_page_free -- no support for > 1 page yet!!!\n");
+
+	index = (pa - bouncepa) / NBPG;		/* page number within the pool */
+
+	if ((index < 0) || (index >= bouncepages))
+		panic("vm_bounce_page_free -- bad index\n");
+
+	allocindex = index / BITS_IN_UNSIGNED;	/* word and bit within bitmap */
+	bit = index % BITS_IN_UNSIGNED;
+
+	bounceallocarray[allocindex] &= ~(1 << bit);
+
+	bouncefree += count;
+	if (bpwait) {
+		bpwait = 0;
+		wakeup((caddr_t) &bounceallocarray);
+	}
+}
+
+/*
+ * allocate count bounce buffer kva pages
+ */
+/*
+ * Allocate `count' bytes of bounce kva from io_map, first draining the
+ * deferred-free queue (kvaf[]) — recycling an entry of exactly the right
+ * size when possible.  With count == 0 this is called purely to flush
+ * the queue (see vm_bounce_kva_free()).  If allocation fails and
+ * `waitok', sleeps on io_map and retries; otherwise returns NULL.
+ */
+vm_offset_t
+vm_bounce_kva(count, waitok)
+	int count;
+	int waitok;
+{
+	int tofree;
+	int i;
+	int startfree;
+	vm_offset_t kva = 0;
+	int s = splbio();
+	int size = count;
+	startfree = 0;
+more:
+	if (!bmfreeing && (tofree = kvasfreecnt)) {
+		bmfreeing = 1;		/* guard against re-entry from interrupt level */
+		for (i = startfree; i < kvasfreecnt; i++) {
+			/*
+			 * if we have a kva of the right size, no sense
+			 * in freeing/reallocating...
+			 * might affect fragmentation short term, but
+			 * as long as the amount of io_map is
+			 * significantly more than the maximum transfer
+			 * size, I don't think that it is a problem.
+			 */
+			pmap_remove(kernel_pmap,
+				kvaf[i].addr, kvaf[i].addr + kvaf[i].size);
+			if( size && !kva && kvaf[i].size == size) {
+				kva = kvaf[i].addr;
+			} else {
+				kmem_free_wakeup(io_map, kvaf[i].addr,
+					kvaf[i].size);
+			}
+		}
+		/* more entries were queued while we were freeing: continue there */
+		if (kvasfreecnt != tofree) {
+			startfree = i;
+			bmfreeing = 0;
+			goto more;
+		}
+		kvasfreecnt = 0;
+		bmfreeing = 0;
+	}
+
+	if( size == 0) {
+		/* flush-only call */
+		splx(s);
+		return NULL;
+	}
+
+	if (!kva && !(kva = kmem_alloc_pageable(io_map, size))) {
+		if( !waitok) {
+			splx(s);
+			return NULL;
+		}
+		bmwait = 1;
+		tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0);
+		goto more;
+	}
+	splx(s);
+
+	return kva;
+}
+
+/*
+ * same as vm_bounce_kva -- but really allocate
+ */
+/*
+ * Like vm_bounce_kva(), but also backs the kva with real bounce pages
+ * (count is in pages here, not bytes).  When the bounce pool is not
+ * configured (bouncepages == 0), falls back to plain malloc memory.
+ */
+vm_offset_t
+vm_bounce_kva_alloc(count)
+int count;
+{
+	int i;
+	vm_offset_t kva;
+	vm_offset_t pa;
+	if( bouncepages == 0) {
+		kva = (vm_offset_t) malloc(count*NBPG, M_TEMP, M_WAITOK);
+		return kva;
+	}
+	kva = vm_bounce_kva(count, 1);	/* may sleep */
+	for(i=0;i<count;i++) {
+		pa = vm_bounce_page_find(1);
+		pmap_kenter(kva + i * NBPG, pa);
+	}
+	pmap_update();			/* flush TLB after the kenters */
+	return kva;
+}
+
+/*
+ * same as vm_bounce_kva_free -- but really free
+ */
+/*
+ * Counterpart of vm_bounce_kva_alloc(): release the backing bounce pages
+ * and queue the kva for freeing.  Mirrors the malloc fallback when the
+ * bounce pool is not configured.
+ */
+void
+vm_bounce_kva_alloc_free(kva, count)
+	vm_offset_t kva;
+	int count;
+{
+	int i;
+	vm_offset_t pa;
+	if( bouncepages == 0) {
+		free((caddr_t) kva, M_TEMP);
+		return;
+	}
+	for(i = 0; i < count; i++) {
+		pa = pmap_kextract(kva + i * NBPG);
+		vm_bounce_page_free(pa, 1);
+	}
+	vm_bounce_kva_free(kva, count);	/* NOTE(review): count is pages here, size is bytes elsewhere — confirm units */
+}
+
+/*
+ * do the things necessary to the struct buf to implement
+ * bounce buffers... inserted before the disk sort
+ */
+/*
+ * Set up ISA DMA bounce buffering for a struct buf, called before the
+ * disk sort.  If any page of the transfer lies at or above 16MB (not
+ * addressable by ISA DMA), builds a parallel kva mapping in which the
+ * offending pages are replaced by bounce pages below 16MB, copies write
+ * data in, and swaps b_data to the new mapping (saving the original in
+ * b_savekva and setting B_BOUNCE).  No-op if no bounce pool or no page
+ * needs bouncing.
+ */
+void
+vm_bounce_alloc(bp)
+	struct buf *bp;
+{
+	int countvmpg;
+	vm_offset_t vastart, vaend;
+	vm_offset_t vapstart, vapend;
+	vm_offset_t va, kva;
+	vm_offset_t pa;
+	int dobounceflag = 0;
+	int bounceindex;
+	int i;
+	int s;
+
+	if (bouncepages == 0)
+		return;
+
+	if (bp->b_bufsize < bp->b_bcount) {
+		printf("vm_bounce_alloc: b_bufsize(%d) < b_bcount(%d) !!!!\n",
+			bp->b_bufsize, bp->b_bcount);
+		bp->b_bufsize = bp->b_bcount;
+	}
+
+	vastart = (vm_offset_t) bp->b_data;
+	vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;
+
+	vapstart = i386_trunc_page(vastart);
+	vapend = i386_round_page(vaend);
+	countvmpg = (vapend - vapstart) / NBPG;
+
+/*
+ * if any page is above 16MB, then go into bounce-buffer mode
+ */
+	va = vapstart;
+	for (i = 0; i < countvmpg; i++) {
+		pa = pmap_kextract(va);
+		if (pa >= SIXTEENMEG)
+			++dobounceflag;
+		va += NBPG;
+	}
+	if (dobounceflag == 0)
+		return;
+
+	if (bouncepages < dobounceflag)
+		panic("Not enough bounce buffers!!!");
+
+/*
+ * allocate a replacement kva for b_addr
+ */
+	kva = vm_bounce_kva(countvmpg*NBPG, 1);
+	va = vapstart;
+	for (i = 0; i < countvmpg; i++) {
+		pa = pmap_kextract(va);
+		if (pa >= SIXTEENMEG) {
+			/*
+			 * allocate a replacement page
+			 */
+			vm_offset_t bpa = vm_bounce_page_find(1);
+			pmap_kenter(kva + (NBPG * i), bpa);
+			/*
+			 * if we are writing, then copy the data into the page
+			 */
+			if ((bp->b_flags & B_READ) == 0) {
+				pmap_update();
+				bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG);
+			}
+		} else {
+			/*
+			 * use original page
+			 */
+			pmap_kenter(kva + (NBPG * i), pa);
+		}
+		va += NBPG;
+	}
+	pmap_update();
+
+/*
+ * flag the buffer as being bounced
+ */
+	bp->b_flags |= B_BOUNCE;
+/*
+ * save the original buffer kva
+ */
+	bp->b_savekva = bp->b_data;
+/*
+ * put our new kva into the buffer (offset by original offset)
+ */
+	bp->b_data = (caddr_t) (((vm_offset_t) kva) |
+				((vm_offset_t) bp->b_savekva & (NBPG - 1)));
+	return;
+}
+
+/*
+ * hook into biodone to free bounce buffer
+ */
+/*
+ * Undo vm_bounce_alloc(), hooked into biodone: walk the bounce kva page
+ * by page; for each page that came from the bounce pool, copy read data
+ * back to the original kva and free the bounce page.  Finally queue the
+ * replacement kva for freeing and restore b_data from b_savekva.
+ * No-op unless B_BOUNCE is set.
+ */
+void
+vm_bounce_free(bp)
+	struct buf *bp;
+{
+	int i;
+	vm_offset_t origkva, bouncekva;
+	vm_offset_t vastart, vaend;
+	vm_offset_t vapstart, vapend;
+	int countbounce = 0;
+	vm_offset_t firstbouncepa = 0;
+	int firstbounceindex;
+	int countvmpg;
+	vm_offset_t bcount;
+	int s;
+
+/*
+ * if this isn't a bounced buffer, then just return
+ */
+	if ((bp->b_flags & B_BOUNCE) == 0)
+		return;
+
+	origkva = (vm_offset_t) bp->b_savekva;
+	bouncekva = (vm_offset_t) bp->b_data;
+
+	vastart = bouncekva;
+	vaend = bouncekva + bp->b_bufsize;
+	bcount = bp->b_bufsize;
+
+	vapstart = i386_trunc_page(vastart);
+	vapend = i386_round_page(vaend);
+
+	countvmpg = (vapend - vapstart) / NBPG;
+
+/*
+ * check every page in the kva space for b_addr
+ */
+	for (i = 0; i < countvmpg; i++) {
+		vm_offset_t mybouncepa;
+		vm_offset_t copycount;
+
+		/* bytes remaining in the current (possibly partial) page */
+		copycount = i386_round_page(bouncekva + 1) - bouncekva;
+		mybouncepa = pmap_kextract(i386_trunc_page(bouncekva));
+
+/*
+ * if this is a bounced pa, then process as one
+ */
+		if ((mybouncepa >= bouncepa) && (mybouncepa < bouncepaend)) {
+			if (copycount > bcount)
+				copycount = bcount;	/* clamp to transfer tail */
+/*
+ * if this is a read, then copy from bounce buffer into original buffer
+ */
+			if (bp->b_flags & B_READ)
+				bcopy((caddr_t) bouncekva, (caddr_t) origkva, copycount);
+/*
+ * free the bounce allocation
+ */
+			vm_bounce_page_free(i386_trunc_page(mybouncepa), 1);
+		}
+
+		origkva += copycount;
+		bouncekva += copycount;
+		bcount -= copycount;
+	}
+
+/*
+ * add the old kva into the "to free" list
+ */
+	bouncekva = i386_trunc_page((vm_offset_t) bp->b_data);
+	vm_bounce_kva_free( bouncekva, countvmpg*NBPG, 0);
+	bp->b_data = bp->b_savekva;
+	bp->b_savekva = 0;
+	bp->b_flags &= ~B_BOUNCE;
+
+	return;
+}
+
+/*
+ * init the bounce buffer system
+ */
+/*
+ * Initialize the bounce buffer system from the bouncememory/bouncepages
+ * values set up earlier in boot: build the allocation bitmap and record
+ * the physical bounds of the pool.  No-op if no bounce pages configured.
+ */
+void
+vm_bounce_init()
+{
+	vm_offset_t minaddr, maxaddr;
+
+	kvasfreecnt = 0;
+
+	if (bouncepages == 0)
+		return;
+
+	bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
+	bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);
+
+	if (!bounceallocarray)
+		panic("Cannot allocate bounce resource array\n");
+
+	/* NOTE(review): sizeof(long) here vs sizeof(unsigned) in the malloc
+	 * above — identical on i386, but the mismatch is fragile */
+	bzero(bounceallocarray, bounceallocarraysize * sizeof(long));
+
+
+	bouncepa = pmap_kextract((vm_offset_t) bouncememory);
+	bouncepaend = bouncepa + bouncepages * NBPG;
+	bouncefree = bouncepages;
+}
+
+
+#ifdef BROKEN_IN_44
+/*
+ * (dead code: compiled only under BROKEN_IN_44)
+ * Map the physical pages of two existing transfers back-to-back into a
+ * single new kva so they can be issued as one clustered I/O.
+ * NOTE(review): the outer `pa' is unused — both loops declare a
+ * shadowing local — and pmap_kextract is passed a caddr_t-cast address.
+ */
+static void
+cldiskvamerge( kvanew, orig1, orig1cnt, orig2, orig2cnt)
+	vm_offset_t kvanew;
+	vm_offset_t orig1, orig1cnt;
+	vm_offset_t orig2, orig2cnt;
+{
+	int i;
+	vm_offset_t pa;
+/*
+ * enter the transfer physical addresses into the new kva
+ */
+	for(i=0;i<orig1cnt;i++) {
+		vm_offset_t pa;
+		pa = pmap_kextract((caddr_t) orig1 + i * PAGE_SIZE);
+		pmap_kenter(kvanew + i * PAGE_SIZE, pa);
+	}
+
+	for(i=0;i<orig2cnt;i++) {
+		vm_offset_t pa;
+		pa = pmap_kextract((caddr_t) orig2 + i * PAGE_SIZE);
+		pmap_kenter(kvanew + (i + orig1cnt) * PAGE_SIZE, pa);
+	}
+	pmap_update();
+}
+
+/*
+ * (dead code: compiled only under BROKEN_IN_44)
+ * Classic two-list elevator disksort, extended to merge a write request
+ * with a physically adjacent neighbor into a B_CLUSTER transfer via
+ * cldiskvamerge().  Reads and unaligned/odd-sized transfers are never
+ * clustered.  Locals `i', `trycount' and `kvaorig' are unused.
+ */
+void
+cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
+{
+	register struct buf *ap, *newbp;
+	int i, trycount=0;
+	vm_offset_t orig1pages, orig2pages;
+	vm_offset_t orig1begin, orig2begin;
+	vm_offset_t kvanew, kvaorig;
+
+	if( bp->b_bcount < MAXCLSTATS*PAGE_SIZE)
+		++rqstats[bp->b_bcount/PAGE_SIZE];
+	/*
+	 * If nothing on the activity queue, then
+	 * we become the only thing.
+	 */
+	ap = dp->b_actf;
+	if(ap == NULL) {
+		dp->b_actf = bp;
+		dp->b_actl = bp;
+		bp->av_forw = NULL;
+		return;
+	}
+
+	/*
+	 * If we lie after the first (currently active)
+	 * request, then we must locate the second request list
+	 * and add ourselves to it.
+	 */
+
+	if (bp->b_pblkno < ap->b_pblkno) {
+		while (ap->av_forw) {
+			/*
+			 * Check for an ``inversion'' in the
+			 * normally ascending block numbers,
+			 * indicating the start of the second request list.
+			 */
+			if (ap->av_forw->b_pblkno < ap->b_pblkno) {
+				/*
+				 * Search the second request list
+				 * for the first request at a larger
+				 * block number.  We go before that;
+				 * if there is no such request, we go at end.
+				 */
+				do {
+					if (bp->b_pblkno < ap->av_forw->b_pblkno)
+						goto insert;
+					ap = ap->av_forw;
+				} while (ap->av_forw);
+				goto insert;		/* after last */
+			}
+			ap = ap->av_forw;
+		}
+		/*
+		 * No inversions... we will go after the last, and
+		 * be the first request in the second request list.
+		 */
+		goto insert;
+	}
+	/*
+	 * Request is at/after the current request...
+	 * sort in the first request list.
+	 */
+	while (ap->av_forw) {
+		/*
+		 * We want to go after the current request
+		 * if there is an inversion after it (i.e. it is
+		 * the end of the first request list), or if
+		 * the next request is a larger block than our request.
+		 */
+		if (ap->av_forw->b_pblkno < ap->b_pblkno ||
+		    bp->b_pblkno < ap->av_forw->b_pblkno )
+			goto insert;
+		ap = ap->av_forw;
+	}
+
+insert:
+
+	/*
+	 * read clustering with new read-ahead disk drives hurts mostly, so
+	 * we don't bother...
+	 */
+	if( bp->b_flags & B_READ)
+		goto nocluster;
+	/*
+	 * we currently only cluster I/O transfers that are at page-aligned
+	 * kvas and transfers that are multiples of page lengths.
+	 */
+	if ((bp->b_flags & B_BAD) == 0 &&
+		((bp->b_bcount & PAGE_MASK) == 0) &&
+		(((vm_offset_t) bp->b_un.b_addr & PAGE_MASK) == 0)) {
+		if( maxio > MAXCLSTATS*PAGE_SIZE)
+			maxio = MAXCLSTATS*PAGE_SIZE;
+		/*
+		 * merge with previous?
+		 * conditions:
+		 * 	1) We reside physically immediately after the previous block.
+		 *	2) The previous block is not first on the device queue because
+		 *	   such a block might be active.
+		 *  3) The mode of the two I/Os is identical.
+		 *  4) The previous kva is page aligned and the previous transfer
+		 *	   is a multiple of a page in length.
+		 *	5) And the total I/O size would be below the maximum.
+		 */
+		if( (ap->b_pblkno + (ap->b_bcount / DEV_BSIZE) == bp->b_pblkno) &&
+			(dp->b_actf != ap) &&
+			((ap->b_flags & ~B_CLUSTER) == bp->b_flags) &&
+			((ap->b_flags & B_BAD) == 0) &&
+			((ap->b_bcount & PAGE_MASK) == 0) &&
+			(((vm_offset_t) ap->b_un.b_addr & PAGE_MASK) == 0) &&
+			(ap->b_bcount + bp->b_bcount < maxio)) {
+
+			orig1begin = (vm_offset_t) ap->b_un.b_addr;
+			orig1pages = ap->b_bcount / PAGE_SIZE;
+
+			orig2begin = (vm_offset_t) bp->b_un.b_addr;
+			orig2pages = bp->b_bcount / PAGE_SIZE;
+			/*
+			 * see if we can allocate a kva, if we cannot, the don't
+			 * cluster.
+			 */
+			kvanew = vm_bounce_kva( PAGE_SIZE * (orig1pages + orig2pages), 0);
+			if( !kvanew) {
+				goto nocluster;
+			}
+
+
+			if( (ap->b_flags & B_CLUSTER) == 0) {
+
+				/*
+				 * get a physical buf pointer
+				 */
+				newbp = (struct buf *)trypbuf();
+				if( !newbp) {
+					vm_bounce_kva_free( kvanew, PAGE_SIZE * (orig1pages + orig2pages), 1);
+					goto nocluster;
+				}
+
+				cldiskvamerge( kvanew, orig1begin, orig1pages, orig2begin, orig2pages);
+
+				/*
+				 * build the new bp to be handed off to the device
+				 */
+
+				--clstats[ap->b_bcount/PAGE_SIZE];
+				*newbp = *ap;
+				newbp->b_flags |= B_CLUSTER;
+				newbp->b_un.b_addr = (caddr_t) kvanew;
+				newbp->b_bcount += bp->b_bcount;
+				newbp->b_bufsize = newbp->b_bcount;
+				newbp->b_clusterf = ap;
+				newbp->b_clusterl = bp;
+				++clstats[newbp->b_bcount/PAGE_SIZE];
+
+				/*
+				 * enter the new bp onto the device queue
+				 */
+				if( ap->av_forw)
+					ap->av_forw->av_back = newbp;
+				else
+					dp->b_actl = newbp;
+
+				if( dp->b_actf != ap )
+					ap->av_back->av_forw = newbp;
+				else
+					dp->b_actf = newbp;
+
+				/*
+				 * enter the previous bps onto the cluster queue
+				 */
+				ap->av_forw = bp;
+				bp->av_back = ap;
+
+				ap->av_back = NULL;
+				bp->av_forw = NULL;
+
+			} else {
+				vm_offset_t addr;
+
+				cldiskvamerge( kvanew, orig1begin, orig1pages, orig2begin, orig2pages);
+				/*
+				 * free the old kva
+				 */
+				vm_bounce_kva_free( orig1begin, ap->b_bufsize, 0);
+				--clstats[ap->b_bcount/PAGE_SIZE];
+
+				ap->b_un.b_addr = (caddr_t) kvanew;
+
+				ap->b_clusterl->av_forw = bp;
+				bp->av_forw = NULL;
+				bp->av_back = ap->b_clusterl;
+				ap->b_clusterl = bp;
+
+				ap->b_bcount += bp->b_bcount;
+				ap->b_bufsize = ap->b_bcount;
+				++clstats[ap->b_bcount/PAGE_SIZE];
+			}
+			return;
+			/*
+			 * merge with next?
+			 * conditions:
+			 * 	1) We reside physically before the next block.
+			 *  3) The mode of the two I/Os is identical.
+			 *  4) The next kva is page aligned and the next transfer
+			 *	   is a multiple of a page in length.
+			 *	5) And the total I/O size would be below the maximum.
+			 */
+		} else if( ap->av_forw &&
+			(bp->b_pblkno + (bp->b_bcount / DEV_BSIZE) == ap->av_forw->b_pblkno) &&
+			(bp->b_flags == (ap->av_forw->b_flags & ~B_CLUSTER)) &&
+			((ap->av_forw->b_flags & B_BAD) == 0) &&
+			((ap->av_forw->b_bcount & PAGE_MASK) == 0) &&
+			(((vm_offset_t) ap->av_forw->b_un.b_addr & PAGE_MASK) == 0) &&
+			(ap->av_forw->b_bcount + bp->b_bcount < maxio)) {
+
+			orig1begin = (vm_offset_t) bp->b_un.b_addr;
+			orig1pages = bp->b_bcount / PAGE_SIZE;
+
+			orig2begin = (vm_offset_t) ap->av_forw->b_un.b_addr;
+			orig2pages = ap->av_forw->b_bcount / PAGE_SIZE;
+
+			/*
+			 * see if we can allocate a kva, if we cannot, the don't
+			 * cluster.
+			 */
+			kvanew = vm_bounce_kva( PAGE_SIZE * (orig1pages + orig2pages), 0);
+			if( !kvanew) {
+				goto nocluster;
+			}
+
+			/*
+			 * if next isn't a cluster we need to create one
+			 */
+			if( (ap->av_forw->b_flags & B_CLUSTER) == 0) {
+
+				/*
+				 * get a physical buf pointer
+				 */
+				newbp = (struct buf *)trypbuf();
+				if( !newbp) {
+					vm_bounce_kva_free( kvanew, PAGE_SIZE * (orig1pages + orig2pages), 1);
+					goto nocluster;
+				}
+
+				cldiskvamerge( kvanew, orig1begin, orig1pages, orig2begin, orig2pages);
+				ap = ap->av_forw;
+				--clstats[ap->b_bcount/PAGE_SIZE];
+				*newbp = *ap;
+				newbp->b_flags |= B_CLUSTER;
+				newbp->b_un.b_addr = (caddr_t) kvanew;
+				newbp->b_blkno = bp->b_blkno;
+				newbp->b_pblkno = bp->b_pblkno;
+				newbp->b_bcount += bp->b_bcount;
+				newbp->b_bufsize = newbp->b_bcount;
+				newbp->b_clusterf = bp;
+				newbp->b_clusterl = ap;
+				++clstats[newbp->b_bcount/PAGE_SIZE];
+
+				if( ap->av_forw)
+					ap->av_forw->av_back = newbp;
+				else
+					dp->b_actl = newbp;
+
+				if( dp->b_actf != ap )
+					ap->av_back->av_forw = newbp;
+				else
+					dp->b_actf = newbp;
+
+				bp->av_forw = ap;
+				ap->av_back = bp;
+
+				bp->av_back = NULL;
+				ap->av_forw = NULL;
+			} else {
+				vm_offset_t addr;
+
+				cldiskvamerge( kvanew, orig1begin, orig1pages, orig2begin, orig2pages);
+				ap = ap->av_forw;
+				vm_bounce_kva_free( orig2begin, ap->b_bufsize, 0);
+
+				ap->b_un.b_addr = (caddr_t) kvanew;
+				bp->av_forw = ap->b_clusterf;
+				ap->b_clusterf->av_back = bp;
+				ap->b_clusterf = bp;
+				bp->av_back = NULL;
+				--clstats[ap->b_bcount/PAGE_SIZE];
+
+				ap->b_blkno = bp->b_blkno;
+				ap->b_pblkno = bp->b_pblkno;
+				ap->b_bcount += bp->b_bcount;
+				ap->b_bufsize = ap->b_bcount;
+				++clstats[ap->b_bcount/PAGE_SIZE];
+
+			}
+			return;
+		}
+	}
+	/*
+	 * don't merge
+	 */
+nocluster:
+	++clstats[bp->b_bcount/PAGE_SIZE];
+	bp->av_forw = ap->av_forw;
+	if( bp->av_forw)
+		bp->av_forw->av_back = bp;
+	else
+		dp->b_actl = bp;
+
+	ap->av_forw = bp;
+	bp->av_back = ap;
+}
+#endif
+
+/*
+ * quick version of vm_fault
+ */
+
+/*
+ * Quick version of vm_fault: fault a single page in by touching it.
+ * On a 386 (no CR0_WP — kernel writes ignore read-only PTEs) a write
+ * fault must go through vm_fault() explicitly; on later CPUs a volatile
+ * read-modify-write (or plain read) of the address triggers the fault
+ * in hardware.
+ */
+void
+vm_fault_quick( v, prot)
+	vm_offset_t v;
+	int prot;
+{
+	if( (cpu_class == CPUCLASS_386) &&
+		(prot & VM_PROT_WRITE))
+		vm_fault(&curproc->p_vmspace->vm_map, v,
+			VM_PROT_READ|VM_PROT_WRITE, FALSE);
+	else if( prot & VM_PROT_WRITE)
+		*(volatile char *)v += 0;	/* write-touch without changing data */
+	else
+		*(volatile char *)v;		/* read-touch to fault the page in */
+}
+
+
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ * Copy and update the kernel stack and pcb, making the child
+ * ready to run, and marking it so that it can return differently
+ * than the parent. Returns 1 in the child process, 0 in the parent.
+ * We currently double-map the user area so that the stack is at the same
+ * address in each process; in the future we will probably relocate
+ * the frame pointers on the stack after copying.
+ */
+/*
+ * Machine-dependent half of fork(): copy p1's pcb and the live part of
+ * the kernel stack into p2, activate p2's pmap, and arrange via
+ * savectx() for the child to resume here.  Returns 0 in the parent and
+ * 1 in the child (savectx's context "returns twice", like setjmp).
+ */
+int
+cpu_fork(p1, p2)
+	register struct proc *p1, *p2;
+{
+	register struct user *up = p2->p_addr;
+	int foo, offset, addr, i;	/* NOTE(review): foo, addr, i only used by the #if 0 code */
+	extern char kstack[];
+	extern int mvesp();
+
+	/*
+	 * Copy pcb and stack from proc p1 to p2.
+	 * We do this as cheaply as possible, copying only the active
+	 * part of the stack.  The stack and pcb need to agree;
+	 * this is tricky, as the final pcb is constructed by savectx,
+	 * but its frame isn't yet on the stack when the stack is copied.
+	 * swtch compensates for this when the child eventually runs.
+	 * This should be done differently, with a single call
+	 * that copies and updates the pcb+stack,
+	 * replacing the bcopy and savectx.
+	 */
+	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
+	offset = mvesp() - (int)kstack;	/* current stack depth */
+	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
+	    (unsigned) ctob(UPAGES) - offset);
+	p2->p_md.md_regs = p1->p_md.md_regs;
+
+	/*
+	 * Wire top of address space of child to its kstack.
+	 * First, fault in a page of pte's to map it.
+	 */
+#if 0
+	addr = trunc_page((u_int)vtopte(kstack));
+	vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+NBPG, FALSE);
+	for (i=0; i < UPAGES; i++)
+		pmap_enter(&p2->p_vmspace->vm_pmap, kstack+i*NBPG,
+			   pmap_extract(kernel_pmap, ((int)p2->p_addr)+i*NBPG),
+			   /*
+			    * The user area has to be mapped writable because
+			    * it contains the kernel stack (when CR0_WP is on
+			    * on a 486 there is no user-read/kernel-write
+			    * mode).  It is protected from user mode access
+			    * by the segment limits.
+			    */
+			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
+#endif
+	pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);
+
+	/*
+	 *
+	 * Arrange for a non-local goto when the new process
+	 * is started, to resume here, returning nonzero from setjmp.
+	 */
+	if (savectx(up, 1)) {
+		/*
+		 * Return 1 in child.
+		 */
+		return (1);
+	}
+	return (0);
+}
+
+#ifdef notyet
+/*
+ * cpu_exit is called as the last action during exit.
+ * (This whole version is compiled out by the enclosing "#ifdef notyet".)
+ *
+ * We change to an inactive address space and a "safe" stack,
+ * passing thru an argument to the new stack. Now, safely isolated
+ * from the resources we're shedding, we release the address space
+ * and any remaining machine-dependent resources, including the
+ * memory for the user structure and kernel stack.
+ *
+ * Next, we assign a dummy context to be written over by swtch,
+ * calling it to send this process off to oblivion.
+ * [The nullpcb allows us to minimize cost in mi_switch() by not having
+ * a special case].
+ */
+struct proc *swtch_to_inactive();	/* switches stacks; returns the dead proc */
+volatile void
+cpu_exit(p)
+	register struct proc *p;
+{
+	static struct pcb nullpcb;	/* pcb to overwrite on last swtch */
+
+#if NNPX > 0
+	npxexit(p);			/* release any FPU state owned by p */
+#endif	/* NNPX */
+
+	/* move to inactive space and stack, passing arg across */
+	p = swtch_to_inactive(p);
+
+	/* drop per-process resources */
+	vmspace_free(p->p_vmspace);
+	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
+
+	p->p_addr = (struct user *) &nullpcb;	/* dummy context for the final switch */
+	mi_switch();
+	/* NOTREACHED */
+}
+#else
+/*
+ * cpu_exit -- machine-dependent tail of process exit (active version).
+ * Releases FPU state and switches away for the last time; never returns.
+ * The dead process's user area and vmspace are reclaimed later by
+ * cpu_wait() below.
+ */
+void
+cpu_exit(p)
+	register struct proc *p;
+{
+
+#if NNPX > 0
+	npxexit(p);	/* release any FPU (npx) state owned by p */
+#endif	/* NNPX */
+	/*
+	 * NOTE(review): presumably re-asserts the exiting process as
+	 * curproc so the final mi_switch() operates on it -- confirm
+	 * against what exit1() does before calling here.
+	 */
+	curproc = p;
+	mi_switch();
+	/*
+	 * This is to shutup the compiler, and if swtch() failed I suppose
+	 * this would be a good thing. This keeps gcc happy because panic
+	 * is a volatile void function as well.
+	 */
+	panic("cpu_exit");
+}
+
+/*
+ * cpu_wait -- reclaim the machine-dependent resources of a process
+ * that has already exited: unmap and free its user area (struct user
+ * plus kernel stack, UPAGES pages) and release its address space.
+ * (The 'kstack' extern below is declared but unused here.)
+ */
+void
+cpu_wait(p) struct proc *p; {
+/*	extern vm_map_t upages_map; */
+	extern char kstack[];
+
+	/* drop per-process resources: unmap the U area, then free its KVA */
+	pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr,
+		((vm_offset_t) p->p_addr) + ctob(UPAGES));
+	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
+	vmspace_free(p->p_vmspace);
+}
+#endif
+
+/*
+ * Dump the machine specific header information at the start of a core dump.
+ * Writes the entire user area (struct user + kernel stack, ctob(UPAGES)
+ * bytes) to offset 0 of the core vnode; returns the vn_rdwr() error code
+ * (0 on success).
+ */
+int
+cpu_coredump(p, vp, cred)
+	struct proc *p;
+	struct vnode *vp;
+	struct ucred *cred;
+{
+
+	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
+	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
+	    p));
+}
+
+/*
+ * Set a red zone in the kernel stack after the u. area.
+ * Currently a no-op placeholder: both arguments are ignored and
+ * no protection is installed; see the implementation note below.
+ */
+void
+setredzone(pte, vaddr)
+	u_short *pte;
+	caddr_t vaddr;
+{
+/* eventually do this by setting up an expand-down stack segment
+   for ss0: selector, allowing stack access down to top of u.
+   this means though that protection violations need to be handled
+   thru a double fault exception that must do an integral task
+   switch to a known good context, within which a dump can be
+   taken. a sensible scheme might be to save the initial context
+   used by sched (that has physical memory mapped 1:1 at bottom)
+   and take the dump while still in mapped mode */
+}
+
+/*
+ * Move pages from one kernel virtual address to another.
+ * Both addresses are assumed to reside in the Sysmap,
+ * and size must be a multiple of CLSIZE.
+ */
+
+/*
+ * Move pages from one kernel virtual address to another.
+ * Both addresses are assumed to reside in the Sysmap,
+ * and size must be a multiple of CLSIZE.
+ */
+
+void
+pagemove(from, to, size)
+	register caddr_t from, to;
+	int size;
+{
+	register vm_offset_t pa;
+
+	if (size & CLOFSET)
+		panic("pagemove");	/* size must be a multiple of the cluster size */
+	while (size > 0) {
+		pa = pmap_kextract((vm_offset_t)from);
+		if (pa == 0)
+			panic("pagemove 2");	/* source page must be mapped */
+		if (pmap_kextract((vm_offset_t)to) != 0)
+			panic("pagemove 3");	/* destination must be unmapped */
+		/* unmap the page at 'from' and re-enter it at 'to' */
+		pmap_remove(kernel_pmap,
+			(vm_offset_t)from, (vm_offset_t)from + PAGE_SIZE);
+		pmap_kenter( (vm_offset_t)to, pa);
+		from += PAGE_SIZE;
+		to += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+	pmap_update();	/* make the mapping changes visible */
+}
+
+/*
+ * Convert kernel VA to physical address.
+ * Panics if the address is not mapped.
+ */
+u_long
+kvtop(void *addr)
+{
+	/* NOTE(review): despite its name, 'va' holds the *physical*
+	 * address returned by pmap_kextract. */
+	vm_offset_t va;
+
+	va = pmap_kextract((vm_offset_t)addr);
+	if (va == 0)
+		panic("kvtop: zero page frame");
+	return((int)va);
+}
+
+extern vm_map_t phys_map;
+
+/*
+ * Map an IO request into kernel virtual address space.
+ *
+ * All requests are (re)mapped into kernel VA space.
+ * Notice that we use b_bufsize for the size of the buffer
+ * to be mapped. b_bcount might be modified by the driver.
+ *
+ * First pass: fault in and hold both the data pages and the page-table
+ * pages that map them, so they stay resident for the duration of the
+ * I/O. Second pass: allocate KVA from phys_map and wire translations
+ * for each page there, saving the original address in b_saveaddr.
+ */
+void
+vmapbuf(bp)
+	register struct buf *bp;
+{
+	register int npf;
+	register caddr_t addr;
+	int off;
+	vm_offset_t kva;
+	vm_offset_t pa, lastv, v;
+
+	if ((bp->b_flags & B_PHYS) == 0)
+		panic("vmapbuf");
+
+	/* 'lastv' remembers the last pte page held, so each pte page is
+	 * held only once even though it maps many data pages */
+	lastv = 0;
+	for (addr = (caddr_t)trunc_page(bp->b_data);
+		addr < bp->b_data + bp->b_bufsize;
+		addr += PAGE_SIZE) {
+
+/*
+ * make sure that the pde is valid and held
+ */
+		v = trunc_page(((vm_offset_t)vtopte(addr)));
+		if (v != lastv) {
+			vm_fault_quick(v, VM_PROT_READ);
+			pa = pmap_extract(&curproc->p_vmspace->vm_pmap, v);
+			vm_page_hold(PHYS_TO_VM_PAGE(pa));
+			lastv = v;
+		}
+
+/*
+ * do the vm_fault if needed, do the copy-on-write thing when
+ * reading stuff off device into memory.
+ * (B_READ means the device writes into memory, hence write access.)
+ */
+		vm_fault_quick(addr,
+			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
+		pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t) addr);
+/*
+ * hold the data page
+ */
+		vm_page_hold(PHYS_TO_VM_PAGE(pa));
+	}
+
+	/* NOTE(review): b_un.b_addr appears to alias b_data used above --
+	 * confirm against the struct buf union layout. */
+	addr = bp->b_saveaddr = bp->b_un.b_addr;
+	off = (int)addr & PGOFSET;
+	npf = btoc(round_page(bp->b_bufsize + off));
+	kva = kmem_alloc_wait(phys_map, ctob(npf));
+	bp->b_un.b_addr = (caddr_t) (kva + off);	/* buffer now at kernel VA */
+	while (npf--) {
+		pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t)addr);
+		if (pa == 0)
+			panic("vmapbuf: null page frame");
+		pmap_kenter(kva, trunc_page(pa));	/* wire the page into phys_map */
+		addr += PAGE_SIZE;
+		kva += PAGE_SIZE;
+	}
+	pmap_update();	/* make the new kernel mappings visible */
+}
+
+/*
+ * Free the io map PTEs associated with this IO operation.
+ * We also invalidate the TLB entries and restore the original b_addr.
+ * Finally, unhold the data pages and page-table pages that vmapbuf()
+ * held for the duration of the I/O.
+ */
+void
+vunmapbuf(bp)
+	register struct buf *bp;
+{
+	register int npf;
+	register caddr_t addr = bp->b_un.b_addr;
+	/* NOTE(review): 'va' is declared but never used */
+	vm_offset_t kva,va,v,lastv,pa;
+
+	if ((bp->b_flags & B_PHYS) == 0)
+		panic("vunmapbuf");
+	/* release the phys_map KVA that vmapbuf() allocated */
+	npf = btoc(round_page(bp->b_bufsize + ((int)addr & PGOFSET)));
+	kva = (vm_offset_t)((int)addr & ~PGOFSET);
+	kmem_free_wakeup(phys_map, kva, ctob(npf));
+	/* restore the original (user) buffer address saved by vmapbuf() */
+	bp->b_un.b_addr = bp->b_saveaddr;
+	bp->b_saveaddr = NULL;
+
+
+/*
+ * unhold the pde, and data pages
+ * (b_data now refers to the restored original buffer address)
+ */
+	lastv = 0;
+	for (addr = (caddr_t)trunc_page(bp->b_data);
+		addr < bp->b_data + bp->b_bufsize;
+		addr += NBPG) {
+
+	/*
+	 * release the data page
+	 */
+		pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t) addr);
+		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
+
+	/*
+	 * and unhold the page table
+	 * (each pte page was held once, so unhold it once -- 'lastv'
+	 * mirrors the bookkeeping in vmapbuf())
+	 */
+		v = trunc_page(((vm_offset_t)vtopte(addr)));
+		if (v != lastv) {
+			pa = pmap_extract(&curproc->p_vmspace->vm_pmap, v);
+			vm_page_unhold(PHYS_TO_VM_PAGE(pa));
+			lastv = v;
+		}
+	}
+}
+
+/*
+ * Force reset the processor by invalidating the entire address space!
+ * NOTE(review): with the page directory zeroed and the TLB flushed,
+ * the next instruction fetch cannot be translated; this presumably
+ * ends in a triple fault that resets the CPU -- confirm.
+ */
+void
+cpu_reset() {
+
+	/* force a shutdown by unmapping entire address space ! */
+	bzero((caddr_t) PTD, NBPG);	/* wipe the page directory */
+
+	/* "good night, sweet prince .... <THUNK!>" */
+	tlbflush();
+	/* NOTREACHED */
+	while(1);	/* spin in case the fault does not take immediately */
+}
+
+/*
+ * Grow the user stack to allow for 'sp'. This version grows the stack in
+ * chunks of SGROWSIZ.
+ *
+ * Returns 1 on success (including when 'sp' is outside the stack region
+ * and no growth is needed), 0 when the stack may not grow (rlimit
+ * exceeded or vm_allocate failure).
+ */
+int
+grow(p, sp)
+	struct proc *p;
+	u_int sp;
+{
+	unsigned int nss;
+	caddr_t v;
+	struct vmspace *vm = p->p_vmspace;
+
+	/* not a stack address -- nothing to do */
+	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
+		return (1);
+
+	/* nss = total stack size needed to cover sp, in bytes */
+	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);
+
+	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
+		return (0);
+
+	/* vm_ssize is kept in pages; << PAGE_SHIFT converts to bytes */
+	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
+		SGROWSIZ) < nss) {
+		int grow_amount;
+		/*
+		 * If necessary, grow the VM that the stack occupies
+		 * to allow for the rlimit. This allows us to not have
+		 * to allocate all of the VM up-front in execve (which
+		 * is expensive).
+		 * Grow the VM by the amount requested rounded up to
+		 * the nearest SGROWSIZ to provide for some hysteresis.
+		 */
+		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
+		/* new bottom of the stack region (stack grows down from USRSTACK) */
+		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
+			SGROWSIZ) - grow_amount;
+		/*
+		 * If there isn't enough room to extend by SGROWSIZ, then
+		 * just extend to the maximum size
+		 */
+		if (v < vm->vm_maxsaddr) {
+			v = vm->vm_maxsaddr;
+			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
+		}
+		if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
+			grow_amount, FALSE) != KERN_SUCCESS) {
+			return (0);
+		}
+		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
+	}
+
+	return (1);
+}
diff --git a/sys/amd64/include/asmacros.h b/sys/amd64/include/asmacros.h
new file mode 100644
index 0000000..4af0b97
--- /dev/null
+++ b/sys/amd64/include/asmacros.h
@@ -0,0 +1,49 @@
+#define ALIGN_DATA	.align	2	/* 4 byte alignment, zero filled */
+#define ALIGN_TEXT	.align	2,0x90	/* 4-byte alignment, nop filled */
+#define SUPERALIGN_TEXT	.align	4,0x90	/* 16-byte alignment (better for 486), nop filled */
+
+/*
+ * GEN_ENTRY emits an aligned, exported label. The "_/**/name" form in
+ * the users below pastes a leading underscore onto the symbol via the
+ * traditional-cpp comment-concatenation trick (a.out symbol naming).
+ */
+#define GEN_ENTRY(name)		ALIGN_TEXT; .globl name; name:
+#define NON_GPROF_ENTRY(name)	GEN_ENTRY(_/**/name)
+
+/* These three are place holders for future changes to the profiling code */
+#define MCOUNT_LABEL(name)
+#define MEXITCOUNT
+#define FAKE_MCOUNT(caller)
+
+#ifdef GPROF
+/*
+ * ALTENTRY() must be before a corresponding ENTRY() so that it can jump
+ * over the mcounting.
+ */
+#define ALTENTRY(name)	GEN_ENTRY(_/**/name); MCOUNT; jmp 2f
+#define ENTRY(name)	GEN_ENTRY(_/**/name); MCOUNT; 2:
+/*
+ * The call to mcount supports the usual (bad) conventions. We allocate
+ * some data and pass a pointer to it although the FreeBSD doesn't use
+ * the data. We set up a frame before calling mcount because that is
+ * the standard convention although it makes work for both mcount and
+ * callers.
+ */
+#define MCOUNT		.data; ALIGN_DATA; 1:; .long 0; .text; \
+			pushl %ebp; movl %esp,%ebp; \
+			movl $1b,%eax; call mcount; popl %ebp
+#else
+/*
+ * ALTENTRY() has to align because it is before a corresponding ENTRY().
+ * ENTRY() has to align too because there may be no ALTENTRY() before it.
+ * If there is a previous ALTENTRY() then the alignment code is empty.
+ */
+#define ALTENTRY(name)	GEN_ENTRY(_/**/name)
+#define ENTRY(name)	GEN_ENTRY(_/**/name)
+#define MCOUNT
+
+#endif
+
+/*
+ * I/O recovery pauses: the inb from unused diagnostic port 0x84 is used
+ * purely as a short, bus-speed-dependent delay between I/O accesses.
+ */
+#ifdef DUMMY_NOPS	/* this will break some older machines */
+#define FASTER_NOP
+#define NOP
+#else
+#define FASTER_NOP	pushl %eax ; inb $0x84,%al ; popl %eax
+#define NOP	pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
+#endif
+
+
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
new file mode 100644
index 0000000..2216d71
--- /dev/null
+++ b/sys/amd64/include/cpu.h
@@ -0,0 +1,118 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)cpu.h 5.4 (Berkeley) 5/9/91
+ * $Id: cpu.h,v 1.4 1993/11/07 17:42:46 wollman Exp $
+ */
+
+#ifndef _MACHINE_CPU_H_
+#define _MACHINE_CPU_H_ 1
+
+/*
+ * Definitions unique to i386 cpu support.
+ */
+#include "machine/frame.h"
+#include "machine/segments.h"
+#include <machine/spl.h>
+
+/*
+ * definitions of cpu-dependent requirements
+ * referenced in generic code
+ */
+#undef COPY_SIGCODE /* don't copy sigcode above user stack in exec */
+
+#define cpu_exec(p) /* nothing */
+#define cpu_swapin(p) /* nothing */
+#define cpu_setstack(p, ap) (p)->p_md.md_regs = ap
+#define cpu_set_init_frame(p, fp) (p)->p_md.md_regs = fp
+
+#define CLKF_USERMODE(framep) (ISPL((framep)->cf_cs) == SEL_UPL)
+#define CLKF_INTR(framep) (0)
+#define CLKF_BASEPRI(framep) (((framep)->cf_ppl & ~SWI_AST_MASK) == 0)
+#define CLKF_PC(framep) ((framep)->cf_eip)
+
+#define resettodr() /* no todr to set */
+
+/*
+ * Preempt the current process if in interrupt from user mode,
+ * or after the current trap/syscall if in system mode.
+ */
+#define need_resched() { want_resched = 1; aston(); }
+
+/*
+ * Give a profiling tick to the current process from the softclock
+ * interrupt. On tahoe, request an ast to send us through trap(),
+ * marking the proc as needing a profiling tick.
+ */
+#define need_proftick(p) { (p)->p_flag |= P_OWEUPC; aston(); }
+
+/*
+ * Notify the current process (p) that it has a signal pending,
+ * process as soon as possible.
+ */
+#define signotify(p) aston()
+
+#define aston() setsoftast()
+#define astoff()
+
+/*
+ * pull in #defines for kinds of processors
+ */
+#include "machine/cputypes.h"
+
+struct cpu_nameclass {
+	char	*cpu_name;	/* printable processor name */
+	int	cpu_class;	/* one of the CPUCLASS_* constants */
+};
+
+/*
+ * CTL_MACHDEP definitions.
+ */
+#define CPU_CONSDEV 1 /* dev_t: console terminal device */
+#define CPU_MAXID 2 /* number of valid machdep ids */
+
+#define CTL_MACHDEP_NAMES { \
+ { 0, 0 }, \
+ { "console_device", CTLTYPE_STRUCT }, \
+}
+
+#ifdef KERNEL
+extern int want_resched; /* resched was called */
+
+extern int cpu;
+extern int cpu_class;
+extern struct cpu_nameclass i386_cpus[];
+#endif
+#endif /* _MACHINE_CPU_H_ */
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
new file mode 100644
index 0000000..df871cb
--- /dev/null
+++ b/sys/amd64/include/cpufunc.h
@@ -0,0 +1,108 @@
+/*
+ * Functions to provide access to special i386 instructions.
+ * XXX - bezillions more are defined in locore.s but are not declared anywhere.
+ *
+ * $Id: cpufunc.h,v 1.9 1994/01/31 23:48:23 davidg Exp $
+ */
+
+#ifndef _MACHINE_CPUFUNC_H_
+#define _MACHINE_CPUFUNC_H_ 1
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+#include "machine/spl.h"
+
+#ifdef __GNUC__
+
+/*
+ * bdb -- break into the kernel debugger (int $3) if one is configured.
+ * Returns 0 when no debugger exists, 1 after the breakpoint trap returns.
+ */
+static inline int bdb(void)
+{
+	extern int bdb_exists;
+
+	if (!bdb_exists)
+		return (0);
+	__asm("int $3");
+	return (1);
+}
+
+/* mask all maskable interrupts on this CPU (cli) */
+static inline void
+disable_intr(void)
+{
+	__asm __volatile("cli");
+}
+
+/* re-enable maskable interrupts (sti) */
+static inline void
+enable_intr(void)
+{
+	__asm __volatile("sti");
+}
+
+/*
+ * This roundabout method of returning a u_char helps stop gcc-1.40 from
+ * generating unnecessary movzbl's.
+ */
+#define	inb(port)	((u_char) u_int_inb(port))
+
+/* read one byte from I/O port 'port'; helper for the inb() macro above */
+static inline u_int
+u_int_inb(u_int port)
+{
+	u_char	data;
+	/*
+	 * We use %%dx and not %1 here because i/o is done at %dx and not at
+	 * %edx, while gcc-2.2.2 generates inferior code (movw instead of movl)
+	 * if we tell it to load (u_short) port.
+	 */
+	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
+	return data;
+}
+
+/* write the byte 'data' to I/O port 'port' */
+static inline void
+outb(u_int port, u_char data)
+{
+	/* asm("ax") pins the temporary into %al for the constraint below */
+	register u_char	al asm("ax");
+
+	al = data;	/* help gcc-1.40's register allocator */
+	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
+}
+
+/* flush the TLB by reloading %cr3 with its current value */
+static inline void
+tlbflush()
+{
+	__asm __volatile("movl %%cr3, %%eax; movl %%eax, %%cr3" : : : "ax");
+}
+
+/* read %cr2: the linear address that caused the most recent page fault */
+static inline u_long
+rcr2()
+{
+	u_long	data;
+	__asm __volatile("movl %%cr2,%%eax" : "=a" (data));
+	return data;
+}
+
+#else /* not __GNUC__ */
+extern void insque __P((void *, void *));
+extern void remque __P((void *));
+
+int	bdb		__P((void));
+void	disable_intr	__P((void));
+void	enable_intr	__P((void));
+u_char	inb		__P((u_int port));
+void	outb		__P((u_int port, u_int data));	/* XXX - incompat */
+
+#endif	/* __GNUC__ */
+
+void load_cr0 __P((u_int cr0));
+u_int rcr0 __P((void));
+void load_cr3(u_long);
+u_long rcr3(void);
+
+void setidt __P((int, void (*)(), int, int));
+extern u_long kvtop(void *);
+extern void outw(int /*u_short*/, int /*u_short*/); /* XXX inline!*/
+extern void outsb(int /*u_short*/, void *, size_t);
+extern void outsw(int /*u_short*/, void *, size_t);
+extern void insw(int /*u_short*/, void *, size_t);
+extern void fillw(int /*u_short*/, void *, size_t);
+extern void filli(int, void *, size_t);
+
+#endif /* _MACHINE_CPUFUNC_H_ */
diff --git a/sys/amd64/include/cputypes.h b/sys/amd64/include/cputypes.h
new file mode 100644
index 0000000..c85fe19
--- /dev/null
+++ b/sys/amd64/include/cputypes.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 1993 Christopher G. Demetriou
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: cputypes.h,v 1.1 1993/10/08 13:40:54 rgrimes Exp $
+ */
+
+#ifndef _MACHINE_CPUTYPES_H_
+#define _MACHINE_CPUTYPES_H_ 1
+
+/*
+ * Classes of Processor
+ */
+
+#define CPUCLASS_286 0
+#define CPUCLASS_386 1
+#define CPUCLASS_486 2
+#define CPUCLASS_586 3
+
+/*
+ * Kinds of Processor
+ */
+
+#define CPU_286 0 /* Intel 80286 */
+#define CPU_386SX 1 /* Intel 80386SX */
+#define CPU_386 2 /* Intel 80386DX */
+#define CPU_486SX 3 /* Intel 80486SX */
+#define CPU_486 4 /* Intel 80486DX */
+#define CPU_586 5 /* Intel P.....m (I hate lawyers; it's TM) */
+
+#endif /* _MACHINE_CPUTYPES_H_ */
diff --git a/sys/amd64/include/db_machdep.h b/sys/amd64/include/db_machdep.h
new file mode 100644
index 0000000..a3f4064
--- /dev/null
+++ b/sys/amd64/include/db_machdep.h
@@ -0,0 +1,120 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ *
+ * $Id: db_machdep.h,v 1.2 1993/10/16 14:39:10 rgrimes Exp $
+ */
+
+#ifndef _I386_DB_MACHDEP_H_
+#define _I386_DB_MACHDEP_H_
+
+/*
+ * Machine-dependent defines for new kernel debugger.
+ */
+
+
+/* #include <mach/i386/vm_types.h> */
+/* #include <mach/i386/vm_param.h> */
+#include <vm/vm_prot.h>
+#include <vm/vm_param.h>
+#include <vm/vm_inherit.h>
+#include <vm/lock.h>
+/* #include <i386/thread.h> */ /* for thread_status */
+#include <machine/frame.h> /* for struct trapframe */
+/* #include <i386/eflags.h> */
+#include <machine/eflags.h> /* from Mach... */
+/* #include <i386/trap.h> */
+#include <machine/trap.h>
+
+#define i386_saved_state trapframe
+/* end of mangling */
+
+typedef vm_offset_t db_addr_t; /* address - unsigned */
+typedef int db_expr_t; /* expression - signed */
+
+typedef struct i386_saved_state db_regs_t;
+extern db_regs_t ddb_regs; /* register state */
+#define DDB_REGS (&ddb_regs)
+
+#define PC_REGS(regs) ((db_addr_t)(regs)->tf_eip)
+
+#define BKPT_INST 0xcc /* breakpoint instruction */
+#define BKPT_SIZE (1) /* size of breakpoint inst */
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define FIXUP_PC_AFTER_BREAK ddb_regs.tf_eip -= 1;
+
+#define db_clear_single_step(regs) ((regs)->tf_eflags &= ~EFL_TF)
+#define db_set_single_step(regs) ((regs)->tf_eflags |= EFL_TF)
+
+/* #define IS_BREAKPOINT_TRAP(type, code) ((type) == T_INT3) */
+/* #define IS_WATCHPOINT_TRAP(type, code) ((type) == T_WATCHPOINT) */
+/* using the 386bsd values, rather than the Mach ones: */
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_BPTFLT)
+#define IS_WATCHPOINT_TRAP(type, code) ((type) == T_KDBTRAP)
+
+#define I_CALL 0xe8
+#define I_CALLI 0xff
+#define I_RET 0xc3
+#define I_IRET 0xcf
+
+#define inst_trap_return(ins) (((ins)&0xff) == I_IRET)
+#define inst_return(ins) (((ins)&0xff) == I_RET)
+#define inst_call(ins) (((ins)&0xff) == I_CALL || \
+ (((ins)&0xff) == I_CALLI && \
+ ((ins)&0x3800) == 0x1000))
+#define inst_load(ins) 0
+#define inst_store(ins) 0
+
+/* access capability and access macros */
+
+#define DB_ACCESS_LEVEL 2 /* access any space */
+#define DB_CHECK_ACCESS(addr,size,task) \
+ db_check_access(addr,size,task)
+#define DB_PHYS_EQ(task1,addr1,task2,addr2) \
+ db_phys_eq(task1,addr1,task2,addr2)
+#define DB_VALID_KERN_ADDR(addr) \
+ ((addr) >= VM_MIN_KERNEL_ADDRESS && \
+ (addr) < VM_MAX_KERNEL_ADDRESS)
+#define DB_VALID_ADDRESS(addr,user) \
+ ((!(user) && DB_VALID_KERN_ADDR(addr)) || \
+ ((user) && (addr) < VM_MIN_KERNEL_ADDRESS))
+
+boolean_t db_check_access(/* vm_offset_t, int, task_t */);
+boolean_t db_phys_eq(/* task_t, vm_offset_t, task_t, vm_offset_t */);
+
+/* macros for printing OS server dependent task name */
+
+#define DB_TASK_NAME(task) db_task_name(task)
+#define DB_TASK_NAME_TITLE "COMMAND "
+#define DB_TASK_NAME_LEN 23
+#define DB_NULL_TASK_NAME "? "
+
+void db_task_name(/* task_t */);
+
+/* macro for checking if a thread has used floating-point */
+
+#define db_thread_fp_used(thread) ((thread)->pcb->ims.ifps != 0)
+
+#endif /* _I386_DB_MACHDEP_H_ */
diff --git a/sys/amd64/include/exec.h b/sys/amd64/include/exec.h
new file mode 100644
index 0000000..f63ec49
--- /dev/null
+++ b/sys/amd64/include/exec.h
@@ -0,0 +1,128 @@
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)exec.h 8.1 (Berkeley) 6/11/93
+ */
+
+#ifndef _EXEC_H_
+#define _EXEC_H_
+
+#define __LDPGSZ 4096
+
+/* Valid magic number check. */
+#define N_BADMAG(ex) \
+ (N_GETMAGIC(ex) != OMAGIC && N_GETMAGIC(ex) != NMAGIC && \
+ N_GETMAGIC(ex) != ZMAGIC && N_GETMAGIC(ex) != QMAGIC && \
+ N_GETMAGIC_NET(ex) != OMAGIC && N_GETMAGIC_NET(ex) != NMAGIC && \
+ N_GETMAGIC_NET(ex) != ZMAGIC && N_GETMAGIC_NET(ex) != QMAGIC)
+
+#define N_ALIGN(ex,x) \
+ (N_GETMAGIC(ex) == ZMAGIC || N_GETMAGIC(ex) == QMAGIC || \
+ N_GETMAGIC_NET(ex) == ZMAGIC || N_GETMAGIC_NET(ex) == QMAGIC ? \
+ ((x) + __LDPGSZ - 1) & ~(__LDPGSZ - 1) : (x))
+
+/* Address of the bottom of the text segment. */
+#define N_TXTADDR(ex) \
+ ((N_GETMAGIC(ex) == OMAGIC || N_GETMAGIC(ex) == NMAGIC || \
+ N_GETMAGIC(ex) == ZMAGIC) ? 0 : __LDPGSZ)
+
+/* Address of the bottom of the data segment. */
+#define N_DATADDR(ex) \
+ N_ALIGN(ex, N_TXTADDR(ex) + (ex).a_text)
+
+#define N_GETMAGIC(ex) \
+ ( (ex).a_midmag & 0xffff )
+#define N_GETMID(ex) \
+ ( (N_GETMAGIC_NET(ex) == ZMAGIC) ? N_GETMID_NET(ex) : \
+ ((ex).a_midmag >> 16) & 0x03ff )
+#define N_GETFLAG(ex) \
+ ( (N_GETMAGIC_NET(ex) == ZMAGIC) ? N_GETFLAG_NET(ex) : \
+ ((ex).a_midmag >> 26) & 0x3f )
+#define N_SETMAGIC(ex,mag,mid,flag) \
+ ( (ex).a_midmag = (((flag) & 0x3f) <<26) | (((mid) & 0x03ff) << 16) | \
+ ((mag) & 0xffff) )
+
+#define N_GETMAGIC_NET(ex) \
+ (ntohl((ex).a_midmag) & 0xffff)
+#define N_GETMID_NET(ex) \
+ ((ntohl((ex).a_midmag) >> 16) & 0x03ff)
+#define N_GETFLAG_NET(ex) \
+ ((ntohl((ex).a_midmag) >> 26) & 0x3f)
+#define N_SETMAGIC_NET(ex,mag,mid,flag) \
+ ( (ex).a_midmag = htonl( (((flag)&0x3f)<<26) | (((mid)&0x03ff)<<16) | \
+ (((mag)&0xffff)) ) )
+
+/* Text segment offset. */
+#define N_TXTOFF(ex) \
+ (N_GETMAGIC(ex) == ZMAGIC ? __LDPGSZ : (N_GETMAGIC(ex) == QMAGIC || \
+ N_GETMAGIC_NET(ex) == ZMAGIC) ? 0 : sizeof(struct exec))
+
+/* Data segment offset. */
+#define N_DATOFF(ex) \
+ N_ALIGN(ex, N_TXTOFF(ex) + (ex).a_text)
+
+/* Relocation table offset. */
+#define N_RELOFF(ex) \
+ N_ALIGN(ex, N_DATOFF(ex) + (ex).a_data)
+
+/* Symbol table offset. */
+#define N_SYMOFF(ex) \
+ (N_RELOFF(ex) + (ex).a_trsize + (ex).a_drsize)
+
+/* String table offset. */
+#define N_STROFF(ex) (N_SYMOFF(ex) + (ex).a_syms)
+
+/*
+ * Header prepended to each a.out file.
+ * only manipulate the a_midmag field via the
+ * N_SETMAGIC/N_GET{MAGIC,MID,FLAG} macros in a.out.h
+ */
+
+struct exec {
+unsigned long a_midmag; /* htonl(flags<<26 | mid<<16 | magic) */
+unsigned long a_text; /* text segment size */
+unsigned long a_data; /* initialized data size */
+unsigned long a_bss; /* uninitialized data size */
+unsigned long a_syms; /* symbol table size */
+unsigned long a_entry; /* entry point */
+unsigned long a_trsize; /* text relocation size */
+unsigned long a_drsize; /* data relocation size */
+};
+#define a_magic a_midmag /* XXX Hack to work with current kern_execve.c */
+
+/* a_magic */
+#define OMAGIC 0407 /* old impure format */
+#define NMAGIC 0410 /* read-only text */
+#define ZMAGIC 0413 /* demand load format */
+#define QMAGIC 0314 /* "compact" demand load format */
+
+#endif /* !_EXEC_H_ */
diff --git a/sys/amd64/include/float.h b/sys/amd64/include/float.h
new file mode 100644
index 0000000..fb5967e
--- /dev/null
+++ b/sys/amd64/include/float.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)float.h 7.1 (Berkeley) 5/8/90
+ * $Id: float.h,v 1.4 1993/10/16 14:39:16 rgrimes Exp $
+ */
+
+#ifndef _MACHINE_FLOAT_H_
+#define _MACHINE_FLOAT_H_ 1
+
+#define FLT_RADIX 2 /* b */
+#define FLT_ROUNDS 1 /* FP addition rounds to nearest */
+
+#define FLT_MANT_DIG 24 /* p */
+#define FLT_EPSILON 1.19209290E-07F /* b**(1-p) */
+#define FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */
+#define FLT_MIN_EXP (-125) /* emin */
+#define FLT_MIN 1.17549435E-38F /* b**(emin-1) */
+#define FLT_MIN_10_EXP (-37) /* ceil(log10(b**(emin-1))) */
+#define FLT_MAX_EXP 128 /* emax */
+#define FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */
+#define FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */
+
+#define DBL_MANT_DIG 53
+#define DBL_EPSILON 2.2204460492503131E-16
+#define DBL_DIG 15
+#define DBL_MIN_EXP (-1021)
+#define DBL_MIN 2.2250738585072014E-308
+#define DBL_MIN_10_EXP (-307)
+#define DBL_MAX_EXP 1024
+#define DBL_MAX 1.7976931348623157E+308
+#define DBL_MAX_10_EXP 308
+
+#define LDBL_MANT_DIG DBL_MANT_DIG
+#define LDBL_EPSILON DBL_EPSILON
+#define LDBL_DIG DBL_DIG
+#define LDBL_MIN_EXP DBL_MIN_EXP
+#define LDBL_MIN DBL_MIN
+#define LDBL_MIN_10_EXP DBL_MIN_10_EXP
+#define LDBL_MAX_EXP DBL_MAX_EXP
+#define LDBL_MAX DBL_MAX
+#define LDBL_MAX_10_EXP DBL_MAX_10_EXP
+#endif /* _MACHINE_FLOAT_H_ */
diff --git a/sys/amd64/include/floatingpoint.h b/sys/amd64/include/floatingpoint.h
new file mode 100644
index 0000000..ed47cf6
--- /dev/null
+++ b/sys/amd64/include/floatingpoint.h
@@ -0,0 +1,109 @@
+/*-
+ * Copyright (c) 1993 Andrew Moore, Talke Studio
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#) floatingpoint.h 1.0 (Berkeley) 9/23/93
+ * $Id: floatingpoint.h,v 1.3 1993/10/16 14:39:18 rgrimes Exp $
+ */
+
+/*
+ * IEEE floating point structure and function definitions
+ */
+
+#ifndef _FLOATINGPOINT_H_
+#define _FLOATINGPOINT_H_
+
+#include <sys/cdefs.h>
+#include <sys/ieeefp.h>
+
+#ifdef __GNUC__
+
+#ifdef __i386__
+
+#define fnstcw(addr) __asm("fnstcw %0" : "=m" (*addr) : "0" (*addr))
+#define fnstsw(addr) __asm("fnstsw %0" : "=m" (*addr) : "0" (*addr))
+#define fnstenv(addr) __asm("fnstenv %0" : "=m" (*addr) : "0" (*addr))
+#define fldenv(addr) __asm("fldenv %0" : : "m" (*addr))
+
+
+/*
+ * return the contents of a FP register
+ */
+static __inline__ int
+__fpgetreg(int _reg)
+{
+ unsigned short _mem;
+
+ switch(_reg) {
+ default:
+ fnstcw(&_mem);
+ break;
+ case FP_STKY_REG:
+ fnstsw(&_mem);
+ break;
+ }
+ return _mem;
+}
+
+/*
+ * set a FP mode; return previous mode
+ */
+static __inline__ int
+__fpsetreg(int _m, int _reg, int _fld, int _off)
+{
+ unsigned _env[7];
+ unsigned _p;
+
+ fnstenv(_env);
+ _p = (_env[_reg] & _fld) >> _off;
+ _env[_reg] = (_env[_reg] & ~_fld) | (_m << _off & _fld);
+ fldenv(_env);
+ return _p;
+}
+
+#endif /* __i386__ */
+
+#endif /* __GNUC__ */
+
+/*
+ * SysV/386 FP control interface
+ */
+#define fpgetround() ((__fpgetreg(FP_RND_REG) & FP_RND_FLD) >> FP_RND_OFF)
+#define fpsetround(m) __fpsetreg((m), FP_RND_REG, FP_RND_FLD, FP_RND_OFF)
+#define fpgetprec() ((__fpgetreg(FP_PRC_REG) & FP_PRC_FLD) >> FP_PRC_OFF)
+#define fpsetprec(m) __fpsetreg((m), FP_PRC_REG, FP_PRC_FLD, FP_PRC_OFF)
+#define fpgetmask() ((~__fpgetreg(FP_MSKS_REG) & FP_MSKS_FLD) >> FP_MSKS_OFF)
+#define fpsetmask(m) __fpsetreg(~(m), FP_MSKS_REG, FP_MSKS_FLD, FP_MSKS_OFF)
+#define fpgetsticky() ((__fpgetreg(FP_STKY_REG) & FP_STKY_FLD) >> FP_STKY_OFF)
+#define fpresetsticky(m) __fpsetreg(0, FP_STKY_REG, (m), FP_STKY_OFF)
+#define fpsetsticky(m) fpresetsticky(m)
+
+#endif /* !_FLOATINGPOINT_H_ */
diff --git a/sys/amd64/include/fpu.h b/sys/amd64/include/fpu.h
new file mode 100644
index 0000000..87cd6f9
--- /dev/null
+++ b/sys/amd64/include/fpu.h
@@ -0,0 +1,141 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)npx.h 5.3 (Berkeley) 1/18/91
+ * $Id: npx.h,v 1.2 1993/10/16 14:39:22 rgrimes Exp $
+ */
+
+/*
+ * 287/387 NPX Coprocessor Data Structures and Constants
+ * W. Jolitz 1/90
+ */
+
+#ifndef ___NPX87___
+#define ___NPX87___
+
+/* Environment information of floating point unit */
+struct env87 {
+ long en_cw; /* control word (16bits) */
+ long en_sw; /* status word (16bits) */
+ long en_tw; /* tag word (16bits) */
+ long en_fip; /* floating point instruction pointer */
+ u_short en_fcs; /* floating code segment selector */
+ u_short en_opcode; /* opcode last executed (11 bits ) */
+ long en_foo; /* floating operand offset */
+ long en_fos; /* floating operand segment selector */
+};
+
+/* Contents of each floating point accumulator */
+struct fpacc87 {
+#ifdef dontdef /* too unportable */
+ u_long fp_mantlo; /* mantissa low (31:0) */
+ u_long fp_manthi; /* mantissa high (63:32) */
+ int fp_exp:15; /* exponent */
+ int fp_sgn:1; /* mantissa sign */
+#else
+ u_char fp_bytes[10];
+#endif
+};
+
+/* Floating point context */
+struct save87 {
+ struct env87 sv_env; /* floating point control/status */
+ struct fpacc87 sv_ac[8]; /* accumulator contents, 0-7 */
+ u_long sv_ex_sw; /* status word for last exception (was pad) */
+ u_long sv_ex_tw; /* tag word for last exception (was pad) */
+#ifdef GPL_MATH_EMULATE
+ u_char sv_pad[60];
+#else
+ u_char sv_pad[8 * 2 - 2 * 4]; /* bogus historical padding */
+#endif /* GPL_MATH_EMULATE */
+};
+
+/* Cyrix EMC memory - mapped coprocessor context switch information */
+struct emcsts {
+ long em_msw; /* memory mapped status register when swtched */
+ long em_tar; /* memory mapped temp A register when swtched */
+ long em_dl; /* memory mapped D low register when swtched */
+};
+
+/* Intel prefers long real (53 bit) precision */
+#define __iBCS_NPXCW__ 0x262
+/* wfj prefers temporary real (64 bit) precision */
+#define __386BSD_NPXCW__ 0x362
+/*
+ * bde prefers 53 bit precision and all exceptions masked.
+ *
+ * The standard control word from finit is 0x37F, giving:
+ *
+ * round to nearest
+ * 64-bit precision
+ * all exceptions masked.
+ *
+ * Now I want:
+ *
+ * affine mode for 287's (if they work at all) (1 in bitfield 1<<12)
+ * 53-bit precision (2 in bitfield 3<<8)
+ * overflow exception unmasked (0 in bitfield 1<<3)
+ * zero divide exception unmasked (0 in bitfield 1<<2)
+ * invalid-operand exception unmasked (0 in bitfield 1<<0).
+ *
+ * 64-bit precision often gives bad results with high level languages
+ * because it makes the results of calculations depend on whether
+ * intermediate values are stored in memory or in FPU registers.
+ *
+ * The "Intel" and wfj control words have:
+ *
+ * underflow exception unmasked (0 in bitfield 1<<4)
+ *
+ * but that causes an unexpected exception in the test program 'paranoia'
+ * and makes denormals useless (DBL_MIN / 2 underflows). It doesn't make
+ * a lot of sense to trap underflow without trapping denormals.
+ *
+ * Later I will want the IEEE default of all exceptions masked. See the
+ * 0.0 math manpage for why this is better. The 0.1 math manpage is empty.
+ */
+#define __BDE_NPXCW__ 0x1272
+#define __BETTER_BDE_NPXCW__ 0x127f
+
+#ifdef __BROKEN_NPXCW__
+#ifdef __386BSD__
+#define __INITIAL_NPXCW__ __386BSD_NPXCW__
+#else
+#define __INITIAL_NPXCW__ __iBCS_NPXCW__
+#endif
+#else
+#define __INITIAL_NPXCW__ __BDE_NPXCW__
+#endif
+
+#endif ___NPX87___
diff --git a/sys/amd64/include/frame.h b/sys/amd64/include/frame.h
new file mode 100644
index 0000000..db2993e
--- /dev/null
+++ b/sys/amd64/include/frame.h
@@ -0,0 +1,140 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)frame.h 5.2 (Berkeley) 1/18/91
+ * $Id: frame.h,v 1.7 1994/01/03 07:55:32 davidg Exp $
+ */
+
+#ifndef _MACHINE_FRAME_H_
+#define _MACHINE_FRAME_H_ 1
+
+#include <sys/signal.h>
+
+/*
+ * System stack frames.
+ */
+
+/*
+ * Exception/Trap Stack Frame
+ */
+
+struct trapframe {
+ int tf_es;
+ int tf_ds;
+ int tf_edi;
+ int tf_esi;
+ int tf_ebp;
+ int tf_isp;
+ int tf_ebx;
+ int tf_edx;
+ int tf_ecx;
+ int tf_eax;
+ int tf_trapno;
+ /* below portion defined in 386 hardware */
+ int tf_err;
+ int tf_eip;
+ int tf_cs;
+ int tf_eflags;
+ /* below only when transitting rings (e.g. user to kernel) */
+ int tf_esp;
+ int tf_ss;
+};
+
+extern int kdb_trap(int, int, struct trapframe *);
+
+/* Interrupt stack frame */
+
+struct intrframe {
+ int if_vec;
+ int if_ppl;
+ int if_es;
+ int if_ds;
+ int if_edi;
+ int if_esi;
+ int if_ebp;
+ int :32;
+ int if_ebx;
+ int if_edx;
+ int if_ecx;
+ int if_eax;
+ int :32; /* for compat with trap frame - trapno */
+ int :32; /* for compat with trap frame - err */
+ /* below portion defined in 386 hardware */
+ int if_eip;
+ int if_cs;
+ int if_eflags;
+ /* below only when transitting rings (e.g. user to kernel) */
+ int if_esp;
+ int if_ss;
+};
+
+/* frame of clock (same as interrupt frame) */
+
+struct clockframe {
+ int cf_vec;
+ int cf_ppl;
+ int cf_es;
+ int cf_ds;
+ int cf_edi;
+ int cf_esi;
+ int cf_ebp;
+ int :32;
+ int cf_ebx;
+ int cf_edx;
+ int cf_ecx;
+ int cf_eax;
+ int :32; /* for compat with trap frame - trapno */
+ int :32; /* for compat with trap frame - err */
+ /* below portion defined in 386 hardware */
+ int cf_eip;
+ int cf_cs;
+ int cf_eflags;
+ /* below only when transitting rings (e.g. user to kernel) */
+ int cf_esp;
+ int cf_ss;
+};
+
+/*
+ * Signal frame
+ */
+struct sigframe {
+ int sf_signum;
+ int sf_code;
+ struct sigcontext *sf_scp;
+ char *sf_addr;
+ sig_t sf_handler;
+ struct sigcontext sf_sc;
+};
+#endif /* _MACHINE_FRAME_H_ */
diff --git a/sys/amd64/include/npx.h b/sys/amd64/include/npx.h
new file mode 100644
index 0000000..87cd6f9
--- /dev/null
+++ b/sys/amd64/include/npx.h
@@ -0,0 +1,141 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)npx.h 5.3 (Berkeley) 1/18/91
+ * $Id: npx.h,v 1.2 1993/10/16 14:39:22 rgrimes Exp $
+ */
+
+/*
+ * 287/387 NPX Coprocessor Data Structures and Constants
+ * W. Jolitz 1/90
+ */
+
+#ifndef ___NPX87___
+#define ___NPX87___
+
+/* Environment information of floating point unit */
+struct env87 {
+ long en_cw; /* control word (16bits) */
+ long en_sw; /* status word (16bits) */
+ long en_tw; /* tag word (16bits) */
+ long en_fip; /* floating point instruction pointer */
+ u_short en_fcs; /* floating code segment selector */
+ u_short en_opcode; /* opcode last executed (11 bits ) */
+ long en_foo; /* floating operand offset */
+ long en_fos; /* floating operand segment selector */
+};
+
+/* Contents of each floating point accumulator */
+struct fpacc87 {
+#ifdef dontdef /* too unportable */
+ u_long fp_mantlo; /* mantissa low (31:0) */
+ u_long fp_manthi; /* mantissa high (63:32) */
+ int fp_exp:15; /* exponent */
+ int fp_sgn:1; /* mantissa sign */
+#else
+ u_char fp_bytes[10];
+#endif
+};
+
+/* Floating point context */
+struct save87 {
+ struct env87 sv_env; /* floating point control/status */
+ struct fpacc87 sv_ac[8]; /* accumulator contents, 0-7 */
+ u_long sv_ex_sw; /* status word for last exception (was pad) */
+ u_long sv_ex_tw; /* tag word for last exception (was pad) */
+#ifdef GPL_MATH_EMULATE
+ u_char sv_pad[60];
+#else
+ u_char sv_pad[8 * 2 - 2 * 4]; /* bogus historical padding */
+#endif /* GPL_MATH_EMULATE */
+};
+
+/* Cyrix EMC memory - mapped coprocessor context switch information */
+struct emcsts {
+ long em_msw; /* memory mapped status register when swtched */
+ long em_tar; /* memory mapped temp A register when swtched */
+ long em_dl; /* memory mapped D low register when swtched */
+};
+
+/* Intel prefers long real (53 bit) precision */
+#define __iBCS_NPXCW__ 0x262
+/* wfj prefers temporary real (64 bit) precision */
+#define __386BSD_NPXCW__ 0x362
+/*
+ * bde prefers 53 bit precision and all exceptions masked.
+ *
+ * The standard control word from finit is 0x37F, giving:
+ *
+ * round to nearest
+ * 64-bit precision
+ * all exceptions masked.
+ *
+ * Now I want:
+ *
+ * affine mode for 287's (if they work at all) (1 in bitfield 1<<12)
+ * 53-bit precision (2 in bitfield 3<<8)
+ * overflow exception unmasked (0 in bitfield 1<<3)
+ * zero divide exception unmasked (0 in bitfield 1<<2)
+ * invalid-operand exception unmasked (0 in bitfield 1<<0).
+ *
+ * 64-bit precision often gives bad results with high level languages
+ * because it makes the results of calculations depend on whether
+ * intermediate values are stored in memory or in FPU registers.
+ *
+ * The "Intel" and wfj control words have:
+ *
+ * underflow exception unmasked (0 in bitfield 1<<4)
+ *
+ * but that causes an unexpected exception in the test program 'paranoia'
+ * and makes denormals useless (DBL_MIN / 2 underflows). It doesn't make
+ * a lot of sense to trap underflow without trapping denormals.
+ *
+ * Later I will want the IEEE default of all exceptions masked. See the
+ * 0.0 math manpage for why this is better. The 0.1 math manpage is empty.
+ */
+#define __BDE_NPXCW__ 0x1272
+#define __BETTER_BDE_NPXCW__ 0x127f
+
+#ifdef __BROKEN_NPXCW__
+#ifdef __386BSD__
+#define __INITIAL_NPXCW__ __386BSD_NPXCW__
+#else
+#define __INITIAL_NPXCW__ __iBCS_NPXCW__
+#endif
+#else
+#define __INITIAL_NPXCW__ __BDE_NPXCW__
+#endif
+
+#endif ___NPX87___
diff --git a/sys/amd64/include/pc/display.h b/sys/amd64/include/pc/display.h
new file mode 100644
index 0000000..9e64a3f
--- /dev/null
+++ b/sys/amd64/include/pc/display.h
@@ -0,0 +1,45 @@
+/*
+ * IBM PC display definitions
+ *
+ * $Id$
+ */
+
+/* Color attributes for foreground text */
+
+#define FG_BLACK 0
+#define FG_BLUE 1
+#define FG_GREEN 2
+#define FG_CYAN 3
+#define FG_RED 4
+#define FG_MAGENTA 5
+#define FG_BROWN 6
+#define FG_LIGHTGREY 7
+#define FG_DARKGREY 8
+#define FG_LIGHTBLUE 9
+#define FG_LIGHTGREEN 10
+#define FG_LIGHTCYAN 11
+#define FG_LIGHTRED 12
+#define FG_LIGHTMAGENTA 13
+#define FG_YELLOW 14
+#define FG_WHITE 15
+#define FG_BLINK 0x80
+
+/* Color attributes for text background */
+
+#define BG_BLACK 0x00
+#define BG_BLUE 0x10
+#define BG_GREEN 0x20
+#define BG_CYAN 0x30
+#define BG_RED 0x40
+#define BG_MAGENTA 0x50
+#define BG_BROWN 0x60
+#define BG_LIGHTGREY 0x70
+
+/* Monochrome attributes for foreground text */
+
+#define FG_UNDERLINE 0x01
+#define FG_INTENSE 0x08
+
+/* Monochrome attributes for text background */
+
+#define BG_INTENSE 0x10
diff --git a/sys/amd64/include/pcb.h b/sys/amd64/include/pcb.h
new file mode 100644
index 0000000..990e5f9
--- /dev/null
+++ b/sys/amd64/include/pcb.h
@@ -0,0 +1,93 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pcb.h 5.10 (Berkeley) 5/12/91
+ * $Id: pcb.h,v 1.3 1993/11/07 17:42:59 wollman Exp $
+ */
+
+#ifndef _I386_PCB_H_
+#define _I386_PCB_H_
+
+/*
+ * Intel 386 process control block
+ */
+#include "machine/tss.h"
+#include "machine/npx.h"
+
+struct pcb {
+ struct i386tss pcb_tss;
+#define pcb_ksp pcb_tss.tss_esp0
+#define pcb_ptd pcb_tss.tss_cr3
+#define pcb_cr3 pcb_ptd
+#define pcb_pc pcb_tss.tss_eip
+#define pcb_psl pcb_tss.tss_eflags
+#define pcb_usp pcb_tss.tss_esp
+#define pcb_fp pcb_tss.tss_ebp
+#ifdef notyet
+ u_char pcb_iomap[NPORT/sizeof(u_char)]; /* i/o port bitmap */
+#endif
+ caddr_t pcb_ldt; /* per process (user) LDT */
+ int pcb_ldt_len; /* number of LDT entries */
+ struct save87 pcb_savefpu; /* floating point state for 287/387 */
+ struct emcsts pcb_saveemc; /* Cyrix EMC state */
+/*
+ * Software pcb (extension)
+ */
+ int pcb_flags;
+#ifdef notused
+#define FP_WASUSED 0x01 /* process has used fltng pnt hardware */
+#define FP_NEEDSSAVE 0x02 /* ... that needs save on next context switch */
+#define FP_NEEDSRESTORE 0x04 /* ... that needs restore on next DNA fault */
+#endif
+#define FP_USESEMC 0x08 /* process uses EMC memory-mapped mode */
+#define FP_SOFTFP 0x20 /* process using software fltng pnt emulator */
+ short pcb_iml; /* interrupt mask level */
+ caddr_t pcb_onfault; /* copyin/out fault recovery */
+ long pcb_sigc[8]; /* XXX signal code trampoline */
+ int pcb_cmap2; /* XXX temporary PTE - will prefault instead */
+};
+
+/*
+ * The pcb is augmented with machine-dependent additional data for
+ * core dumps. For the i386: ???
+ */
+struct md_coredump {
+};
+
+#ifdef KERNEL
+extern struct pcb *curpcb; /* our current running pcb */
+#endif
+
+#endif /* _I386_PCB_H_ */
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
new file mode 100644
index 0000000..7ddcebd
--- /dev/null
+++ b/sys/amd64/include/pmap.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Derived from hp300 version by Mike Hibler, this version by William
+ * Jolitz uses a recursive map [a pde points to the page directory] to
+ * map the page tables using the pagetables themselves. This is done to
+ * reduce the impact on kernel virtual memory for lots of sparse address
+ * space, and to reduce the cost of memory to each process.
+ *
+ * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
+ * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
+ * $Id: pmap.h,v 1.12 1994/03/24 23:12:48 davidg Exp $
+ */
+
+#ifndef _PMAP_MACHINE_
+#define _PMAP_MACHINE_ 1
+
+#include <machine/pte.h>
+
+typedef unsigned int *pd_entry_t;
+typedef unsigned int *pt_entry_t;
+
+/*
+ * NKPDE controls the virtual space of the kernel, what ever is left, minus
+ * the alternate page table area is given to the user (NUPDE)
+ */
+/*
+ * NKPDE controls the virtual space of the kernel, what ever is left is
+ * given to the user (NUPDE)
+ */
+#ifndef NKPT
+#define NKPT 24 /* actual number of kernel pte's */
+#endif
+#ifndef NKPDE
+#define NKPDE 63 /* addressable number of kpte's */
+#endif
+
+#define NUPDE (NPTEPG-NKPDE) /* number of user pde's */
+
+/*
+ * The *PTDI values control the layout of virtual memory
+ *
+ * XXX This works for now, but I am not real happy with it, I'll fix it
+ * right after I fix locore.s and the magic 28K hole
+ */
+#define APTDPTDI (NPTEPG-1) /* alt ptd entry that points to APTD */
+#define KPTDI (APTDPTDI-NKPDE)/* start of kernel virtual pde's */
+#define PTDPTDI (KPTDI-1) /* ptd entry that points to ptd! */
+#define KSTKPTDI (PTDPTDI-1) /* ptd entry for u./kernel&user stack */
+#define KSTKPTEOFF (NBPG/sizeof(pd_entry_t)-UPAGES) /* pte entry for kernel stack */
+
+#define PDESIZE sizeof(pd_entry_t) /* for assembly files */
+#define PTESIZE sizeof(pt_entry_t) /* for assembly files */
+
+/*
+ * Address of current and alternate address space page table maps
+ * and directories.
+ */
+#ifdef KERNEL
+extern pt_entry_t PTmap[], APTmap[], Upte;
+extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde, Upde;
+
+extern int IdlePTD; /* physical address of "Idle" state directory */
+#endif
+
+/*
+ * virtual address to page table entry and
+ * to physical address. Likewise for alternate address space.
+ * Note: these work recursively, thus vtopte of a pte will give
+ * the corresponding pde that in turn maps it.
+ */
+#define vtopte(va) (PTmap + i386_btop(va))
+#define kvtopte(va) vtopte(va)
+#define ptetov(pt) (i386_ptob(pt - PTmap))
+#define vtophys(va) (((int) (*vtopte(va))&PG_FRAME) | ((int)(va) & PGOFSET))
+#define ispt(va) ((va) >= UPT_MIN_ADDRESS && (va) <= KPT_MAX_ADDRESS)
+
+#define avtopte(va) (APTmap + i386_btop(va))
+#define ptetoav(pt) (i386_ptob(pt - APTmap))
+#define avtophys(va) (((int) (*avtopte(va))&PG_FRAME) | ((int)(va) & PGOFSET))
+
+#ifdef KERNEL
+/*
+ * Routine: pmap_kextract
+ * Function:
+ * Extract the physical page address associated
+ * kernel virtual address.
+ */
+static inline vm_offset_t
+pmap_kextract(va)
+ vm_offset_t va;
+{
+ vm_offset_t pa = *(int *)vtopte(va);
+ pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
+ return pa;
+}
+#endif
+
+/*
+ * macros to generate page directory/table indicies
+ */
+
+#define pdei(va) (((va)&PD_MASK)>>PD_SHIFT)
+#define ptei(va) (((va)&PT_MASK)>>PG_SHIFT)
+
+/*
+ * Pmap stuff
+ */
+
+struct pmap {
+ pd_entry_t *pm_pdir; /* KVA of page directory */
+ boolean_t pm_pdchanged; /* pdir changed */
+ short pm_dref; /* page directory ref count */
+ short pm_count; /* pmap reference count */
+ simple_lock_data_t pm_lock; /* lock on pmap */
+ struct pmap_statistics pm_stats; /* pmap statistics */
+ long pm_ptpages; /* more stats: PT pages */
+};
+
+typedef struct pmap *pmap_t;
+
+#ifdef KERNEL
+extern pmap_t kernel_pmap;
+#endif
+
+/*
+ * Macros for speed
+ */
+#define PMAP_ACTIVATE(pmapp, pcbp) \
+ if ((pmapp) != NULL /*&& (pmapp)->pm_pdchanged */) { \
+ (pcbp)->pcb_cr3 = \
+ pmap_extract(kernel_pmap, (vm_offset_t)(pmapp)->pm_pdir); \
+ if ((pmapp) == &curproc->p_vmspace->vm_pmap) \
+ load_cr3((pcbp)->pcb_cr3); \
+ (pmapp)->pm_pdchanged = FALSE; \
+ }
+
+#define PMAP_DEACTIVATE(pmapp, pcbp)
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
+ */
+typedef struct pv_entry {
+ struct pv_entry *pv_next; /* next pv_entry */
+ pmap_t pv_pmap; /* pmap where mapping lies */
+ vm_offset_t pv_va; /* virtual address for mapping */
+} *pv_entry_t;
+
+#define PV_ENTRY_NULL ((pv_entry_t) 0)
+
+#define PV_CI 0x01 /* all entries must be cache inhibited */
+#define PV_PTPAGE 0x02 /* entry maps a page table page */
+
+#ifdef KERNEL
+
+pv_entry_t pv_table; /* array of entries, one per page */
+
+#define pa_index(pa) atop(pa - vm_first_phys)
+#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
+
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+
+extern pmap_t pmap_create(vm_size_t);
+extern void pmap_pinit(struct pmap *);
+extern void pmap_destroy(pmap_t);
+extern void pmap_release(struct pmap *);
+extern void pmap_reference(pmap_t);
+extern void pmap_remove(struct pmap *, vm_offset_t, vm_offset_t);
+extern void pmap_protect(struct pmap *, vm_offset_t, vm_offset_t, vm_prot_t);
+extern void pmap_enter(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
+extern void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
+extern inline pt_entry_t *pmap_pte(pmap_t, vm_offset_t);
+extern vm_offset_t pmap_extract(pmap_t, vm_offset_t);
+extern void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
+extern void pmap_collect(pmap_t);
+struct pcb; extern void pmap_activate(pmap_t, struct pcb *);
+extern pmap_t pmap_kernel(void);
+extern void pmap_pageable(pmap_t, vm_offset_t, vm_offset_t, boolean_t);
+
+
+#endif /* KERNEL */
+
+#endif /* _PMAP_MACHINE_ */
diff --git a/sys/amd64/include/proc.h b/sys/amd64/include/proc.h
new file mode 100644
index 0000000..92de3af
--- /dev/null
+++ b/sys/amd64/include/proc.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)proc.h 7.1 (Berkeley) 5/15/91
+ * $Id: proc.h,v 1.2 1993/10/16 14:39:24 rgrimes Exp $
+ */
+
+#ifndef _MACHINE_PROC_H_
+#define _MACHINE_PROC_H_ 1
+
+/*
+ * Machine-dependent part of the proc structure for i386.
+ */
+struct mdproc {
+ int md_flags; /* machine-dependent flags */
+ int *md_regs; /* registers on current frame */
+};
+
+/* md_flags */
+#define MDP_AST 0x0001 /* async trap pending */
+#endif /* _MACHINE_PROC_H_ */
diff --git a/sys/amd64/include/profile.h b/sys/amd64/include/profile.h
new file mode 100644
index 0000000..f30efff
--- /dev/null
+++ b/sys/amd64/include/profile.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)profile.h 8.1 (Berkeley) 6/11/93
+ */
+
+#define _MCOUNT_DECL static inline void _mcount
+
+#define MCOUNT \
+extern void mcount() asm("mcount"); void mcount() { \
+ int selfpc, frompcindex; \
+ /* \
+ * find the return address for mcount, \
+ * and the return address for mcount's caller. \
+ * \
+ * selfpc = pc pushed by mcount call \
+ */ \
+ asm("movl 4(%%ebp),%0" : "=r" (selfpc)); \
+ /* \
+ * frompcindex = pc pushed by jsr into self. \
+ * In GCC the caller's stack frame has already been built so we \
+	 * have to chase %ebp to find caller's raddr.			\
+ */ \
+ asm("movl (%%ebp),%0" : "=r" (frompcindex)); \
+ frompcindex = ((int *)frompcindex)[1]; \
+ _mcount(frompcindex, selfpc); \
+}
diff --git a/sys/amd64/include/psl.h b/sys/amd64/include/psl.h
new file mode 100644
index 0000000..997fb23
--- /dev/null
+++ b/sys/amd64/include/psl.h
@@ -0,0 +1,65 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)psl.h 5.2 (Berkeley) 1/18/91
+ * $Id: psl.h,v 1.3 1993/11/07 17:43:04 wollman Exp $
+ */
+
+#ifndef _MACHINE_PSL_H_
+#define _MACHINE_PSL_H_ 1
+
+/*
+ * 386 processor status longword.
+ */
+#define PSL_C 0x00000001 /* carry bit */
+#define PSL_PF 0x00000004 /* parity bit */
+#define PSL_AF 0x00000010 /* bcd carry bit */
+#define PSL_Z 0x00000040 /* zero bit */
+#define PSL_N 0x00000080 /* negative bit */
+#define PSL_T 0x00000100 /* trace enable bit */
+#define PSL_I 0x00000200 /* interrupt enable bit */
+#define PSL_D 0x00000400 /* string instruction direction bit */
+#define PSL_V 0x00000800 /* overflow bit */
+#define	PSL_IOPL	0x00003000	/* i/o privilege level enable */
+#define PSL_NT 0x00004000 /* nested task bit */
+#define PSL_RF 0x00010000 /* restart flag bit */
+#define PSL_VM 0x00020000 /* virtual 8086 mode bit */
+
+#define PSL_MBZ 0xffc08028 /* must be zero bits */
+#define PSL_MBO 0x00000002 /* must be one bits */
+
+#define PSL_USERSET (PSL_MBO | PSL_I)
+#define PSL_USERCLR (PSL_MBZ | PSL_NT)
+#endif /* _MACHINE_PSL_H_ */
diff --git a/sys/amd64/include/ptrace.h b/sys/amd64/include/ptrace.h
new file mode 100644
index 0000000..bfcc55f
--- /dev/null
+++ b/sys/amd64/include/ptrace.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ptrace.h 8.1 (Berkeley) 6/11/93
+ */
+
+/*
+ * Machine dependent trace commands.
+ *
+ * None for the i386 at this time.
+ */
diff --git a/sys/amd64/include/reg.h b/sys/amd64/include/reg.h
new file mode 100644
index 0000000..2a1f061
--- /dev/null
+++ b/sys/amd64/include/reg.h
@@ -0,0 +1,106 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)reg.h 5.5 (Berkeley) 1/18/91
+ * $Id: reg.h,v 1.6 1994/01/03 07:55:34 davidg Exp $
+ */
+
+#ifndef _MACHINE_REG_H_
+#define _MACHINE_REG_H_ 1
+
+/*
+ * Location of the users' stored
+ * registers within appropriate frame of 'trap' and 'syscall', relative to
+ * base of stack frame.
+ * Normal usage is u.u_ar0[XX] in kernel.
+ */
+
+/* When referenced during a trap/exception, registers are at these offsets */
+
+#define tES (0)
+#define tDS (1)
+#define tEDI (2)
+#define tESI (3)
+#define tEBP (4)
+#define tISP (5)
+#define tEBX (6)
+#define tEDX (7)
+#define tECX (8)
+#define tEAX (9)
+
+#define tERR (11)
+
+#define tEIP (12)
+#define tCS (13)
+#define tEFLAGS (14)
+#define tESP (15)
+#define tSS (16)
+
+/*
+ * Registers accessible to ptrace(2) syscall for debugger
+ * The machine-dependent code for PT_{SET,GET}REGS needs to
+ * use whichever order, defined above, is correct, so that it
+ * is all invisible to the user.
+ */
+struct reg {
+ unsigned int r_es;
+ unsigned int r_ds;
+ unsigned int r_edi;
+ unsigned int r_esi;
+ unsigned int r_ebp;
+ unsigned int r_isp;
+ unsigned int r_ebx;
+ unsigned int r_edx;
+ unsigned int r_ecx;
+ unsigned int r_eax;
+ unsigned int r_trapno;
+ unsigned int r_err;
+ unsigned int r_eip;
+ unsigned int r_cs;
+ unsigned int r_eflags;
+ unsigned int r_esp;
+ unsigned int r_ss;
+};
+
+/*
+ * Register set accessible via /proc/$pid/fpreg
+ */
+struct fpreg {
+#if 0
+ int fpr_xxx; /* not implemented */
+#endif
+};
+
+#endif /* _MACHINE_REG_H_ */
diff --git a/sys/amd64/include/reloc.h b/sys/amd64/include/reloc.h
new file mode 100644
index 0000000..386241d
--- /dev/null
+++ b/sys/amd64/include/reloc.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)reloc.h 8.1 (Berkeley) 6/10/93
+ */
+
+/* Relocation format. */
+struct relocation_info {
+ int r_address; /* offset in text or data segment */
+ unsigned int r_symbolnum : 24, /* ordinal number of add symbol */
+ r_pcrel : 1, /* 1 if value should be pc-relative */
+ r_length : 2, /* log base 2 of value's width */
+ r_extern : 1, /* 1 if need to add symbol to value */
+ : 4; /* reserved */
+};
diff --git a/sys/amd64/include/segments.h b/sys/amd64/include/segments.h
new file mode 100644
index 0000000..023a0cf
--- /dev/null
+++ b/sys/amd64/include/segments.h
@@ -0,0 +1,235 @@
+/*-
+ * Copyright (c) 1989, 1990 William F. Jolitz
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)segments.h 7.1 (Berkeley) 5/9/91
+ * $Id: segments.h,v 1.3 1993/11/07 17:43:08 wollman Exp $
+ */
+
+#ifndef _MACHINE_SEGMENTS_H_
+#define _MACHINE_SEGMENTS_H_ 1
+
+/*
+ * 386 Segmentation Data Structures and definitions
+ * William F. Jolitz (william@ernie.berkeley.edu) 6/20/1989
+ */
+
+/*
+ * Selectors
+ */
+
+#define	ISPL(s)	((s)&3)		/* what is the privilege level of a selector */
+#define	SEL_KPL	0		/* kernel privilege level */	
+#define	SEL_UPL	3		/* user privilege level */	
+#define ISLDT(s) ((s)&SEL_LDT) /* is it local or global */
+#define SEL_LDT 4 /* local descriptor table */
+#define IDXSEL(s) (((s)>>3) & 0x1fff) /* index of selector */
+#define LSEL(s,r) (((s)<<3) | SEL_LDT | r) /* a local selector */
+#define GSEL(s,r) (((s)<<3) | r) /* a global selector */
+
+/*
+ * Memory and System segment descriptors
+ */
+struct segment_descriptor {
+ unsigned sd_lolimit:16 ; /* segment extent (lsb) */
+ unsigned sd_lobase:24 __attribute__ ((packed));
+ /* segment base address (lsb) */
+ unsigned sd_type:5 ; /* segment type */
+ unsigned sd_dpl:2 ; /* segment descriptor priority level */
+ unsigned sd_p:1 ; /* segment descriptor present */
+ unsigned sd_hilimit:4 ; /* segment extent (msb) */
+ unsigned sd_xx:2 ; /* unused */
+ unsigned sd_def32:1 ; /* default 32 vs 16 bit size */
+ unsigned sd_gran:1 ; /* limit granularity (byte/page units)*/
+ unsigned sd_hibase:8 ; /* segment base address (msb) */
+} ;
+
+/*
+ * Gate descriptors (e.g. indirect descriptors)
+ */
+struct gate_descriptor {
+ unsigned gd_looffset:16 ; /* gate offset (lsb) */
+ unsigned gd_selector:16 ; /* gate segment selector */
+ unsigned gd_stkcpy:5 ; /* number of stack wds to cpy */
+ unsigned gd_xx:3 ; /* unused */
+ unsigned gd_type:5 ; /* segment type */
+ unsigned gd_dpl:2 ; /* segment descriptor priority level */
+ unsigned gd_p:1 ; /* segment descriptor present */
+ unsigned gd_hioffset:16 ; /* gate offset (msb) */
+} ;
+
+/*
+ * Generic descriptor
+ */
+union descriptor {
+ struct segment_descriptor sd;
+ struct gate_descriptor gd;
+};
+
+ /* system segments and gate types */
+#define SDT_SYSNULL 0 /* system null */
+#define SDT_SYS286TSS 1 /* system 286 TSS available */
+#define SDT_SYSLDT 2 /* system local descriptor table */
+#define SDT_SYS286BSY 3 /* system 286 TSS busy */
+#define SDT_SYS286CGT 4 /* system 286 call gate */
+#define SDT_SYSTASKGT 5 /* system task gate */
+#define SDT_SYS286IGT 6 /* system 286 interrupt gate */
+#define SDT_SYS286TGT 7 /* system 286 trap gate */
+#define SDT_SYSNULL2 8 /* system null again */
+#define SDT_SYS386TSS 9 /* system 386 TSS available */
+#define SDT_SYSNULL3 10 /* system null again */
+#define SDT_SYS386BSY 11 /* system 386 TSS busy */
+#define SDT_SYS386CGT 12 /* system 386 call gate */
+#define SDT_SYSNULL4 13 /* system null again */
+#define SDT_SYS386IGT 14 /* system 386 interrupt gate */
+#define SDT_SYS386TGT 15 /* system 386 trap gate */
+
+ /* memory segment types */
+#define SDT_MEMRO 16 /* memory read only */
+#define SDT_MEMROA 17 /* memory read only accessed */
+#define SDT_MEMRW 18 /* memory read write */
+#define SDT_MEMRWA 19 /* memory read write accessed */
+#define SDT_MEMROD 20 /* memory read only expand dwn limit */
+#define SDT_MEMRODA 21 /* memory read only expand dwn limit accessed */
+#define SDT_MEMRWD 22 /* memory read write expand dwn limit */
+#define	SDT_MEMRWDA	23	/* memory read write expand dwn limit accessed */
+#define SDT_MEME 24 /* memory execute only */
+#define SDT_MEMEA 25 /* memory execute only accessed */
+#define SDT_MEMER 26 /* memory execute read */
+#define SDT_MEMERA 27 /* memory execute read accessed */
+#define SDT_MEMEC 28 /* memory execute only conforming */
+#define SDT_MEMEAC 29 /* memory execute only accessed conforming */
+#define SDT_MEMERC 30 /* memory execute read conforming */
+#define SDT_MEMERAC 31 /* memory execute read accessed conforming */
+
+/* is memory segment descriptor pointer ? */
+#define ISMEMSDP(s) ((s->d_type) >= SDT_MEMRO && (s->d_type) <= SDT_MEMERAC)
+
+/* is 286 gate descriptor pointer ? */
+#define IS286GDP(s) (((s->d_type) >= SDT_SYS286CGT \
+ && (s->d_type) < SDT_SYS286TGT))
+
+/* is 386 gate descriptor pointer ? */
+#define IS386GDP(s) (((s->d_type) >= SDT_SYS386CGT \
+ && (s->d_type) < SDT_SYS386TGT))
+
+/* is gate descriptor pointer ? */
+#define ISGDP(s) (IS286GDP(s) || IS386GDP(s))
+
+/* is segment descriptor pointer ? */
+#define ISSDP(s) (ISMEMSDP(s) || !ISGDP(s))
+
+/* is system segment descriptor pointer ? */
+#define ISSYSSDP(s) (!ISMEMSDP(s) && !ISGDP(s))
+
+/*
+ * Software definitions are in this convenient format,
+ * which are translated into inconvenient segment descriptors
+ * when needed to be used by the 386 hardware
+ */
+
+struct soft_segment_descriptor {
+ unsigned ssd_base ; /* segment base address */
+ unsigned ssd_limit ; /* segment extent */
+ unsigned ssd_type:5 ; /* segment type */
+ unsigned ssd_dpl:2 ; /* segment descriptor priority level */
+ unsigned ssd_p:1 ; /* segment descriptor present */
+ unsigned ssd_xx:4 ; /* unused */
+ unsigned ssd_xx1:2 ; /* unused */
+ unsigned ssd_def32:1 ; /* default 32 vs 16 bit size */
+ unsigned ssd_gran:1 ; /* limit granularity (byte/page units)*/
+};
+
+extern ssdtosd() ; /* to decode a ssd */
+extern sdtossd() ; /* to encode a sd */
+
+/*
+ * region descriptors, used to load gdt/idt tables before segments yet exist.
+ */
+struct region_descriptor {
+ unsigned rd_limit:16; /* segment extent */
+ unsigned rd_base:32 __attribute__ ((packed)); /* base address */
+};
+
+/*
+ * Segment Protection Exception code bits
+ */
+
+#define SEGEX_EXT 0x01 /* recursive or externally induced */
+#define SEGEX_IDT 0x02 /* interrupt descriptor table */
+#define SEGEX_TI 0x04 /* local descriptor table */
+ /* other bits are affected descriptor index */
+#define	SEGEX_IDX(s)	(((s)>>3)&0x1fff)
+
+/*
+ * Size of IDT table
+ */
+
+#define NIDT 256
+#define NRSVIDT 32 /* reserved entries for cpu exceptions */
+
+/*
+ * Entries in the Global Descriptor Table (GDT)
+ */
+#define GNULL_SEL 0 /* Null Descriptor */
+#define GCODE_SEL 1 /* Kernel Code Descriptor */
+#define GDATA_SEL 2 /* Kernel Data Descriptor */
+#define GLDT_SEL 3 /* LDT - eventually one per process */
+#define GTGATE_SEL 4 /* Process task switch gate */
+#define GPANIC_SEL 5 /* Task state to consider panic from */
+#define GPROC0_SEL 6 /* Task state process slot zero and up */
+#define GUSERLDT_SEL 7 /* User LDT */
+#define NGDT GUSERLDT_SEL+1
+
+/*
+ * Entries in the Local Descriptor Table (LDT)
+ */
+#define LSYS5CALLS_SEL 0 /* forced by intel BCS */
+#define LSYS5SIGR_SEL 1
+#define L43BSDCALLS_SEL 2 /* notyet */
+#define LUCODE_SEL 3
+#define LUDATA_SEL 4
+/* separate stack, es,fs,gs sels ? */
+/* #define LPOSIXCALLS_SEL 5*/ /* notyet */
+#define NLDT LUDATA_SEL+1
+
+#ifdef KERNEL
+extern int currentldt;
+extern union descriptor gdt[NGDT];
+extern union descriptor ldt[NLDT];
+extern struct soft_segment_descriptor gdt_segs[];
+#endif
+
+#endif /* _MACHINE_SEGMENTS_H_ */
diff --git a/sys/amd64/include/signal.h b/sys/amd64/include/signal.h
new file mode 100644
index 0000000..16cbef2
--- /dev/null
+++ b/sys/amd64/include/signal.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 1986, 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)signal.h 8.1 (Berkeley) 6/11/93
+ */
+
+/*
+ * Machine-dependent signal definitions
+ */
+
+typedef int sig_atomic_t;
+
+#ifndef _POSIX_SOURCE
+#include <machine/trap.h> /* codes for SIGILL, SIGFPE */
+#endif
+
+/*
+ * Information pushed on stack when a signal is delivered.
+ * This is used by the kernel to restore state following
+ * execution of the signal handler. It is also made available
+ * to the handler to allow it to restore state properly if
+ * a non-standard exit is performed.
+ */
+struct sigcontext {
+ int sc_onstack; /* sigstack state to restore */
+ int sc_mask; /* signal mask to restore */
+ int sc_esp; /* machine state */
+ int sc_ebp;
+ int sc_isp;
+ int sc_eip;
+ int sc_efl;
+ int sc_es;
+ int sc_ds;
+ int sc_cs;
+ int sc_ss;
+ int sc_edi;
+ int sc_esi;
+ int sc_ebx;
+ int sc_edx;
+ int sc_ecx;
+ int sc_eax;
+# define sc_sp sc_esp
+# define sc_fp sc_ebp
+# define sc_pc sc_eip
+# define sc_ps sc_efl
+};
diff --git a/sys/amd64/include/specialreg.h b/sys/amd64/include/specialreg.h
new file mode 100644
index 0000000..935b1ed
--- /dev/null
+++ b/sys/amd64/include/specialreg.h
@@ -0,0 +1,64 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)specialreg.h 7.1 (Berkeley) 5/9/91
+ * $Id: specialreg.h,v 1.2 1993/10/16 14:39:32 rgrimes Exp $
+ */
+
+#ifndef _MACHINE_SPECIALREG_H_
+#define _MACHINE_SPECIALREG_H_ 1
+
+/*
+ * Bits in 386 special registers:
+ */
+
+#define CR0_PE 0x00000001 /* Protected mode Enable */
+#define CR0_MP 0x00000002 /* "Math" Present (NPX or NPX emulator) */
+#ifdef notused
+#define CR0_EM 0x00000004 /* EMulate non-NPX coproc. (trap ESC only) */
+#endif
+#define CR0_TS 0x00000008 /* Task Switched (if MP, trap ESC and WAIT) */
+#ifdef notused
+#define CR0_ET 0x00000010 /* Extension Type (387 (if set) vs 287) */
+#endif
+#define CR0_PG 0x80000000 /* PaGing enable */
+
+/*
+ * Bits in 486 special registers:
+ */
+
+#define CR0_NE 0x00000020 /* Numeric Error enable (EX16 vs IRQ13) */
+#define CR0_WP 0x00010000 /* Write Protect (honor ~PG_W in all modes) */
+#ifdef notyet
+#define CR0_AM 0x00040000 /* Alignment Mask (set to enable AC flag) */
+#endif
+#endif /* _MACHINE_SPECIALREG_H_ */
diff --git a/sys/amd64/include/sysarch.h b/sys/amd64/include/sysarch.h
new file mode 100644
index 0000000..2649ba5
--- /dev/null
+++ b/sys/amd64/include/sysarch.h
@@ -0,0 +1,24 @@
+/*
+ * Architecture specific syscalls (i386)
+ *
+ * $Id: sysarch.h,v 1.2 1993/10/16 14:39:35 rgrimes Exp $
+ */
+#ifndef _MACHINE_SYSARCH_H_
+#define _MACHINE_SYSARCH_H_ 1
+
+#include <sys/cdefs.h>
+
+#define I386_GET_LDT 0
+#define I386_SET_LDT 1
+
+#ifdef KERNEL
+/* nothing here yet... */
+#else /* not KERNEL */
+__BEGIN_DECLS
+
+int i386_get_ldt __P((int, union descriptor *, int));
+int i386_set_ldt __P((int, union descriptor *, int));
+
+__END_DECLS
+#endif /* not KERNEL */
+#endif /* _MACHINE_SYSARCH_H_ */
diff --git a/sys/amd64/include/trap.h b/sys/amd64/include/trap.h
new file mode 100644
index 0000000..aa832ff
--- /dev/null
+++ b/sys/amd64/include/trap.h
@@ -0,0 +1,101 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)trap.h 5.4 (Berkeley) 5/9/91
+ * $Id: trap.h,v 1.2 1993/10/16 14:39:37 rgrimes Exp $
+ */
+
+#ifndef _MACHINE_TRAP_H_
+#define _MACHINE_TRAP_H_ 1
+
+/*
+ * Trap type values
+ * also known in trap.c for name strings
+ */
+
+#define T_RESADFLT 0 /* reserved addressing */
+#define T_PRIVINFLT 1 /* privileged instruction */
+#define T_RESOPFLT 2 /* reserved operand */
+#define T_BPTFLT 3 /* breakpoint instruction */
+#define T_SYSCALL 5 /* system call (kcall) */
+#define T_ARITHTRAP 6 /* arithmetic trap */
+#define T_ASTFLT 7 /* system forced exception */
+#define T_SEGFLT 8 /* segmentation (limit) fault */
+#define T_PROTFLT 9 /* protection fault */
+#define T_TRCTRAP 10 /* trace trap */
+#define T_PAGEFLT 12 /* page fault */
+#define T_TABLEFLT 13 /* page table fault */
+#define T_ALIGNFLT 14 /* alignment fault */
+#define T_KSPNOTVAL 15 /* kernel stack pointer not valid */
+#define T_BUSERR 16 /* bus error */
+#define T_KDBTRAP 17 /* kernel debugger trap */
+
+#define T_DIVIDE 18 /* integer divide fault */
+#define T_NMI 19 /* non-maskable trap */
+#define T_OFLOW 20 /* overflow trap */
+#define T_BOUND 21 /* bound instruction fault */
+#define T_DNA 22 /* device not available fault */
+#define T_DOUBLEFLT 23 /* double fault */
+#define T_FPOPFLT 24 /* fp coprocessor operand fetch fault */
+#define T_TSSFLT 25 /* invalid tss fault */
+#define T_SEGNPFLT 26 /* segment not present fault */
+#define T_STKFLT 27 /* stack fault */
+#define T_RESERVED 28 /* reserved fault base */
+
+/* definitions for <sys/signal.h> */
+#define ILL_RESAD_FAULT T_RESADFLT
+#define ILL_PRIVIN_FAULT T_PRIVINFLT
+#define ILL_RESOP_FAULT T_RESOPFLT
+#define ILL_ALIGN_FAULT T_ALIGNFLT
+#define ILL_FPOP_FAULT T_FPOPFLT /* coprocessor operand fault */
+
+/* codes for SIGFPE/ARITHTRAP */
+#define FPE_INTOVF_TRAP 0x1 /* integer overflow */
+#define FPE_INTDIV_TRAP 0x2 /* integer divide by zero */
+#define FPE_FLTDIV_TRAP 0x3 /* floating/decimal divide by zero */
+#define FPE_FLTOVF_TRAP 0x4 /* floating overflow */
+#define FPE_FLTUND_TRAP 0x5 /* floating underflow */
+#define FPE_FPU_NP_TRAP 0x6 /* floating point unit not present */
+#define FPE_SUBRNG_TRAP 0x7 /* subrange out of bounds */
+
+/* codes for SIGBUS */
+#define BUS_PAGE_FAULT T_PAGEFLT /* page fault protection base */
+#define BUS_SEGNP_FAULT T_SEGNPFLT /* segment not present */
+#define BUS_STK_FAULT T_STKFLT /* stack segment */
+#define BUS_SEGM_FAULT T_RESERVED /* segment protection base */
+
+/* Traps coming from user mode */
+#define T_USER 0x100
+#endif /* _MACHINE_TRAP_H_ */
diff --git a/sys/amd64/include/tss.h b/sys/amd64/include/tss.h
new file mode 100644
index 0000000..1fada6e
--- /dev/null
+++ b/sys/amd64/include/tss.h
@@ -0,0 +1,82 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)tss.h 5.4 (Berkeley) 1/18/91
+ * $Id: tss.h,v 1.3 1993/11/07 17:43:16 wollman Exp $
+ */
+
+#ifndef _MACHINE_TSS_H_
+#define _MACHINE_TSS_H_ 1
+
+/*
+ * Intel 386 Context Data Type
+ */
+
+struct i386tss {
+ int tss_link; /* actually 16 bits: top 16 bits must be zero */
+	int	tss_esp0; 	/* kernel stack pointer privilege level 0 */
+#define tss_ksp tss_esp0
+ int tss_ss0; /* actually 16 bits: top 16 bits must be zero */
+	int	tss_esp1; 	/* kernel stack pointer privilege level 1 */
+ int tss_ss1; /* actually 16 bits: top 16 bits must be zero */
+	int	tss_esp2; 	/* kernel stack pointer privilege level 2 */
+ int tss_ss2; /* actually 16 bits: top 16 bits must be zero */
+ int tss_cr3; /* page table directory */
+#define tss_ptd tss_cr3
+ int tss_eip; /* program counter */
+#define tss_pc tss_eip
+ int tss_eflags; /* program status longword */
+#define tss_psl tss_eflags
+ int tss_eax;
+ int tss_ecx;
+ int tss_edx;
+ int tss_ebx;
+ int tss_esp; /* user stack pointer */
+#define tss_usp tss_esp
+ int tss_ebp; /* user frame pointer */
+#define tss_fp tss_ebp
+ int tss_esi;
+ int tss_edi;
+ int tss_es; /* actually 16 bits: top 16 bits must be zero */
+ int tss_cs; /* actually 16 bits: top 16 bits must be zero */
+ int tss_ss; /* actually 16 bits: top 16 bits must be zero */
+ int tss_ds; /* actually 16 bits: top 16 bits must be zero */
+ int tss_fs; /* actually 16 bits: top 16 bits must be zero */
+ int tss_gs; /* actually 16 bits: top 16 bits must be zero */
+ int tss_ldt; /* actually 16 bits: top 16 bits must be zero */
+ int tss_ioopt; /* options & io offset bitmap: currently zero */
+ /* XXX unimplemented .. i/o permission bitmap */
+};
+#endif /* _MACHINE_TSS_H_ */
diff --git a/sys/amd64/include/varargs.h b/sys/amd64/include/varargs.h
new file mode 100644
index 0000000..1a913ec
--- /dev/null
+++ b/sys/amd64/include/varargs.h
@@ -0,0 +1,62 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)varargs.h 8.2 (Berkeley) 3/22/94
+ */
+
+#ifndef _VARARGS_H_
+#define _VARARGS_H_
+
+typedef char *va_list;
+
+#define va_dcl int va_alist;
+
+#define va_start(ap) \
+ ap = (char *)&va_alist
+
+#ifdef KERNEL
+#define va_arg(ap, type) \
+ ((type *)(ap += sizeof(type)))[-1]
+#else
+#define va_arg(ap, type) \
+ ((type *)(ap += sizeof(type) < sizeof(int) ? \
+ (abort(), 0) : sizeof(type)))[-1]
+#endif
+
+#define va_end(ap)
+
+#endif /* !_VARARGS_H_ */
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
new file mode 100644
index 0000000..05218ad
--- /dev/null
+++ b/sys/amd64/include/vmparam.h
@@ -0,0 +1,263 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vmparam.h 5.9 (Berkeley) 5/12/91
+ * $Id: vmparam.h,v 1.11 1994/01/14 16:24:00 davidg Exp $
+ */
+
+
+#ifndef _MACHINE_VMPARAM_H_
+#define _MACHINE_VMPARAM_H_ 1
+
+/*
+ * Machine dependent constants for 386.
+ */
+
+/*
+ * Virtual address space arrangement. On 386, both user and kernel
+ * share the address space, not unlike the vax.
+ * USRTEXT is the start of the user text/data space, while USRSTACK
+ * is the top (end) of the user stack. Immediately above the user stack
+ * resides the user structure, which is UPAGES long and contains the
+ * kernel stack.
+ *
+ * Immediately after the user structure is the page table map, and then
+ * kernel address space.
+ */
+#define USRTEXT 0UL
+/* #define USRSTACK 0xFDBFE000UL */
+#define BTOPUSRSTACK (0xFDC00-(UPAGES)) /* btop(USRSTACK) */
+#define LOWPAGES 0UL
+#define HIGHPAGES UPAGES
+
+/*
+ * Virtual memory related constants, all in bytes
+ */
+#define MAXTSIZ (16UL*1024*1024) /* max text size */
+#ifndef DFLDSIZ
+#define DFLDSIZ (64UL*1024*1024) /* initial data size limit */
+#endif
+#ifndef MAXDSIZ
+#define MAXDSIZ (128UL*1024*1024) /* max data size */
+#endif
+#ifndef DFLSSIZ
+#define DFLSSIZ (8UL*1024*1024) /* initial stack size limit */
+#endif
+#ifndef MAXSSIZ
+#define MAXSSIZ (64UL*1024*1024) /* max stack size */
+#endif
+#ifndef SGROWSIZ
+#define SGROWSIZ (128UL*1024) /* amount to grow stack */
+#endif
+
+/*
+ * Default sizes of swap allocation chunks (see dmap.h).
+ * The actual values may be changed in vminit() based on MAXDSIZ.
+ * With MAXDSIZ of 16Mb and NDMAP of 38, dmmax will be 1024.
+ */
+#define DMMIN 32 /* smallest swap allocation */
+#define DMMAX 4096 /* largest potential swap allocation */
+#define DMTEXT 1024 /* swap allocation for text */
+
+/*
+ * Sizes of the system and user portions of the system page table.
+ */
+#define SYSPTSIZE (2*NPTEPG)
+#define USRPTSIZE (2*NPTEPG)
+
+/*
+ * Size of the Shared Memory Pages page table.
+ */
+#ifndef SHMMAXPGS
+#define SHMMAXPGS 512 /* XXX until we have more kmap space */
+#endif
+
+/*
+ * Size of User Raw I/O map
+ */
+#define USRIOSIZE 1024
+
+/*
+ * The size of the clock loop.
+ */
+#define LOOPPAGES (maxfree - firstfree)
+
+/*
+ * The time for a process to be blocked before being very swappable.
+ * This is a number of seconds which the system takes as being a non-trivial
+ * amount of real time. You probably shouldn't change this;
+ * it is used in subtle ways (fractions and multiples of it are, that is, like
+ * half of a ``long time'', almost a long time, etc.)
+ * It is related to human patience and other factors which don't really
+ * change over time.
+ */
+#define MAXSLP 20
+
+/*
+ * A swapped in process is given a small amount of core without being bothered
+ * by the page replacement algorithm. Basically this says that if you are
+ * swapped in you deserve some resources. We protect the last SAFERSS
+ * pages against paging and will just swap you out rather than paging you.
+ * Note that each process has at least UPAGES+CLSIZE pages which are not
+ * paged anyways (this is currently 8+2=10 pages or 5k bytes), so this
+ * number just means a swapped in process is given around 25k bytes.
+ * Just for fun: current memory prices are 4600$ a megabyte on VAX (4/22/81),
+ * so we loan each swapped in process memory worth 100$, or just admit
+ * that we don't consider it worthwhile and swap it out to disk which costs
+ * $30/mb or about $0.75.
+ * { wfj 6/16/89: Retail AT memory expansion $800/megabyte, loan of $17
+ * on disk costing $7/mb or $0.18 (in memory still 100:1 in cost!) }
+ */
+#define SAFERSS 8 /* nominal ``small'' resident set size
+ protected against replacement */
+
+/*
+ * DISKRPM is used to estimate the number of paging i/o operations
+ * which one can expect from a single disk controller.
+ */
+#define DISKRPM 60
+
+/*
+ * Klustering constants. Klustering is the gathering
+ * of pages together for pagein/pageout, while clustering
+ * is the treatment of hardware page size as though it were
+ * larger than it really is.
+ *
+ * KLMAX gives maximum cluster size in CLSIZE page (cluster-page)
+ * units. Note that KLMAX*CLSIZE must be <= DMMIN in dmap.h.
+ */
+
+#define KLMAX (4/CLSIZE)
+#define KLSEQL (2/CLSIZE) /* in klust if vadvise(VA_SEQL) */
+#define KLIN (4/CLSIZE) /* default data/stack in klust */
+#define KLTXT (4/CLSIZE) /* default text in klust */
+#define KLOUT (4/CLSIZE)
+
+/*
+ * KLSDIST is the advance or retard of the fifo reclaim for sequential
+ * processes data space.
+ */
+#define KLSDIST 3 /* klusters advance/retard for seq. fifo */
+
+/*
+ * There are two clock hands, initially separated by HANDSPREAD bytes
+ * (but at most all of user memory). The amount of time to reclaim
+ * a page once the pageout process examines it increases with this
+ * distance and decreases as the scan rate rises.
+ */
+#define HANDSPREAD (2 * 1024 * 1024)
+
+/*
+ * The number of times per second to recompute the desired paging rate
+ * and poke the pagedaemon.
+ */
+#define RATETOSCHEDPAGING 4
+
+/*
+ * Believed threshold (in megabytes) for which interleaved
+ * swapping area is desirable.
+ */
+#define LOTSOFMEM 2
+
+#define mapin(pte, v, pfnum, prot) \
+ {(*(int *)(pte) = ((pfnum)<<PGSHIFT) | (prot)) ; }
+
+/*
+ * Mach derived constants
+ */
+
+/* user/kernel map constants */
+#define KERNBASE (0-(NKPDE+1)*(NBPG*NPTEPG))
+#define KERNSIZE (NKPDE*NBPG*NPTEPG)
+
+#define VM_MIN_ADDRESS ((vm_offset_t)0)
+#define VM_MAXUSER_ADDRESS ((vm_offset_t)KERNBASE - (NBPG*(NPTEPG+UPAGES)))
+#define USRSTACK VM_MAXUSER_ADDRESS
+#define UPT_MIN_ADDRESS ((vm_offset_t)KERNBASE - (NBPG*NPTEPG))
+#define UPT_MAX_ADDRESS ((vm_offset_t)KERNBASE - (NBPG*(NKPDE+2)))
+#define VM_MAX_ADDRESS UPT_MAX_ADDRESS
+#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)KERNBASE - (NBPG*(NKPDE+2)))
+#define UPDT VM_MIN_KERNEL_ADDRESS
+#define KPT_MIN_ADDRESS ((vm_offset_t)(KERNBASE) - (NBPG*(NKPDE+1)))
+#define KPT_MAX_ADDRESS ((vm_offset_t)(KERNBASE) - NBPG)
+#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)ALT_MIN_ADDRESS - NBPG)
+#define ALT_MIN_ADDRESS ((vm_offset_t)((APTDPTDI) << 22))
+#define HIGHPAGES UPAGES
+
+
+/* virtual sizes (bytes) for various kernel submaps */
+#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)
+#define VM_KMEM_SIZE (16 * 1024 * 1024)
+#define VM_PHYS_SIZE (USRIOSIZE*CLBYTES)
+
+/* pcb base */
+#define pcbb(p) ((u_int)(p)->p_addr)
+
+/*
+ * Flush MMU TLB
+ */
+
+#ifndef I386_CR3PAT
+#define I386_CR3PAT 0x0
+#endif
+
+#ifdef notyet
+#define _cr3() ({u_long rtn; \
+ asm (" movl %%cr3,%%eax; movl %%eax,%0 " \
+ : "=g" (rtn) \
+ : \
+ : "ax"); \
+ rtn; \
+})
+
+#define load_cr3(s) ({ u_long val; \
+ val = (s) | I386_CR3PAT; \
+ asm ("movl %0,%%eax; movl %%eax,%%cr3" \
+ : \
+ : "g" (val) \
+ : "ax"); \
+})
+
+#define tlbflush() ({ u_long val; \
+ val = u.u_pcb.pcb_ptd | I386_CR3PAT; \
+ asm ("movl %0,%%eax; movl %%eax,%%cr3" \
+ : \
+ : "g" (val) \
+ : "ax"); \
+})
+#endif
+#endif /* _MACHINE_VMPARAM_H_ */
diff --git a/sys/amd64/isa/clock.c b/sys/amd64/isa/clock.c
new file mode 100644
index 0000000..e40079a
--- /dev/null
+++ b/sys/amd64/isa/clock.c
@@ -0,0 +1,442 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz and Don Ahn.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)clock.c 7.2 (Berkeley) 5/12/91
+ * $Id: clock.c,v 1.6 1994/02/06 22:48:13 davidg Exp $
+ */
+
+/*
+ * Primitive clock interrupt routines.
+ */
+#include "param.h"
+#include "systm.h"
+#include "time.h"
+#include "kernel.h"
+#include "machine/segments.h"
+#include "machine/frame.h"
+#include "i386/isa/icu.h"
+#include "i386/isa/isa.h"
+#include "i386/isa/rtc.h"
+#include "i386/isa/timerreg.h"
+#include <machine/cpu.h>
+
+/* X-tals being what they are, it's nice to be able to fudge this one... */
+/* Note, the name changed here from XTALSPEED to TIMER_FREQ rgrimes 4/26/93 */
+#ifndef TIMER_FREQ
+#define TIMER_FREQ 1193182 /* XXX - should be in isa.h */
+#endif
+#define TIMER_DIV(x) ((TIMER_FREQ+(x)/2)/(x))
+
+void hardclock();
+static int beeping;
+int timer0_divisor = TIMER_DIV(100); /* XXX should be hz */
+u_int timer0_prescale;
+static char timer0_state = 0, timer2_state = 0;
+static char timer0_reprogram = 0;
+static void (*timer_func)() = hardclock;
+static void (*new_function)();
+static u_int new_rate;
+static u_int hardclock_divisor;
+
+
+void
+clkintr(frame)
+ struct clockframe frame;
+{
+ hardclock(&frame);
+}
+
+#if 0
+void
+timerintr(struct clockframe frame)
+{
+ timer_func(&frame);
+ switch (timer0_state) {
+ case 0:
+ break;
+ case 1:
+ if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
+ hardclock(&frame);
+ timer0_prescale = 0;
+ }
+ break;
+ case 2:
+ disable_intr();
+ outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
+ outb(TIMER_CNTR0, TIMER_DIV(new_rate)%256);
+ outb(TIMER_CNTR0, TIMER_DIV(new_rate)/256);
+ enable_intr();
+ timer0_divisor = TIMER_DIV(new_rate);
+ timer0_prescale = 0;
+ timer_func = new_function;
+ timer0_state = 1;
+ break;
+ case 3:
+ if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
+ hardclock(&frame);
+ disable_intr();
+ outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
+ outb(TIMER_CNTR0, TIMER_DIV(hz)%256);
+ outb(TIMER_CNTR0, TIMER_DIV(hz)/256);
+ enable_intr();
+ timer0_divisor = TIMER_DIV(hz);
+ timer0_prescale = 0;
+ timer_func = hardclock;;
+ timer0_state = 0;
+ }
+ break;
+ }
+}
+
+#endif
+
+int
+acquire_timer0(int rate, void (*function)() )
+{
+ if (timer0_state || !function)
+ return -1;
+
+ new_function = function;
+ new_rate = rate;
+ timer0_state = 2;
+ return 0;
+}
+
+
+int
+acquire_timer2(int mode)
+{
+ if (timer2_state)
+ return -1;
+ timer2_state = 1;
+ outb(TIMER_MODE, TIMER_SEL2 | (mode &0x3f));
+ return 0;
+}
+
+
+int
+release_timer0()
+{
+ if (!timer0_state)
+ return -1;
+ timer0_state = 3;
+ return 0;
+}
+
+
+int
+release_timer2()
+{
+ if (!timer2_state)
+ return -1;
+ timer2_state = 0;
+ outb(TIMER_MODE, TIMER_SEL2|TIMER_SQWAVE|TIMER_16BIT);
+ return 0;
+}
+
+
+static int
+getit()
+{
+ int high, low;
+
+ disable_intr();
+ /* select timer0 and latch counter value */
+ outb(TIMER_MODE, TIMER_SEL0);
+ low = inb(TIMER_CNTR0);
+ high = inb(TIMER_CNTR0);
+ enable_intr();
+ return ((high << 8) | low);
+}
+
+
+/*
+ * Wait "n" microseconds.
+ * Relies on timer 1 counting down from (TIMER_FREQ / hz)
+ * Note: timer had better have been programmed before this is first used!
+ */
+void
+DELAY(int n)
+{
+ int counter_limit, prev_tick, tick, ticks_left, sec, usec;
+
+#ifdef DELAYDEBUG
+ int getit_calls = 1;
+ int n1;
+ static int state = 0;
+
+ if (state == 0) {
+ state = 1;
+ for (n1 = 1; n1 <= 10000000; n1 *= 10)
+ DELAY(n1);
+ state = 2;
+ }
+ if (state == 1)
+ printf("DELAY(%d)...", n);
+#endif
+ /*
+ * Read the counter first, so that the rest of the setup overhead is
+ * counted. Guess the initial overhead is 20 usec (on most systems it
+ * takes about 1.5 usec for each of the i/o's in getit(). The loop
+ * takes about 6 usec on a 486/33 and 13 usec on a 386/20. The
+ * multiplications and divisions to scale the count take a while).
+ */
+ prev_tick = getit(0, 0);
+ n -= 20;
+ /*
+ * Calculate (n * (TIMER_FREQ / 1e6)) without using floating point
+ * and without any avoidable overflows.
+ */
+ sec = n / 1000000;
+ usec = n - sec * 1000000;
+ ticks_left = sec * TIMER_FREQ
+ + usec * (TIMER_FREQ / 1000000)
+ + usec * ((TIMER_FREQ % 1000000) / 1000) / 1000
+ + usec * (TIMER_FREQ % 1000) / 1000000;
+
+ while (ticks_left > 0) {
+ tick = getit(0, 0);
+#ifdef DELAYDEBUG
+ ++getit_calls;
+#endif
+ if (tick > prev_tick)
+ ticks_left -= prev_tick - (tick - timer0_divisor);
+ else
+ ticks_left -= prev_tick - tick;
+ prev_tick = tick;
+ }
+#ifdef DELAYDEBUG
+ if (state == 1)
+ printf(" %d calls to getit() at %d usec each\n",
+ getit_calls, (n + 5) / getit_calls);
+#endif
+}
+
+
+static void
+sysbeepstop()
+{
+ outb(IO_PPI, inb(IO_PPI)&0xFC); /* disable counter2 output to speaker */
+ release_timer2();
+ beeping = 0;
+}
+
+
+int
+sysbeep(int pitch, int period)
+{
+
+ if (acquire_timer2(TIMER_SQWAVE|TIMER_16BIT))
+ return -1;
+ disable_intr();
+ outb(TIMER_CNTR2, pitch);
+ outb(TIMER_CNTR2, (pitch>>8));
+ enable_intr();
+ if (!beeping) {
+ outb(IO_PPI, inb(IO_PPI) | 3); /* enable counter2 output to speaker */
+ beeping = period;
+ timeout(sysbeepstop, 0, period);
+ }
+ return 0;
+}
+
+
+void
+startrtclock()
+{
+ int s;
+
+ /* initialize 8253 clock */
+ outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
+
+ /* Correct rounding will buy us a better precision in timekeeping */
+ outb (IO_TIMER1, TIMER_DIV(hz)%256);
+ outb (IO_TIMER1, TIMER_DIV(hz)/256);
+ timer0_divisor = hardclock_divisor = TIMER_DIV(hz);
+
+ /* initialize brain-dead battery powered clock */
+ outb (IO_RTC, RTC_STATUSA);
+ outb (IO_RTC+1, 0x26);
+ outb (IO_RTC, RTC_STATUSB);
+ outb (IO_RTC+1, 2);
+
+ outb (IO_RTC, RTC_DIAG);
+ if (s = inb (IO_RTC+1))
+ printf("RTC BIOS diagnostic error %b\n", s, RTCDG_BITS);
+}
+
+
+/* convert 2 digit BCD number */
+int
+bcd(int i)
+{
+ return ((i/16)*10 + (i%16));
+}
+
+
+/* convert years to seconds (from 1970) */
+unsigned long
+ytos(int y)
+{
+ int i;
+ unsigned long ret;
+
+ ret = 0;
+ for(i = 1970; i < y; i++) {
+ if (i % 4) ret += 365*24*60*60;
+ else ret += 366*24*60*60;
+ }
+ return ret;
+}
+
+
+/* convert months to seconds */
+unsigned long
+mtos(int m, int leap)
+{
+ int i;
+ unsigned long ret;
+
+ ret = 0;
+ for(i=1; i<m; i++) {
+ switch(i){
+ case 1: case 3: case 5: case 7: case 8: case 10: case 12:
+ ret += 31*24*60*60; break;
+ case 4: case 6: case 9: case 11:
+ ret += 30*24*60*60; break;
+ case 2:
+ if (leap) ret += 29*24*60*60;
+ else ret += 28*24*60*60;
+ }
+ }
+ return ret;
+}
+
+
+/*
+ * Initialize the time of day register, based on the time base which is, e.g.
+ * from a filesystem.
+ */
+void
+inittodr(time_t base)
+{
+ unsigned long sec;
+ int leap, day_week, t, yd;
+ int sa,s;
+
+ /* do we have a realtime clock present? (otherwise we loop below) */
+ sa = rtcin(RTC_STATUSA);
+ if (sa == 0xff || sa == 0) return;
+
+ /* ready for a read? */
+ while ((sa&RTCSA_TUP) == RTCSA_TUP)
+ sa = rtcin(RTC_STATUSA);
+
+ sec = bcd(rtcin(RTC_YEAR)) + 1900;
+ if (sec < 1970)
+ sec += 100;
+
+ leap = !(sec % 4); sec = ytos(sec); /* year */
+ yd = mtos(bcd(rtcin(RTC_MONTH)),leap); sec+=yd; /* month */
+ t = (bcd(rtcin(RTC_DAY))-1) * 24*60*60; sec+=t; yd+=t; /* date */
+ day_week = rtcin(RTC_WDAY); /* day */
+ sec += bcd(rtcin(RTC_HRS)) * 60*60; /* hour */
+ sec += bcd(rtcin(RTC_MIN)) * 60; /* minutes */
+ sec += bcd(rtcin(RTC_SEC)); /* seconds */
+ sec += tz.tz_minuteswest * 60;
+ time.tv_sec = sec;
+}
+
+
+#ifdef garbage
+/*
+ * Initialize the time of day register, based on the time base which is, e.g.
+ * from a filesystem.
+ */
+test_inittodr(time_t base)
+{
+
+ outb(IO_RTC,9); /* year */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,8); /* month */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,7); /* day */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,4); /* hour */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,2); /* minutes */
+ printf("%d ",bcd(inb(IO_RTC+1)));
+ outb(IO_RTC,0); /* seconds */
+ printf("%d\n",bcd(inb(IO_RTC+1)));
+
+ time.tv_sec = base;
+}
+#endif
+
+/*
+ * Wire clock interrupt in.
+ */
+#define V(s) __CONCAT(V, s)
+extern void V(clk)();
+
+
+void
+enablertclock()
+{
+ setidt(ICU_OFFSET+0, &V(clk), SDT_SYS386IGT, SEL_KPL);
+ INTREN(IRQ0);
+}
+
+
+/*
+ * Delay for some number of milliseconds.
+ */
+void
+spinwait(int millisecs)
+{
+ DELAY(1000 * millisecs);
+}
+
+void
+cpu_initclocks()
+{
+ startrtclock();
+ enablertclock();
+}
+
+void
+setstatclockrate(int newhz)
+{
+}
diff --git a/sys/amd64/isa/icu.h b/sys/amd64/isa/icu.h
new file mode 100644
index 0000000..13216b0
--- /dev/null
+++ b/sys/amd64/isa/icu.h
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)icu.h 5.6 (Berkeley) 5/9/91
+ * $Id: icu.h,v 1.2 1993/10/16 13:45:51 rgrimes Exp $
+ */
+
+/*
+ * AT/386 Interrupt Control constants
+ * W. Jolitz 8/89
+ */
+
+#ifndef __ICU__
+#define __ICU__
+
+#ifndef LOCORE
+
+/*
+ * Interrupt "level" mechanism variables, masks, and macros
+ */
+extern unsigned imen; /* interrupt mask enable: set bit = IRQ masked off */
+
+/* Enable/disable an IRQ (bitmask `s`) and push the mask to both 8259s. */
+#define INTREN(s) (imen &= ~(s), SET_ICUS())
+#define INTRDIS(s) (imen |= (s), SET_ICUS())
+#define INTRMASK(msk,s) (msk |= (s))
+#if 0
+/* fixed typo: IU_ICU2 -> IO_ICU2 (branch is compiled out, see XXX below) */
+#define SET_ICUS() (outb(IO_ICU1 + 1, imen), outb(IO_ICU2 + 1, imen >> 8))
+#else
+/*
+ * XXX - IO_ICU* are defined in isa.h, not icu.h, and nothing much bothers to
+ * include isa.h, while too many things include icu.h.
+ */
+#define SET_ICUS() (outb(0x21, imen), outb(0xa1, imen >> 8))
+#endif
+
+#endif
+
+/*
+ * Interrupt enable bits - in normal order of priority (which we change)
+ */
+#define IRQ0 0x0001 /* highest priority - timer */
+#define IRQ1 0x0002
+#define IRQ_SLAVE 0x0004
+#define IRQ8 0x0100
+#define IRQ9 0x0200
+#define IRQ2 IRQ9
+#define IRQ10 0x0400
+#define IRQ11 0x0800
+#define IRQ12 0x1000
+#define IRQ13 0x2000
+#define IRQ14 0x4000
+#define IRQ15 0x8000
+#define IRQ3 0x0008 /* this is highest after rotation */
+#define IRQ4 0x0010
+#define IRQ5 0x0020
+#define IRQ6 0x0040
+#define IRQ7 0x0080 /* lowest - parallel printer */
+
+/*
+ * Interrupt Control offset into Interrupt descriptor table (IDT)
+ */
+#define ICU_OFFSET 32 /* 0-31 are processor exceptions */
+#define ICU_LEN 16 /* 32-47 are ISA interrupts */
+
+#endif /* __ICU__ */
diff --git a/sys/amd64/isa/isa.c b/sys/amd64/isa/isa.c
new file mode 100644
index 0000000..32e59e7
--- /dev/null
+++ b/sys/amd64/isa/isa.c
@@ -0,0 +1,671 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)isa.c 7.2 (Berkeley) 5/13/91
+ * $Id: isa.c,v 1.16 1994/04/02 20:43:25 ache Exp $
+ */
+
+/*
+ * code to manage AT bus
+ *
+ * 92/08/18 Frank P. MacLachlan (fpm@crash.cts.com):
+ * Fixed uninitialized variable problem and added code to deal
+ * with DMA page boundaries in isa_dmarangecheck(). Fixed word
+ * mode DMA count computation and reorganized DMA setup code in
+ * isa_dmastart()
+ */
+
+#include "param.h"
+#include "systm.h" /* isn't it a joy */
+#include "kernel.h" /* to have three of these */
+#include "conf.h"
+#include "file.h"
+#include "buf.h"
+#include "uio.h"
+#include "syslog.h"
+#include "malloc.h"
+#include "rlist.h"
+#include "machine/segments.h"
+#include "vm/vm.h"
+#include <machine/spl.h>
+#include "i386/isa/isa_device.h"
+#include "i386/isa/isa.h"
+#include "i386/isa/icu.h"
+#include "i386/isa/ic/i8237.h"
+#include "i386/isa/ic/i8042.h"
+
+/*
+** Register definitions for DMA controller 1 (channels 0..3):
+*/
+#define DMA1_CHN(c) (IO_DMA1 + 1*(2*(c))) /* addr reg for channel c */
+#define DMA1_SMSK (IO_DMA1 + 1*10) /* single mask register */
+#define DMA1_MODE (IO_DMA1 + 1*11) /* mode register */
+#define DMA1_FFC (IO_DMA1 + 1*12) /* clear first/last FF */
+
+/*
+** Register definitions for DMA controller 2 (channels 4..7):
+*/
+#define DMA2_CHN(c) (IO_DMA2 + 2*(2*(c))) /* addr reg for channel c */
+#define DMA2_SMSK (IO_DMA2 + 2*10) /* single mask register */
+#define DMA2_MODE (IO_DMA2 + 2*11) /* mode register */
+#define DMA2_FFC (IO_DMA2 + 2*12) /* clear first/last FF */
+
+void config_isadev __P((struct isa_device *, u_int *));
+
+/*
+ * print a conflict message
+ */
+/*
+ * Report why device `dvp` is being skipped: it collides with the
+ * already-probed `tmpdvp` on the resource named `reason`, whose value
+ * `item` is printed with the caller-supplied `format` ("0x%x" or "%d").
+ */
+void
+conflict(dvp, tmpdvp, item, reason, format)
+ struct isa_device *dvp, *tmpdvp;
+ int item;
+ char *reason;
+ char *format;
+{
+ printf("%s%d not probed due to %s conflict with %s%d at ",
+ dvp->id_driver->name, dvp->id_unit, reason,
+ tmpdvp->id_driver->name, tmpdvp->id_unit);
+ printf(format, item);
+ printf("\n");
+}
+
+/*
+ * Check to see if things are already in use, like IRQ's, I/O addresses
+ * and Memory addresses.
+ */
+/*
+ * Compare `dvp` against one already-probed device `tmpdvp`, printing a
+ * conflict() diagnostic for every overlapping resource.
+ * Returns 1 if any conflict was found, 0 otherwise.
+ */
+int
+haveseen(dvp, tmpdvp)
+ struct isa_device *dvp, *tmpdvp;
+{
+ int status = 0;
+
+ /*
+ * Only check against devices that have already been found
+ */
+ if (tmpdvp->id_alive) {
+ /*
+ * Check for I/O address conflict. We can only check the
+ * starting address of the device against the range of the
+ * device that has already been probed since we do not
+ * know how many I/O addresses this device uses.
+ */
+ if (tmpdvp->id_alive != -1) {
+ if ((dvp->id_iobase >= tmpdvp->id_iobase) &&
+ (dvp->id_iobase <=
+ (tmpdvp->id_iobase + tmpdvp->id_alive - 1))) {
+ conflict(dvp, tmpdvp, dvp->id_iobase,
+ "I/O address", "0x%x");
+ status = 1;
+ }
+ }
+ /*
+ * Check for Memory address conflict. We can check for
+ * range overlap, but it will not catch all cases since the
+ * driver may adjust the msize parameter during probe, for
+ * now we just check that the starting address does not
+ * fall within any allocated region.
+ * XXX could add a second check after the probe for overlap,
+ * since at that time we would know the full range.
+ * XXX KERNBASE is a hack, we should have vaddr in the table!
+ */
+ if(tmpdvp->id_maddr) {
+ if((KERNBASE + dvp->id_maddr >= tmpdvp->id_maddr) &&
+ (KERNBASE + dvp->id_maddr <=
+ (tmpdvp->id_maddr + tmpdvp->id_msize - 1))) {
+ conflict(dvp, tmpdvp, dvp->id_maddr, "maddr",
+ "0x%x");
+ status = 1;
+ }
+ }
+#ifndef COM_MULTIPORT
+ /*
+ * Check for IRQ conflicts.
+ */
+ if(tmpdvp->id_irq) {
+ if (tmpdvp->id_irq == dvp->id_irq) {
+ conflict(dvp, tmpdvp, ffs(dvp->id_irq) - 1,
+ "irq", "%d");
+ status = 1;
+ }
+ }
+#endif
+ /*
+ * Check for DRQ conflicts.
+ */
+ if(tmpdvp->id_drq != -1) {
+ if (tmpdvp->id_drq == dvp->id_drq) {
+ conflict(dvp, tmpdvp, dvp->id_drq,
+ "drq", "%d");
+ status = 1;
+ }
+ }
+ }
+ return (status);
+}
+
+/*
+ * Search through all the isa_devtab_* tables looking for anything that
+ * conflicts with the current device.
+ */
+/*
+ * Scan every isa_devtab_* table (tty, bio, net, null -- in that order)
+ * and report whether anything there conflicts with `dvp`.  The result is
+ * the OR of all haveseen() checks, so every conflict gets printed, not
+ * just the first one found.
+ */
+int
+haveseen_isadev(dvp)
+ struct isa_device *dvp;
+{
+ struct isa_device *tabs[4];
+ struct isa_device *scan;
+ int n, found = 0;
+
+ tabs[0] = isa_devtab_tty;
+ tabs[1] = isa_devtab_bio;
+ tabs[2] = isa_devtab_net;
+ tabs[3] = isa_devtab_null;
+ for (n = 0; n < 4; n++)
+ for (scan = tabs[n]; scan->id_driver; scan++)
+ found |= haveseen(dvp, scan);
+ return (found);
+}
+
+/*
+ * Configure all ISA devices
+ */
+/*
+ * Probe and attach every device in the isa_devtab_{tty,bio,net,null}
+ * tables, skipping entries that conflict with already-found devices,
+ * then fold the soft-interrupt bits into the spl masks.
+ */
+void
+isa_configure() {
+ struct isa_device *dvp;
+
+ enable_intr();
+ splhigh(); /* probe with everything but NMI blocked */
+ INTREN(IRQ_SLAVE); /* allow the slave 8259 to cascade */
+ printf("Probing for devices on the ISA bus:\n");
+ for (dvp = isa_devtab_tty; dvp->id_driver; dvp++) {
+ if (!haveseen_isadev(dvp))
+ config_isadev(dvp,&tty_imask);
+ }
+ for (dvp = isa_devtab_bio; dvp->id_driver; dvp++) {
+ if (!haveseen_isadev(dvp))
+ config_isadev(dvp,&bio_imask);
+ }
+ for (dvp = isa_devtab_net; dvp->id_driver; dvp++) {
+ if (!haveseen_isadev(dvp))
+ config_isadev(dvp,&net_imask);
+ }
+ for (dvp = isa_devtab_null; dvp->id_driver; dvp++) {
+ if (!haveseen_isadev(dvp))
+ config_isadev(dvp,(u_int *) NULL); /* no spl mask updated */
+ }
+ bio_imask |= SWI_CLOCK_MASK;
+ net_imask |= SWI_NET_MASK;
+ tty_imask |= SWI_TTY_MASK;
+
+/*
+ * XXX we should really add the tty device to net_imask when the line is
+ * switched to SLIPDISC, and then remove it when it is switched away from
+ * SLIPDISC. No need to block out ALL ttys during a splimp when only one
+ * of them is running slip.
+ *
+ * XXX actually, blocking all ttys during a splimp doesn't matter so much
+ * with sio because the serial interrupt layer doesn't use tty_imask. Only
+ * non-serial ttys suffer. It's more stupid that ALL 'net's are blocked
+ * during spltty.
+ */
+#include "sl.h"
+#if NSL > 0
+ net_imask |= tty_imask;
+ tty_imask = net_imask;
+#endif
+ /* bio_imask |= tty_imask ; can some tty devices use buffers? */
+#ifdef DIAGNOSTIC
+ printf("bio_imask %x tty_imask %x net_imask %x\n",
+ bio_imask, tty_imask, net_imask);
+#endif
+ splnone();
+}
+
+/*
+ * Configure an ISA device.
+ */
+/*
+ * Probe a single ISA device; on success print its resources, attach it,
+ * and wire up its interrupt handler (adding the IRQ to the spl mask `mp`
+ * when one is given).  On failure just report "not found".
+ */
+void
+config_isadev(isdp, mp)
+ struct isa_device *isdp;
+ u_int *mp;
+{
+ struct isa_driver *dp = isdp->id_driver;
+
+ if (isdp->id_maddr) {
+ extern u_int atdevbase;
+
+ /* remap config-file physical maddr to the kernel's ISA hole window */
+ isdp->id_maddr -= 0xa0000; /* XXX should be a define */
+ isdp->id_maddr += atdevbase;
+ }
+ isdp->id_alive = (*dp->probe)(isdp); /* 0 = absent, -1 = present w/o ports, >0 = port count */
+ if (isdp->id_alive) {
+ /*
+ * Only print the I/O address range if id_alive != -1
+ * Right now this is a temporary fix just for the new
+ * NPX code so that if it finds a 486 that can use trap
+ * 16 it will not report I/O addresses.
+ * Rod Grimes 04/26/94
+ */
+ printf("%s%d", dp->name, isdp->id_unit);
+ if (isdp->id_alive != -1) {
+ printf(" at 0x%x", isdp->id_iobase);
+ if ((isdp->id_iobase + isdp->id_alive - 1) !=
+ isdp->id_iobase) {
+ printf("-0x%x",
+ isdp->id_iobase +
+ isdp->id_alive - 1);
+ }
+ }
+ if(isdp->id_irq)
+ printf(" irq %d", ffs(isdp->id_irq) - 1);
+ if (isdp->id_drq != -1)
+ printf(" drq %d", isdp->id_drq);
+ if (isdp->id_maddr)
+ printf(" maddr 0x%x", kvtop(isdp->id_maddr));
+ if (isdp->id_msize)
+ printf(" msize %d", isdp->id_msize);
+ if (isdp->id_flags)
+ printf(" flags 0x%x", isdp->id_flags);
+ if (isdp->id_iobase) {
+ if (isdp->id_iobase < 0x100) {
+ printf(" on motherboard\n");
+ } else {
+ if (isdp->id_iobase >= 0x1000) {
+ printf (" on eisa\n");
+ } else {
+ printf (" on isa\n");
+ }
+ }
+ }
+
+ (*dp->attach)(isdp);
+
+ if(isdp->id_irq) {
+ int intrno;
+
+ intrno = ffs(isdp->id_irq)-1; /* irq bitmask -> irq number */
+ setidt(ICU_OFFSET+intrno, isdp->id_intr,
+ SDT_SYS386IGT, SEL_KPL);
+ if(mp) {
+ INTRMASK(*mp,isdp->id_irq);
+ }
+ INTREN(isdp->id_irq);
+ }
+ } else {
+ printf("%s%d not found", dp->name, isdp->id_unit);
+ if (isdp->id_iobase) {
+ printf(" at 0x%x", isdp->id_iobase);
+ }
+ printf("\n");
+ }
+}
+
+#define IDTVEC(name) __CONCAT(X,name)
+/* default interrupt vector table entries */
+typedef void inthand_t();
+typedef void (*inthand_func_t)();
+/* Xintr0..Xintr15: default/stray interrupt entry stubs, defined elsewhere
+ * (presumably in assembly) -- TODO confirm location */
+extern inthand_t
+ IDTVEC(intr0), IDTVEC(intr1), IDTVEC(intr2), IDTVEC(intr3),
+ IDTVEC(intr4), IDTVEC(intr5), IDTVEC(intr6), IDTVEC(intr7),
+ IDTVEC(intr8), IDTVEC(intr9), IDTVEC(intr10), IDTVEC(intr11),
+ IDTVEC(intr12), IDTVEC(intr13), IDTVEC(intr14), IDTVEC(intr15);
+
+/* the 16 default handlers, indexed by IRQ number */
+static inthand_func_t defvec[ICU_LEN] = {
+ &IDTVEC(intr0), &IDTVEC(intr1), &IDTVEC(intr2), &IDTVEC(intr3),
+ &IDTVEC(intr4), &IDTVEC(intr5), &IDTVEC(intr6), &IDTVEC(intr7),
+ &IDTVEC(intr8), &IDTVEC(intr9), &IDTVEC(intr10), &IDTVEC(intr11),
+ &IDTVEC(intr12), &IDTVEC(intr13), &IDTVEC(intr14), &IDTVEC(intr15) };
+
+/*
+ * Fill in default interrupt table (in case of spurious interrupt
+ * during configuration of kernel, setup interrupt control unit
+ */
+void
+isa_defaultirq()
+{
+ int i;
+
+ /* icu vectors: point every ISA IRQ at its stray-interrupt stub */
+ for (i = 0; i < ICU_LEN; i++)
+ setidt(ICU_OFFSET + i, defvec[i], SDT_SYS386IGT, SEL_KPL);
+
+ /* initialize 8259's */
+ outb(IO_ICU1, 0x11); /* reset; program device, four bytes */
+ outb(IO_ICU1+1, NRSVIDT); /* starting at this vector index */
+ outb(IO_ICU1+1, 1<<2); /* slave on line 2 */
+#ifdef AUTO_EOI_1
+ outb(IO_ICU1+1, 2 | 1); /* auto EOI, 8086 mode */
+#else
+ outb(IO_ICU1+1, 1); /* 8086 mode */
+#endif
+ outb(IO_ICU1+1, 0xff); /* leave interrupts masked */
+ outb(IO_ICU1, 0x0a); /* default to IRR on read */
+ outb(IO_ICU1, 0xc0 | (3 - 1)); /* pri order 3-7, 0-2 (com2 first) */
+
+ outb(IO_ICU2, 0x11); /* reset; program device, four bytes */
+ outb(IO_ICU2+1, NRSVIDT+8); /* starting at this vector index */
+ outb(IO_ICU2+1,2); /* my slave id is 2 */
+#ifdef AUTO_EOI_2
+ outb(IO_ICU2+1, 2 | 1); /* auto EOI, 8086 mode */
+#else
+ outb(IO_ICU2+1,1); /* 8086 mode */
+#endif
+ outb(IO_ICU2+1, 0xff); /* leave interrupts masked */
+ outb(IO_ICU2, 0x0a); /* default to IRR on read */
+}
+
+/* region of physical memory known to be contiguous */
+vm_offset_t isaphysmem;
+static caddr_t dma_bounce[8]; /* XXX per-channel bounce buffer, carved from isaphysmem on first use */
+static char bounced[8]; /* XXX nonzero while a channel's transfer is going through its bounce buffer */
+#define MAXDMASZ 512 /* XXX */
+
+/* high byte of address is stored in this port for i-th dma channel */
+static short dmapageport[8] =
+ { 0x87, 0x83, 0x81, 0x82, 0x8f, 0x8b, 0x89, 0x8a };
+
+/*
+ * isa_dmacascade(): program 8237 DMA controller channel to accept
+ * external dma control by a board.
+ */
+/*
+ * isa_dmacascade(): put an 8237 channel into cascade mode so an external
+ * bus-master board may drive DMA on that channel itself.
+ */
+void isa_dmacascade(unsigned chan)
+{
+ if (chan > 7)
+ panic("isa_dmacascade: impossible request");
+
+ /* set cascade mode, then unmask the channel on its controller */
+ if (chan & 4) {
+ /* controller 2 takes only the low two bits of the channel */
+ outb(DMA2_MODE, DMA37MD_CASCADE | (chan & 3));
+ outb(DMA2_SMSK, chan & 3);
+ } else {
+ outb(DMA1_MODE, DMA37MD_CASCADE | chan);
+ outb(DMA1_SMSK, chan);
+ }
+}
+
+/*
+ * isa_dmastart(): program 8237 DMA controller channel, avoid page alignment
+ * problems by using a bounce buffer.
+ */
+void isa_dmastart(int flags, caddr_t addr, unsigned nbytes, unsigned chan)
+{ vm_offset_t phys;
+ int waport;
+ caddr_t newaddr;
+
+ /* channels 0-3 move bytes (64K max); channels 4-7 move words
+ * (128K max, and the buffer must be word aligned) */
+ if ( chan > 7
+ || (chan < 4 && nbytes > (1<<16))
+ || (chan >= 4 && (nbytes > (1<<17) || (u_int)addr & 1)))
+ panic("isa_dmastart: impossible request");
+
+ /* bounce if the buffer is discontiguous, beyond ISA reach, or
+ * crosses a DMA page boundary */
+ if (isa_dmarangecheck(addr, nbytes, chan)) {
+ if (dma_bounce[chan] == 0)
+ dma_bounce[chan] =
+ /*(caddr_t)malloc(MAXDMASZ, M_TEMP, M_WAITOK);*/
+ (caddr_t) isaphysmem + NBPG*chan;
+ bounced[chan] = 1;
+ newaddr = dma_bounce[chan];
+ *(int *) newaddr = 0; /* XXX */
+
+ /* copy bounce buffer on write */
+ if (!(flags & B_READ))
+ bcopy(addr, newaddr, nbytes);
+ addr = newaddr;
+ }
+
+ /* translate to physical */
+ phys = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
+
+ if ((chan & 4) == 0) {
+ /*
+ * Program one of DMA channels 0..3. These are
+ * byte mode channels.
+ */
+ /* set dma channel mode, and reset address ff */
+ if (flags & B_READ)
+ outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|chan);
+ else
+ outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_READ|chan);
+ outb(DMA1_FFC, 0);
+
+ /* send start address */
+ waport = DMA1_CHN(chan);
+ outb(waport, phys); /* low byte, then high byte */
+ outb(waport, phys>>8);
+ outb(dmapageport[chan], phys>>16);
+
+ /* send count */
+ outb(waport + 1, --nbytes); /* 8237 counts N-1 */
+ outb(waport + 1, nbytes>>8);
+
+ /* unmask channel */
+ outb(DMA1_SMSK, chan);
+ } else {
+ /*
+ * Program one of DMA channels 4..7. These are
+ * word mode channels.
+ */
+ /* set dma channel mode, and reset address ff */
+ if (flags & B_READ)
+ outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|(chan&3));
+ else
+ outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_READ|(chan&3));
+ outb(DMA2_FFC, 0);
+
+ /* send start address (word address: byte address >> 1) */
+ waport = DMA2_CHN(chan - 4);
+ outb(waport, phys>>1);
+ outb(waport, phys>>9);
+ outb(dmapageport[chan], phys>>16);
+
+ /* send count (in words) */
+ nbytes >>= 1;
+ outb(waport + 2, --nbytes); /* 8237 counts N-1 */
+ outb(waport + 2, nbytes>>8);
+
+ /* unmask channel */
+ outb(DMA2_SMSK, chan & 3);
+ }
+}
+
+/*
+ * Finish a transfer set up by isa_dmastart(): when the transfer went
+ * through a bounce buffer, copy the data back to the caller's buffer.
+ * NOTE(review): the copy-back runs for writes as well as reads when
+ * bounced; the commented-out test below suggests it was meant for reads
+ * only -- confirm before changing.
+ */
+void isa_dmadone(int flags, caddr_t addr, int nbytes, int chan)
+{
+
+ /* copy bounce buffer on read */
+ /*if ((flags & (B_PHYS|B_READ)) == (B_PHYS|B_READ))*/
+ if (bounced[chan]) {
+ bcopy(dma_bounce[chan], addr, nbytes);
+ bounced[chan] = 0;
+ }
+}
+
+/*
+ * Check for problems with the address range of a DMA transfer
+ * (non-contiguous physical pages, outside of bus address space,
+ * crossing DMA page boundaries).
+ * Return true if special handling needed.
+ */
+
+int
+isa_dmarangecheck(caddr_t va, unsigned length, unsigned chan) {
+ vm_offset_t phys, priorpage = 0, endva;
+ /* word channels (4-7) wrap at 128K, byte channels at 64K */
+ u_int dma_pgmsk = (chan & 4) ? ~(128*1024-1) : ~(64*1024-1);
+
+ /* walk the buffer one page at a time */
+ endva = (vm_offset_t)round_page(va + length);
+ for (; va < (caddr_t) endva ; va += NBPG) {
+ phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
+#define ISARAM_END RAM_END
+ if (phys == 0)
+ panic("isa_dmacheck: no physical page present");
+ if (phys >= ISARAM_END) /* beyond what the ISA bus can address */
+ return (1);
+ if (priorpage) {
+ if (priorpage + NBPG != phys) /* physically discontiguous */
+ return (1);
+ /* check if crossing a DMA page boundary */
+ if (((u_int)priorpage ^ (u_int)phys) & dma_pgmsk)
+ return (1);
+ }
+ priorpage = phys;
+ }
+ return (0); /* safe for direct DMA, no bounce needed */
+}
+
+/* head of queue waiting for physmem to become available */
+struct buf isa_physmemq;
+
+/* blocked waiting for resource to become free for exclusive use */
+static isaphysmemflag; /* implicit int: B_BUSY/B_WANTED flag word for isaphysmem */
+/* if waited for and call requested when free (B_CALL) */
+static void (*isaphysmemunblock)(); /* needs to be a list */
+
+/*
+ * Allocate contiguous physical memory for transfer, returning
+ * a *virtual* address to region. May block waiting for resource.
+ * (assumed to be called at splbio())
+ */
+caddr_t
+isa_allocphysmem(caddr_t va, unsigned length, void (*func)()) {
+
+ /* NOTE(review): va and length are unused -- the whole isaphysmem
+ * region is handed out regardless; confirm intent */
+ isaphysmemunblock = func; /* invoked by isa_freephysmem() if we had to wait */
+ while (isaphysmemflag & B_BUSY) {
+ isaphysmemflag |= B_WANTED; /* ask the holder to wake us on free */
+ tsleep((caddr_t)&isaphysmemflag, PRIBIO, "isaphys", 0);
+ }
+ isaphysmemflag |= B_BUSY;
+
+ return((caddr_t)isaphysmem);
+}
+
+/*
+ * Free contiguous physical memory used for transfer.
+ * (assumed to be called at splbio())
+ */
+/*
+ * Free contiguous physical memory used for transfer.
+ * (assumed to be called at splbio())
+ *
+ * Clears the busy bit; if a sleeper marked the region wanted, clear the
+ * wanted bit, wake the sleeper, and run the registered unblock callback.
+ */
+void
+isa_freephysmem(caddr_t va, unsigned length) {
+
+ isaphysmemflag &= ~B_BUSY;
+ if (isaphysmemflag & B_WANTED) {
+ /* was `isaphysmemflag &= B_WANTED;`, which kept B_WANTED set
+ * forever (it cleared every bit EXCEPT the one it meant to
+ * clear), causing a spurious wakeup/callback on every later
+ * free; clear the bit instead */
+ isaphysmemflag &= ~B_WANTED;
+ wakeup((caddr_t)&isaphysmemflag);
+ if (isaphysmemunblock)
+ (*isaphysmemunblock)();
+ }
+}
+
+/*
+ * Handle a NMI, possibly a machine check.
+ * return true to panic system, false to ignore.
+ */
+int
+isa_nmi(cd)
+ int cd; /* NOTE(review): unused here */
+{
+
+ /* log system-status (port 61) and RTC-index (port 70) for diagnosis */
+ log(LOG_CRIT, "\nNMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70));
+ return(0); /* 0 = ignore; nonzero would tell the caller to panic */
+}
+
+/*
+ * Caught a stray interrupt, notify
+ */
+void
+isa_strayintr(d)
+ int d; /* identifies the stray vector (logged below) */
+{
+
+ /* DON'T BOTHER FOR NOW! */
+ /* for some reason, we get bursts of intr #7, even if not enabled! */
+ /*
+ * Well the reason you got bursts of intr #7 is because someone
+ * raised an interrupt line and dropped it before the 8259 could
+ * prioritize it. This is documented in the intel data book. This
+ * means you have BAD hardware! I have changed this so that only
+ * the first 5 get logged, then it quits logging them, and puts
+ * out a special message. rgrimes 3/25/1993
+ */
+ extern u_long intrcnt_stray;
+
+ intrcnt_stray++;
+ if (intrcnt_stray <= 5)
+ log(LOG_ERR,"ISA strayintr %x\n", d);
+ if (intrcnt_stray == 5)
+ log(LOG_CRIT,"Too many ISA strayintr not logging any more\n");
+}
+
+/*
+ * find an ISA device in a given isa_devtab_* table, given
+ * the table to search, the expected id_driver entry, and the unit number.
+ *
+ * this function is defined in isa_device.h, and this location is debatable;
+ * i put it there because it's useless w/o, and directly operates on
+ * the other stuff in that file.
+ *
+ */
+
+/*
+ * Scan an isa_devtab_* table for the entry matching both the driver
+ * pointer and the unit number.  Returns NULL when no entry matches or
+ * when driverp itself is NULL.
+ */
+struct isa_device *find_isadev(table, driverp, unit)
+ struct isa_device *table;
+ struct isa_driver *driverp;
+ int unit;
+{
+ struct isa_device *idp;
+
+ if (driverp == NULL) /* sanity check */
+ return NULL;
+
+ for (idp = table; idp->id_driver != 0; idp++)
+ if (idp->id_driver == driverp && idp->id_unit == unit)
+ return idp;
+
+ return NULL;
+}
+
+/*
+ * Return nonzero if a (masked) irq is pending for a given device.
+ */
+/*
+ * Return nonzero if a (masked) irq is pending for a given device.
+ * Reads the 8259's request register directly (isa_defaultirq() leaves
+ * both ICUs defaulted to IRR on read).
+ */
+int
+isa_irq_pending(dvp)
+ struct isa_device *dvp;
+{
+ unsigned id_irq;
+
+ id_irq = (unsigned short) dvp->id_irq; /* XXX silly type in struct */
+ if (id_irq & 0xff)
+ return (inb(IO_ICU1) & id_irq); /* master: IRQ 0-7 */
+ return (inb(IO_ICU2) & (id_irq >> 8)); /* slave: IRQ 8-15 */
+}
diff --git a/sys/amd64/isa/isa.h b/sys/amd64/isa/isa.h
new file mode 100644
index 0000000..e2a26e7
--- /dev/null
+++ b/sys/amd64/isa/isa.h
@@ -0,0 +1,181 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)isa.h 5.7 (Berkeley) 5/9/91
+ * $Id: isa.h,v 1.4 1994/01/05 15:03:28 rgrimes Exp $
+ */
+
+#ifndef _I386_ISA_ISA_H_
+#define _I386_ISA_ISA_H_ 1
+
+/*
+ * ISA Bus conventions
+ */
+
+#ifndef LOCORE
+#include <sys/cdefs.h>
+
+extern unsigned int atdevbase; /* offset in virtual memory of ISA io mem */
+unsigned char rtcin __P((int));
+#endif
+
+
+/*
+ * Input / Output Port Assignments
+ */
+
+#ifndef IO_BEGIN
+#define IO_ISABEGIN 0x000 /* 0x000 - Beginning of I/O Registers */
+
+ /* CPU Board */
+#define IO_DMA1 0x000 /* 8237A DMA Controller #1 */
+#define IO_ICU1 0x020 /* 8259A Interrupt Controller #1 */
+#define IO_TIMER1 0x040 /* 8253 Timer #1 */
+#define IO_TIMER2 0x048 /* 8253 Timer #2 */
+#define IO_KBD 0x060 /* 8042 Keyboard */
+#define IO_PPI 0x061 /* Programmable Peripheral Interface */
+#define IO_RTC 0x070 /* RTC */
+#define IO_NMI IO_RTC /* NMI Control */
+#define IO_DMAPG 0x080 /* DMA Page Registers */
+#define IO_ICU2 0x0A0 /* 8259A Interrupt Controller #2 */
+#define IO_DMA2 0x0C0 /* 8237A DMA Controller #2 */
+#define IO_NPX 0x0F0 /* Numeric Coprocessor */
+
+ /* Cards */
+ /* 0x100 - 0x16F Open */
+
+#define IO_WD2 0x170 /* Secondary Fixed Disk Controller */
+
+ /* 0x178 - 0x1EF Open */
+
+#define IO_WD1 0x1f0 /* Primary Fixed Disk Controller */
+#define IO_GAME 0x200 /* Game Controller */
+
+ /* 0x208 - 0x277 Open */
+
+#define IO_LPT2 0x278 /* Parallel Port #2 */
+
+ /* 0x280 - 0x2E7 Open */
+
+#define IO_COM4 0x2e8 /* COM4 i/o address */
+
+ /* 0x2F0 - 0x2F7 Open */
+
+#define IO_COM2 0x2f8 /* COM2 i/o address */
+ /* 0x300 - 0x32F Open */
+
+#define IO_BT0 0x330 /* bustek 742a default addr. */
+#define IO_AHA0 0x330 /* adaptec 1542 default addr. */
+#define IO_UHA0 0x330 /* ultrastore 14f default addr. */
+#define IO_BT1 0x334 /* bustek 742a default addr. */
+#define IO_AHA1 0x334 /* adaptec 1542 default addr. */
+ /* 0x338 - 0x36F Open */
+
+#define IO_FD2 0x370 /* secondary base i/o address */
+#define IO_LPT1 0x378 /* Parallel Port #1 */
+
+ /* 0x380 - 0x3AF Open */
+
+#define IO_MDA 0x3B0 /* Monochrome Adapter */
+#define IO_LPT3 0x3BC /* Monochrome Adapter Printer Port */
+#define IO_VGA 0x3C0 /* E/VGA Ports */
+#define IO_CGA 0x3D0 /* CGA Ports */
+
+ /* 0x3E0 - 0x3E7 Open */
+
+#define IO_COM3 0x3e8 /* COM3 i/o address */
+#define IO_FD1 0x3f0 /* primary base i/o address */
+#define IO_COM1 0x3f8 /* COM1 i/o address */
+
+#define IO_ISAEND 0x3FF /* - 0x3FF End of I/O Registers */
+#endif /* IO_ISABEGIN */
+
+/*
+ * Input / Output Port Sizes - these are from several sources, and tend
+ * to be the larger of what was found, ie COM ports can be 4, but some
+ * boards do not fully decode the address, thus 8 ports are used.
+ */
+
+#ifndef IO_ISASIZES
+#define IO_ISASIZES
+
+#define IO_COMSIZE 8 /* 8250, 16X50 com controllers (4?) */
+#define IO_CGASIZE 16 /* CGA controllers */
+#define IO_DMASIZE 16 /* 8237 DMA controllers */
+#define IO_DPGSIZE 32 /* 74LS612 DMA page registers */
+#define IO_FDCSIZE 8 /* Nec765 floppy controllers */
+#define IO_WDCSIZE 8 /* WD compatible disk controllers */
+#define IO_GAMSIZE 16 /* AT compatible game controllers */
+#define IO_ICUSIZE 16 /* 8259A interrupt controllers */
+#define IO_KBDSIZE 16 /* 8042 Keyboard controllers */
+#define IO_LPTSIZE 8 /* LPT controllers, some use only 4 */
+#define IO_MDASIZE 16 /* Monochrome display controllers */
+#define IO_RTCSIZE 16 /* CMOS real time clock, NMI control */
+#define IO_TMRSIZE 16 /* 8253 programmable timers */
+#define IO_NPXSIZE 16 /* 80387/80487 NPX registers */
+#define IO_VGASIZE 16 /* VGA controllers */
+
+#endif /* IO_ISASIZES */
+
+/*
+ * Input / Output Memory Physical Addresses
+ */
+
+#ifndef IOM_BEGIN
+#define IOM_BEGIN 0x0a0000 /* Start of I/O Memory "hole" */
+#define IOM_END 0x100000 /* End of I/O Memory "hole" */
+#define IOM_SIZE (IOM_END - IOM_BEGIN)
+#endif /* IOM_BEGIN */
+
+/*
+ * RAM Physical Address Space (ignoring the above mentioned "hole")
+ */
+
+#ifndef RAM_BEGIN
+#define RAM_BEGIN 0x0000000 /* Start of RAM Memory */
+#define RAM_END 0x1000000 /* End of RAM Memory */
+#define RAM_SIZE (RAM_END - RAM_BEGIN)
+#endif /* RAM_BEGIN */
+
+/*
+ * Oddball Physical Memory Addresses
+ */
+#ifndef COMPAQ_RAMRELOC
+#define COMPAQ_RAMRELOC 0x80c00000 /* Compaq RAM relocation/diag */
+#define COMPAQ_RAMSETUP 0x80c00002 /* Compaq RAM setup */
+#define WEITEK_FPU 0xC0000000 /* WTL 2167 */
+#define CYRIX_EMC 0xC0000000 /* Cyrix EMC */
+#endif /* COMPAQ_RAMRELOC */
+#endif /* _I386_ISA_ISA_H_ */
diff --git a/sys/amd64/isa/npx.c b/sys/amd64/isa/npx.c
new file mode 100644
index 0000000..a3ce1e2
--- /dev/null
+++ b/sys/amd64/isa/npx.c
@@ -0,0 +1,554 @@
+/*-
+ * Copyright (c) 1990 William Jolitz.
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)npx.c 7.2 (Berkeley) 5/12/91
+ * $Id: npx.c,v 1.6 1994/01/03 07:55:43 davidg Exp $
+ */
+
+#include "npx.h"
+#if NNPX > 0
+
+#include "param.h"
+#include "systm.h"
+#include "conf.h"
+#include "file.h"
+#include "proc.h"
+#include "machine/cpu.h"
+#include "machine/pcb.h"
+#include "machine/trap.h"
+#include "ioctl.h"
+#include "machine/specialreg.h"
+#include "i386/isa/icu.h"
+#include "i386/isa/isa_device.h"
+#include "i386/isa/isa.h"
+
+/*
+ * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
+ */
+
+#ifdef __GNUC__
+
+#define disable_intr() __asm("cli")
+#define enable_intr() __asm("sti")
+#define fldcw(addr) __asm("fldcw %0" : : "m" (*addr))
+#define fnclex() __asm("fnclex")
+#define fninit() __asm("fninit")
+#define fnsave(addr) __asm("fnsave %0" : "=m" (*addr) : "0" (*addr))
+#define fnstcw(addr) __asm("fnstcw %0" : "=m" (*addr) : "0" (*addr))
+#define fnstsw(addr) __asm("fnstsw %0" : "=m" (*addr) : "0" (*addr))
+#define fp_divide_by_0() __asm("fldz; fld1; fdiv %st,%st(1); fwait")
+#define frstor(addr) __asm("frstor %0" : : "m" (*addr))
+#define fwait() __asm("fwait")
+#define read_eflags() ({u_long ef; \
+ __asm("pushf; popl %0" : "=a" (ef)); \
+ ef; })
+#define start_emulating() __asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
+ : : "n" (CR0_TS) : "ax")
+#define stop_emulating() __asm("clts")
+#define write_eflags(ef) __asm("pushl %0; popf" : : "a" ((u_long) ef))
+
+#else /* not __GNUC__ */
+
+void disable_intr __P((void));
+void enable_intr __P((void));
+void fldcw __P((caddr_t addr));
+void fnclex __P((void));
+void fninit __P((void));
+void fnsave __P((caddr_t addr));
+void fnstcw __P((caddr_t addr));
+void fnstsw __P((caddr_t addr));
+void fp_divide_by_0 __P((void));
+void frstor __P((caddr_t addr));
+void fwait __P((void));
+u_long read_eflags __P((void));
+void start_emulating __P((void));
+void stop_emulating __P((void));
+void write_eflags __P((u_long ef));
+
+#endif /* __GNUC__ */
+
+typedef u_char bool_t;
+
+extern struct gate_descriptor idt[];
+
+int npxdna __P((void));
+void npxexit __P((struct proc *p));
+void npxinit __P((u_int control));
+void npxintr __P((struct intrframe frame));
+void npxsave __P((struct save87 *addr));
+static int npxattach __P((struct isa_device *dvp));
+static int npxprobe __P((struct isa_device *dvp));
+static int npxprobe1 __P((struct isa_device *dvp));
+
+struct isa_driver npxdriver = {
+ npxprobe, npxattach, "npx",
+};
+
+u_int npx0_imask;
+struct proc *npxproc;
+
+static bool_t npx_ex16;
+static bool_t npx_exists;
+static struct gate_descriptor npx_idt_probeintr;
+static int npx_intrno;
+static volatile u_int npx_intrs_while_probing;
+static bool_t npx_irq13;
+static volatile u_int npx_traps_while_probing;
+
+/*
+ * Special interrupt handlers. Someday intr0-intr15 will be used to count
+ * interrupts. We'll still need a special exception 16 handler. The busy
+ * latch stuff in probintr() can be moved to npxprobe().
+ */
+void probeintr(void);
+asm
+("
+ .text
+_probeintr:
+ ss
+ incl _npx_intrs_while_probing
+ pushl %eax
+ movb $0x20,%al # EOI (asm in strings loses cpp features)
+ outb %al,$0xa0 # IO_ICU2
+ outb %al,$0x20 #IO_ICU1
+ movb $0,%al
+ outb %al,$0xf0 # clear BUSY# latch
+ popl %eax
+ iret
+");
+
+void probetrap(void);
+asm
+("
+ .text
+_probetrap:
+ ss
+ incl _npx_traps_while_probing
+ fnclex
+ iret
+");
+
+/*
+ * Probe routine. Initialize cr0 to give correct behaviour for [f]wait
+ * whether the device exists or not (XXX should be elsewhere). Set flags
+ * to tell npxattach() what to do. Modify device struct if npx doesn't
+ * need to use interrupts. Return 1 if device exists.
+ */
+static int
+npxprobe(dvp)
+ struct isa_device *dvp;
+{
+ int result;
+ u_long save_eflags;
+ u_char save_icu1_mask;
+ u_char save_icu2_mask;
+ struct gate_descriptor save_idt_npxintr;
+ struct gate_descriptor save_idt_npxtrap;
+ /*
+ * This routine is now just a wrapper for npxprobe1(), to install
+ * special npx interrupt and trap handlers, to enable npx interrupts
+ * and to disable other interrupts. Someday isa_configure() will
+ * install suitable handlers and run with interrupts enabled so we
+ * won't need to do so much here.
+ */
+ npx_intrno = NRSVIDT + ffs(dvp->id_irq) - 1;
+ save_eflags = read_eflags();
+ disable_intr();
+ save_icu1_mask = inb(IO_ICU1 + 1);
+ save_icu2_mask = inb(IO_ICU2 + 1);
+ save_idt_npxintr = idt[npx_intrno];
+ save_idt_npxtrap = idt[16];
+ outb(IO_ICU1 + 1, ~(IRQ_SLAVE | dvp->id_irq));
+ outb(IO_ICU2 + 1, ~(dvp->id_irq >> 8));
+ setidt(16, probetrap, SDT_SYS386TGT, SEL_KPL);
+ setidt(npx_intrno, probeintr, SDT_SYS386IGT, SEL_KPL);
+ npx_idt_probeintr = idt[npx_intrno];
+ enable_intr();
+ result = npxprobe1(dvp);
+ disable_intr();
+ outb(IO_ICU1 + 1, save_icu1_mask);
+ outb(IO_ICU2 + 1, save_icu2_mask);
+ idt[npx_intrno] = save_idt_npxintr;
+ idt[16] = save_idt_npxtrap;
+ write_eflags(save_eflags);
+ return (result);
+}
+
+static int
+npxprobe1(dvp)
+ struct isa_device *dvp;
+{
+ int control;
+ int status;
+#ifdef lint
+ npxintr();
+#endif
+ /*
+ * Partially reset the coprocessor, if any. Some BIOS's don't reset
+ * it after a warm boot.
+ */
+ outb(0xf1, 0); /* full reset on some systems, NOP on others */
+ outb(0xf0, 0); /* clear BUSY# latch */
+ /*
+ * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
+ * instructions. We must set the CR0_MP bit and use the CR0_TS
+ * bit to control the trap, because setting the CR0_EM bit does
+ * not cause WAIT instructions to trap. It's important to trap
+ * WAIT instructions - otherwise the "wait" variants of no-wait
+ * control instructions would degenerate to the "no-wait" variants
+ * after FP context switches but work correctly otherwise. It's
+ * particularly important to trap WAITs when there is no NPX -
+ * otherwise the "wait" variants would always degenerate.
+ *
+ * Try setting CR0_NE to get correct error reporting on 486DX's.
+ * Setting it should fail or do nothing on lesser processors.
+ */
+ load_cr0(rcr0() | CR0_MP | CR0_NE);
+ /*
+ * But don't trap while we're probing.
+ */
+ stop_emulating();
+ /*
+ * Finish resetting the coprocessor, if any. If there is an error
+ * pending, then we may get a bogus IRQ13, but probeintr() will handle
+ * it OK. Bogus halts have never been observed, but we enabled
+ * IRQ13 and cleared the BUSY# latch early to handle them anyway.
+ */
+ fninit();
+ DELAY(1000); /* wait for any IRQ13 (fwait might hang) */
+#ifdef DIAGNOSTIC
+ if (npx_intrs_while_probing != 0)
+ printf("fninit caused %u bogus npx interrupt(s)\n",
+ npx_intrs_while_probing);
+ if (npx_traps_while_probing != 0)
+ printf("fninit caused %u bogus npx trap(s)\n",
+ npx_traps_while_probing);
+#endif
+ /*
+ * Check for a status of mostly zero.
+ */
+ status = 0x5a5a;
+ fnstsw(&status);
+ if ((status & 0xb8ff) == 0) {
+ /*
+ * Good, now check for a proper control word.
+ */
+ control = 0x5a5a;
+ fnstcw(&control);
+ if ((control & 0x1f3f) == 0x033f) {
+ npx_exists = 1;
+ /*
+ * We have an npx, now divide by 0 to see if exception
+ * 16 works.
+ */
+ control &= ~(1 << 2); /* enable divide by 0 trap */
+ fldcw(&control);
+ npx_traps_while_probing = npx_intrs_while_probing = 0;
+ fp_divide_by_0();
+ if (npx_traps_while_probing != 0) {
+ /*
+ * Good, exception 16 works.
+ */
+ npx_ex16 = 1;
+ dvp->id_irq = 0; /* zap the interrupt */
+ /*
+ * special return value to flag that we do not
+ * actually use any I/O registers
+ */
+ return (-1);
+ }
+ if (npx_intrs_while_probing != 0) {
+ /*
+ * Bad, we are stuck with IRQ13.
+ */
+ npx_irq13 = 1;
+ npx0_imask = dvp->id_irq; /* npxattach too late */
+ return (IO_NPXSIZE);
+ }
+ /*
+ * Worse, even IRQ13 is broken. Use emulator.
+ */
+ }
+ }
+ /*
+ * Probe failed, but we want to get to npxattach to initialize the
+ * emulator and say that it has been installed. XXX handle devices
+ * that aren't really devices better.
+ */
+ dvp->id_irq = 0;
+ /*
+ * special return value to flag that we do not
+ * actually use any I/O registers
+ */
+ return (-1);
+}
+
+/*
+ * Attach routine - announce which it is, and wire into system
+ */
+int
+npxattach(dvp)
+ struct isa_device *dvp;
+{
+ if (!npx_ex16 && !npx_irq13) {
+ if (npx_exists) {
+ printf("npx%d: Error reporting broken, using 387 emulator\n",dvp->id_unit);
+ npx_exists = 0;
+ } else {
+ printf("npx%d: 387 Emulator\n",dvp->id_unit);
+ }
+ }
+ npxinit(__INITIAL_NPXCW__);
+ return (1); /* XXX unused */
+}
+
+/*
+ * Initialize floating point unit.
+ */
+void
+npxinit(control)
+ u_int control;
+{
+ struct save87 dummy;
+
+ if (!npx_exists)
+ return;
+ /*
+ * fninit has the same h/w bugs as fnsave. Use the detoxified
+ * fnsave to throw away any junk in the fpu. fnsave initializes
+ * the fpu and sets npxproc = NULL as important side effects.
+ */
+ npxsave(&dummy);
+ stop_emulating();
+ fldcw(&control);
+ if (curpcb != NULL)
+ fnsave(&curpcb->pcb_savefpu);
+ start_emulating();
+}
+
+/*
+ * Free coprocessor (if we have it).
+ */
+void
+npxexit(p)
+ struct proc *p;
+{
+
+ if (p == npxproc) {
+ start_emulating();
+ npxproc = NULL;
+ }
+}
+
+/*
+ * Record the FPU state and reinitialize it all except for the control word.
+ * Then generate a SIGFPE.
+ *
+ * Reinitializing the state allows naive SIGFPE handlers to longjmp without
+ * doing any fixups.
+ *
+ * XXX there is currently no way to pass the full error state to signal
+ * handlers, and if this is a nested interrupt there is no way to pass even
+ * a status code! So there is no way to have a non-naive SIGFPE handler. At
+ * best a handler could do an fninit followed by an fldcw of a static value.
+ * fnclex would be of little use because it would leave junk on the FPU stack.
+ * Returning from the handler would be even less safe than usual because
+ * IRQ13 exception handling makes exceptions even less precise than usual.
+ */
+void
+npxintr(frame)
+ struct intrframe frame;
+{
+ int code;
+
+ if (npxproc == NULL || !npx_exists) {
+ /* XXX no %p in stand/printf.c. Cast to quiet gcc -Wall. */
+ printf("npxintr: npxproc = %lx, curproc = %lx, npx_exists = %d\n",
+ (u_long) npxproc, (u_long) curproc, npx_exists);
+ panic("npxintr from nowhere");
+ }
+ if (npxproc != curproc) {
+ printf("npxintr: npxproc = %lx, curproc = %lx, npx_exists = %d\n",
+ (u_long) npxproc, (u_long) curproc, npx_exists);
+ panic("npxintr from non-current process");
+ }
+ /*
+ * Save state. This does an implied fninit. It had better not halt
+ * the cpu or we'll hang.
+ */
+ outb(0xf0, 0);
+ fnsave(&curpcb->pcb_savefpu);
+ fwait();
+ /*
+ * Restore control word (was clobbered by fnsave).
+ */
+ fldcw(&curpcb->pcb_savefpu.sv_env.en_cw);
+ fwait();
+ /*
+ * Remember the exception status word and tag word. The current
+ * (almost fninit'ed) fpu state is in the fpu and the exception
+ * state just saved will soon be junk. However, the implied fninit
+ * doesn't change the error pointers or register contents, and we
+ * preserved the control word and will copy the status and tag
+ * words, so the complete exception state can be recovered.
+ */
+ curpcb->pcb_savefpu.sv_ex_sw = curpcb->pcb_savefpu.sv_env.en_sw;
+ curpcb->pcb_savefpu.sv_ex_tw = curpcb->pcb_savefpu.sv_env.en_tw;
+
+ /*
+ * Pass exception to process.
+ */
+ if (ISPL(frame.if_cs) == SEL_UPL) {
+ /*
+ * Interrupt is essentially a trap, so we can afford to call
+ * the SIGFPE handler (if any) as soon as the interrupt
+ * returns.
+ *
+ * XXX little or nothing is gained from this, and plenty is
+ * lost - the interrupt frame has to contain the trap frame
+ * (this is otherwise only necessary for the rescheduling trap
+ * in doreti, and the frame for that could easily be set up
+ * just before it is used).
+ */
+ curproc->p_md.md_regs = (int *)&frame.if_es;
+#ifdef notyet
+ /*
+ * Encode the appropriate code for detailed information on
+ * this exception.
+ */
+ code = XXX_ENCODE(curpcb->pcb_savefpu.sv_ex_sw);
+#else
+ code = 0; /* XXX */
+#endif
+ trapsignal(curproc, SIGFPE, code);
+ } else {
+ /*
+ * Nested interrupt. These losers occur when:
+ * o an IRQ13 is bogusly generated at a bogus time, e.g.:
+ * o immediately after an fnsave or frstor of an
+ * error state.
+ * o a couple of 386 instructions after
+ * "fstpl _memvar" causes a stack overflow.
+ * These are especially nasty when combined with a
+ * trace trap.
+ * o an IRQ13 occurs at the same time as another higher-
+ * priority interrupt.
+ *
+ * Treat them like a true async interrupt.
+ */
+ psignal(npxproc, SIGFPE);
+ }
+}
+
+/*
+ * Implement device not available (DNA) exception
+ *
+ * It would be better to switch FP context here (only). This would require
+ * saving the state in the proc table instead of in the pcb.
+ */
+int
+npxdna()
+{
+ if (!npx_exists)
+ return (0);
+ if (npxproc != NULL) {
+ printf("npxdna: npxproc = %lx, curproc = %lx\n",
+ (u_long) npxproc, (u_long) curproc);
+ panic("npxdna");
+ }
+ stop_emulating();
+ /*
+ * Record new context early in case frstor causes an IRQ13.
+ */
+ npxproc = curproc;
+ /*
+ * The following frstor may cause an IRQ13 when the state being
+ * restored has a pending error. The error will appear to have been
+ * triggered by the current (npx) user instruction even when that
+ * instruction is a no-wait instruction that should not trigger an
+ * error (e.g., fnclex). On at least one 486 system all of the
+ * no-wait instructions are broken the same as frstor, so our
+ * treatment does not amplify the breakage. On at least one
+ * 386/Cyrix 387 system, fnclex works correctly while frstor and
+ * fnsave are broken, so our treatment breaks fnclex if it is the
+ * first FPU instruction after a context switch.
+ */
+ frstor(&curpcb->pcb_savefpu);
+
+ return (1);
+}
+
+/*
+ * Wrapper for fnsave instruction to handle h/w bugs. If there is an error
+ * pending, then fnsave generates a bogus IRQ13 on some systems. Force
+ * any IRQ13 to be handled immediately, and then ignore it. This routine is
+ * often called at splhigh so it must not use many system services. In
+ * particular, it's much easier to install a special handler than to
+ * guarantee that it's safe to use npxintr() and its supporting code.
+ */
+void
+npxsave(addr)
+ struct save87 *addr;
+{
+ u_char icu1_mask;
+ u_char icu2_mask;
+ u_char old_icu1_mask;
+ u_char old_icu2_mask;
+ struct gate_descriptor save_idt_npxintr;
+
+ disable_intr();
+ old_icu1_mask = inb(IO_ICU1 + 1);
+ old_icu2_mask = inb(IO_ICU2 + 1);
+ save_idt_npxintr = idt[npx_intrno];
+ outb(IO_ICU1 + 1, old_icu1_mask & ~(IRQ_SLAVE | npx0_imask));
+ outb(IO_ICU2 + 1, old_icu2_mask & ~(npx0_imask >> 8));
+ idt[npx_intrno] = npx_idt_probeintr;
+ enable_intr();
+ stop_emulating();
+ fnsave(addr);
+ fwait();
+ start_emulating();
+ npxproc = NULL;
+ disable_intr();
+ icu1_mask = inb(IO_ICU1 + 1); /* masks may have changed */
+ icu2_mask = inb(IO_ICU2 + 1);
+ outb(IO_ICU1 + 1,
+ (icu1_mask & ~npx0_imask) | (old_icu1_mask & npx0_imask));
+ outb(IO_ICU2 + 1,
+ (icu2_mask & ~(npx0_imask >> 8))
+ | (old_icu2_mask & (npx0_imask >> 8)));
+ idt[npx_intrno] = save_idt_npxintr;
+ enable_intr(); /* back to usual state */
+}
+
+#endif /* NNPX > 0 */
diff --git a/sys/amd64/isa/timerreg.h b/sys/amd64/isa/timerreg.h
new file mode 100644
index 0000000..5742f66
--- /dev/null
+++ b/sys/amd64/isa/timerreg.h
@@ -0,0 +1,93 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Header: timerreg.h,v 1.2 93/02/28 15:08:58 mccanne Exp
+ * $Id$
+ */
+
+/*
+ *
+ * Register definitions for the Intel 8253 Programmable Interval Timer.
+ *
+ * This chip has three independent 16-bit down counters that can be
+ * read on the fly. There are three mode registers and three countdown
+ * registers. The countdown registers are addressed directly, via the
+ * first three I/O ports. The three mode registers are accessed via
+ * the fourth I/O port, with two bits in the mode byte indicating the
+ * register. (Why are hardware interfaces always so braindead?).
+ *
+ * To write a value into the countdown register, the mode register
+ * is first programmed with a command indicating which byte of
+ * the two byte register is to be modified. The three possibilities
+ * are load msb (TMR_MR_MSB), load lsb (TMR_MR_LSB), or load lsb then
+ * msb (TMR_MR_BOTH).
+ *
+ * To read the current value ("on the fly") from the countdown register,
+ * you write a "latch" command into the mode register, then read the stable
+ * value from the corresponding I/O port. For example, you write
+ * TMR_MR_LATCH into the corresponding mode register. Presumably,
+ * after doing this, a write operation to the I/O port would result
+ * in undefined behavior (but hopefully not fry the chip).
+ * Reading in this manner has no side effects.
+ *
+ * The outputs of the three timers are connected as follows:
+ *
+ * timer 0 -> irq 0
+ * timer 1 -> dma chan 0 (for dram refresh)
+ * timer 2 -> speaker (via keyboard controller)
+ *
+ * Timer 0 is used to call hardclock.
+ * Timer 2 is used to generate console beeps.
+ */
+
+/*
+ * Macros for specifying values to be written into a mode register.
+ */
+#define TIMER_CNTR0 (IO_TIMER1 + 0) /* timer 0 counter port */
+#define TIMER_CNTR1 (IO_TIMER1 + 1) /* timer 1 counter port */
+#define TIMER_CNTR2 (IO_TIMER1 + 2) /* timer 2 counter port */
+#define TIMER_MODE (IO_TIMER1 + 3) /* timer mode port */
+#define TIMER_SEL0 0x00 /* select counter 0 */
+#define TIMER_SEL1 0x40 /* select counter 1 */
+#define TIMER_SEL2 0x80 /* select counter 2 */
+#define TIMER_INTTC 0x00 /* mode 0, intr on terminal cnt */
+#define TIMER_ONESHOT 0x02 /* mode 1, one shot */
+#define TIMER_RATEGEN 0x04 /* mode 2, rate generator */
+#define TIMER_SQWAVE 0x06 /* mode 3, square wave */
+#define TIMER_SWSTROBE 0x08 /* mode 4, s/w triggered strobe */
+#define TIMER_HWSTROBE 0x0a /* mode 5, h/w triggered strobe */
+#define TIMER_LATCH 0x00 /* latch counter for reading */
+#define TIMER_LSB 0x10 /* r/w counter LSB */
+#define TIMER_MSB 0x20 /* r/w counter MSB */
+#define TIMER_16BIT 0x30 /* r/w counter 16 bits, LSB first */
+#define TIMER_BCD 0x01 /* count in BCD */
+
diff --git a/sys/amd64/isa/vector.S b/sys/amd64/isa/vector.S
new file mode 100644
index 0000000..7135ae7
--- /dev/null
+++ b/sys/amd64/isa/vector.S
@@ -0,0 +1,360 @@
+/*
+ * from: vector.s, 386BSD 0.1 unknown origin
+ * $Id: vector.s,v 1.6 1994/01/10 23:15:09 ache Exp $
+ */
+
+#include "i386/isa/icu.h"
+#include "i386/isa/isa.h"
+#include "vector.h"
+
+#define ICU_EOI 0x20 /* XXX - define elsewhere */
+
+#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
+#define IRQ_BYTE(irq_num) ((irq_num) / 8)
+
+#ifdef AUTO_EOI_1
+#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
+#else
+#define ENABLE_ICU1 \
+ movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
+ FASTER_NOP ; /* ... ASAP ... */ \
+ outb %al,$IO_ICU1 /* ... to clear in service bit */
+#endif
+
+#ifdef AUTO_EOI_2
+/*
+ * The data sheet says no auto-EOI on slave, but it sometimes works.
+ */
+#define ENABLE_ICU1_AND_2 ENABLE_ICU1
+#else
+#define ENABLE_ICU1_AND_2 \
+ movb $ICU_EOI,%al ; /* as above */ \
+ FASTER_NOP ; \
+ outb %al,$IO_ICU2 ; /* but do second icu first */ \
+ FASTER_NOP ; \
+ outb %al,$IO_ICU1 /* then first icu */
+#endif
+
+#ifdef FAST_INTR_HANDLER_USES_ES
+#define ACTUALLY_PUSHED 1
+#define MAYBE_MOVW_AX_ES movl %ax,%es
+#define MAYBE_POPL_ES popl %es
+#define MAYBE_PUSHL_ES pushl %es
+#else
+/*
+ * We can usually skip loading %es for fastintr handlers. %es should
+ * only be used for string instructions, and fastintr handlers shouldn't
+ * do anything slow enough to justify using a string instruction.
+ */
+#define ACTUALLY_PUSHED 0
+#define MAYBE_MOVW_AX_ES
+#define MAYBE_POPL_ES
+#define MAYBE_PUSHL_ES
+#endif
+
+/*
+ * Macros for interrupt entry, call to handler, and exit.
+ *
+ * XXX - the interrupt frame is set up to look like a trap frame. This is
+ * usually a waste of time. The only interrupt handlers that want a frame
+ * are the clock handler (it wants a clock frame), the npx handler (it's
+ * easier to do right all in assembler). The interrupt return routine
+ * needs a trap frame for rare AST's (it could easily convert the frame).
+ * The direct costs of setting up a trap frame are two pushl's (error
+ * code and trap number), an addl to get rid of these, and pushing and
+ * popping the call-saved regs %esi, %edi and %ebp twice, The indirect
+ * costs are making the driver interface nonuniform so unpending of
+ * interrupts is more complicated and slower (call_driver(unit) would
+ * be easier than ensuring an interrupt frame for all handlers. Finally,
+ * there are some struct copies in the npx handler and maybe in the clock
+ * handler that could be avoided by working more with pointers to frames
+ * instead of frames.
+ *
+ * XXX - should we do a cld on every system entry to avoid the requirement
+ * for scattered cld's?
+ *
+ * Coding notes for *.s:
+ *
+ * If possible, avoid operations that involve an operand size override.
+ * Word-sized operations might be smaller, but the operand size override
+ * makes them slower on 486's and no faster on 386's unless perhaps
+ * the instruction pipeline is depleted. E.g.,
+ *
+ * Use movl to seg regs instead of the equivalent but more descriptive
+ * movw - gas generates an irrelevant (slower) operand size override.
+ *
+ * Use movl to ordinary regs in preference to movw and especially
+ * in preference to movz[bw]l. Use unsigned (long) variables with the
+ * top bits clear instead of unsigned short variables to provide more
+ * opportunities for movl.
+ *
+ * If possible, use byte-sized operations. They are smaller and no slower.
+ *
+ * Use (%reg) instead of 0(%reg) - gas generates larger code for the latter.
+ *
+ * If the interrupt frame is made more flexible, INTR can push %eax first
+ * and decide the ipending case with less overhead, e.g., by avoiding
+ * loading segregs.
+ */
+
+#define FAST_INTR(unit, irq_num, id_num, handler, enable_icus) \
+ pushl %eax ; /* save only call-used registers */ \
+ pushl %ecx ; \
+ pushl %edx ; \
+ pushl %ds ; \
+ MAYBE_PUSHL_ES ; \
+ movl $KDSEL,%eax ; \
+ movl %ax,%ds ; \
+ MAYBE_MOVW_AX_ES ; \
+ FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
+ pushl $unit ; \
+ call handler ; /* do the work ASAP */ \
+ enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
+ addl $4,%esp ; \
+ incl _cnt+V_INTR ; /* book-keeping can wait */ \
+ incl _intrcnt_actv + (id_num) * 4 ; \
+ movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \
+ notl %eax ; \
+ andl _ipending,%eax ; \
+ jne 1f ; /* yes, handle them */ \
+ MEXITCOUNT ; \
+ MAYBE_POPL_ES ; \
+ popl %ds ; \
+ popl %edx ; \
+ popl %ecx ; \
+ popl %eax ; \
+ iret ; \
+; \
+ ALIGN_TEXT ; \
+1: ; \
+ movl _cpl,%eax ; \
+ movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \
+ sti ; /* ... to do this as early as possible */ \
+ MAYBE_POPL_ES ; /* discard most of thin frame ... */ \
+ popl %ecx ; /* ... original %ds ... */ \
+ popl %edx ; \
+ xchgl %eax,(1+ACTUALLY_PUSHED)*4(%esp) ; /* orig %eax; save cpl */ \
+ pushal ; /* build fat frame (grrr) ... */ \
+ pushl %ecx ; /* ... actually %ds ... */ \
+ pushl %es ; \
+ movl $KDSEL,%eax ; \
+ movl %ax,%es ; \
+ movl (2+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \
+ movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \
+ movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \
+ pushl %eax ; \
+ subl $4,%esp ; /* junk for unit number */ \
+ MEXITCOUNT ; \
+ jmp _doreti
+
+#define INTR(unit, irq_num, id_num, mask, handler, icu, enable_icus, reg, stray) \
+ pushl $0 ; /* dummy error code */ \
+ pushl $0 ; /* dummy trap type */ \
+ pushal ; \
+ pushl %ds ; /* save our data and extra segments ... */ \
+ pushl %es ; \
+ movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \
+ movl %ax,%ds ; /* ... early for obsolete reasons */ \
+ movl %ax,%es ; \
+ movb _imen + IRQ_BYTE(irq_num),%al ; \
+ orb $IRQ_BIT(irq_num),%al ; \
+ movb %al,_imen + IRQ_BYTE(irq_num) ; \
+ FASTER_NOP ; \
+ outb %al,$icu+1 ; \
+ enable_icus ; \
+ incl _cnt+V_INTR ; /* tally interrupts */ \
+ movl _cpl,%eax ; \
+ testb $IRQ_BIT(irq_num),%reg ; \
+ jne 2f ; \
+1: ; \
+ FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid double count */ \
+ incl _intrcnt_actv + (id_num) * 4 ; \
+ movl _cpl,%eax ; \
+ pushl %eax ; \
+ pushl $unit ; \
+ orl mask,%eax ; \
+ movl %eax,_cpl ; \
+ sti ; \
+ call handler ; \
+ movb _imen + IRQ_BYTE(irq_num),%al ; \
+ andb $~IRQ_BIT(irq_num),%al ; \
+ movb %al,_imen + IRQ_BYTE(irq_num) ; \
+ FASTER_NOP ; \
+ outb %al,$icu+1 ; \
+ MEXITCOUNT ; \
+ /* We could usually avoid the following jmp by inlining some of */ \
+ /* _doreti, but it's probably better to use less cache. */ \
+ jmp _doreti ; \
+; \
+ ALIGN_TEXT ; \
+2: ; \
+ /* XXX skip mcounting here to avoid double count */ \
+ movl $1b,%eax ; /* register resume address */ \
+ /* XXX - someday do it at attach time */ \
+ movl %eax,ihandlers + (irq_num) * 4 ; \
+ orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \
+ popl %es ; \
+ popl %ds ; \
+ popal ; \
+ addl $4+4,%esp ; \
+ iret
+
+/*
+ * vector.h has defined a macro 'BUILD_VECTORS' containing a big list of info
+ * about vectors, including a submacro 'BUILD_VECTOR' that operates on the
+ * info about each vector. We redefine 'BUILD_VECTOR' to expand the info
+ * in different ways. Here we expand it to a list of interrupt handlers.
+ * This order is of course unimportant. Elsewhere we expand it to inline
+ * linear search code for which the order is a little more important and
+ * concatenating the code with no holes is very important.
+ *
+ * XXX - now there is BUILD_FAST_VECTOR as well as BUILD_VECTOR.
+ *
+ * The info consists of the following items for each vector:
+ *
+ * name (identifier): name of the vector; used to build labels
+ * unit (expression): unit number to call the device driver with
+ * irq_num (number): number of the IRQ to handled (0-15)
+ * id_num (number): unique numeric id for handler (assigned by config)
+ * mask (blank-ident): priority mask used
+ * handler (blank-ident): interrupt handler to call
+ * icu_num (number): (1 + irq_num / 8) converted for label building
+ * icu_enables (number): 1 for icu_num == 1, 1_AND_2 for icu_num == 2
+ * reg (blank-ident): al for icu_num == 1, ah for icu_num == 2
+ *
+ * 'irq_num' is converted in several ways at config time to get around
+ * limitations in cpp. The macros have blanks after commas iff they would
+ * not mess up identifiers and numbers.
+ */
+
+#undef BUILD_FAST_VECTOR
+#define BUILD_FAST_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .globl handler ; \
+ .text ; \
+ .globl _V/**/name ; \
+ SUPERALIGN_TEXT ; \
+_V/**/name: ; \
+ FAST_INTR(unit, irq_num,id_num, handler, ENABLE_ICU/**/icu_enables)
+
+#undef BUILD_VECTOR
+#define BUILD_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .globl handler ; \
+ .text ; \
+ .globl _V/**/name ; \
+ SUPERALIGN_TEXT ; \
+_V/**/name: ; \
+ INTR(unit,irq_num, id_num, mask, handler, IO_ICU/**/icu_num, \
+ ENABLE_ICU/**/icu_enables, reg,)
+
+MCOUNT_LABEL(bintr)
+ BUILD_VECTORS
+
+ /* hardware interrupt catcher (IDT 32 - 47) */
+ .globl _isa_strayintr
+
+#define STRAYINTR(irq_num, icu_num, icu_enables, reg) \
+IDTVEC(intr/**/irq_num) ; \
+ INTR(irq_num,irq_num,irq_num, _high_imask, _isa_strayintr, \
+ IO_ICU/**/icu_num, ENABLE_ICU/**/icu_enables, reg,stray)
+
+/*
+ * XXX - the mask (1 << 2) == IRQ_SLAVE will be generated for IRQ 2, instead
+ * of the mask IRQ2 (defined as IRQ9 == (1 << 9)). But IRQ 2 "can't happen".
+ * In fact, all stray interrupts "can't happen" except for bugs. The
+ * "stray" IRQ 7 is documented behaviour of the 8259. It happens when there
+ * is a glitch on any of its interrupt inputs. Does it really interrupt when
+ * IRQ 7 is masked?
+ *
+ * XXX - unpend doesn't work for these, it sends them to the real handler.
+ *
+ * XXX - the race bug during initialization may be because I changed the
+ * order of switching from the stray to the real interrupt handler to before
+ * enabling interrupts. The old order looked unsafe but maybe it is OK with
+ * the stray interrupt handler installed. But these handlers only reduce
+ * the window of vulnerability - it is still open at the end of
+ * isa_configure().
+ *
+ * XXX - many comments are stale.
+ */
+
+ STRAYINTR(0,1,1, al)
+ STRAYINTR(1,1,1, al)
+ STRAYINTR(2,1,1, al)
+ STRAYINTR(3,1,1, al)
+ STRAYINTR(4,1,1, al)
+ STRAYINTR(5,1,1, al)
+ STRAYINTR(6,1,1, al)
+ STRAYINTR(7,1,1, al)
+ STRAYINTR(8,2,1_AND_2, ah)
+ STRAYINTR(9,2,1_AND_2, ah)
+ STRAYINTR(10,2,1_AND_2, ah)
+ STRAYINTR(11,2,1_AND_2, ah)
+ STRAYINTR(12,2,1_AND_2, ah)
+ STRAYINTR(13,2,1_AND_2, ah)
+ STRAYINTR(14,2,1_AND_2, ah)
+ STRAYINTR(15,2,1_AND_2, ah)
+#if 0
+ INTRSTRAY(255, _highmask, 255) ; call _isa_strayintr ; INTREXIT2
+#endif
+MCOUNT_LABEL(eintr)
+
+/*
+ * These are the interrupt counters, I moved them here from icu.s so that
+ * they are with the name table. rgrimes
+ *
+ * There are now lots of counters, this has been redone to work with
+ * Bruce Evans intr-0.1 code, which I modified some more to make it all
+ * work with vmstat.
+ */
+ .data
+ihandlers: /* addresses of interrupt handlers */
+ .space NHWI*4 /* actually resumption addresses for HWI's */
+ .long swi_tty, swi_net, 0, 0, 0, 0, 0, 0
+ .long 0, 0, 0, 0, 0, 0, swi_clock, swi_ast
+imasks: /* masks for interrupt handlers */
+ .space NHWI*4 /* padding; HWI masks are elsewhere */
+ .long SWI_TTY_MASK, SWI_NET_MASK, 0, 0, 0, 0, 0, 0
+ .long 0, 0, 0, 0, 0, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+
+ .globl _intrcnt
+_intrcnt: /* used by vmstat to calc size of table */
+ .globl _intrcnt_bad7
+_intrcnt_bad7: .space 4 /* glitches on irq 7 */
+ .globl _intrcnt_bad15
+_intrcnt_bad15: .space 4 /* glitches on irq 15 */
+ .globl _intrcnt_stray
+_intrcnt_stray: .space 4 /* total count of stray interrupts */
+ .globl _intrcnt_actv
+_intrcnt_actv: .space NR_REAL_INT_HANDLERS * 4 /* active interrupts */
+ .globl _eintrcnt
+_eintrcnt: /* used by vmstat to calc size of table */
+
+/*
+ * Build the interrupt name table for vmstat
+ */
+
+#undef BUILD_FAST_VECTOR
+#define BUILD_FAST_VECTOR BUILD_VECTOR
+
+#undef BUILD_VECTOR
+#define BUILD_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .ascii "name irq" ; \
+ .asciz "irq_num"
+/*
+ * XXX - use the __STRING and __CONCAT macros from <sys/cdefs.h> to stringize
+ * and concatenate names above and elsewhere. Note that __CONCAT doesn't
+ * work when nested.
+ */
+
+ .text
+ .globl _intrnames, _eintrnames
+_intrnames:
+ BUILD_VECTOR(bad,,7,,,,,,)
+ BUILD_VECTOR(bad,,15,,,,,,)
+ BUILD_VECTOR(stray,,,,,,,,)
+ BUILD_VECTORS
+
+_eintrnames:
diff --git a/sys/amd64/isa/vector.s b/sys/amd64/isa/vector.s
new file mode 100644
index 0000000..7135ae7
--- /dev/null
+++ b/sys/amd64/isa/vector.s
@@ -0,0 +1,360 @@
+/*
+ * from: vector.s, 386BSD 0.1 unknown origin
+ * $Id: vector.s,v 1.6 1994/01/10 23:15:09 ache Exp $
+ */
+
+#include "i386/isa/icu.h"
+#include "i386/isa/isa.h"
+#include "vector.h"
+
+#define ICU_EOI 0x20 /* XXX - define elsewhere */
+
+#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
+#define IRQ_BYTE(irq_num) ((irq_num) / 8)
+
+#ifdef AUTO_EOI_1
+#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
+#else
+#define ENABLE_ICU1 \
+ movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
+ FASTER_NOP ; /* ... ASAP ... */ \
+ outb %al,$IO_ICU1 /* ... to clear in service bit */
+#endif
+
+#ifdef AUTO_EOI_2
+/*
+ * The data sheet says no auto-EOI on slave, but it sometimes works.
+ */
+#define ENABLE_ICU1_AND_2 ENABLE_ICU1
+#else
+#define ENABLE_ICU1_AND_2 \
+ movb $ICU_EOI,%al ; /* as above */ \
+ FASTER_NOP ; \
+ outb %al,$IO_ICU2 ; /* but do second icu first */ \
+ FASTER_NOP ; \
+ outb %al,$IO_ICU1 /* then first icu */
+#endif
+
+#ifdef FAST_INTR_HANDLER_USES_ES
+#define ACTUALLY_PUSHED 1
+#define MAYBE_MOVW_AX_ES movl %ax,%es
+#define MAYBE_POPL_ES popl %es
+#define MAYBE_PUSHL_ES pushl %es
+#else
+/*
+ * We can usually skip loading %es for fastintr handlers. %es should
+ * only be used for string instructions, and fastintr handlers shouldn't
+ * do anything slow enough to justify using a string instruction.
+ */
+#define ACTUALLY_PUSHED 0
+#define MAYBE_MOVW_AX_ES
+#define MAYBE_POPL_ES
+#define MAYBE_PUSHL_ES
+#endif
+
+/*
+ * Macros for interrupt entry, call to handler, and exit.
+ *
+ * XXX - the interrupt frame is set up to look like a trap frame. This is
+ * usually a waste of time. The only interrupt handlers that want a frame
+ * are the clock handler (it wants a clock frame), the npx handler (it's
+ * easier to do right all in assembler). The interrupt return routine
+ * needs a trap frame for rare AST's (it could easily convert the frame).
+ * The direct costs of setting up a trap frame are two pushl's (error
+ * code and trap number), an addl to get rid of these, and pushing and
+ * popping the call-saved regs %esi, %edi and %ebp twice. The indirect
+ * costs are making the driver interface nonuniform so unpending of
+ * interrupts is more complicated and slower (call_driver(unit) would
+ * be easier than ensuring an interrupt frame for all handlers). Finally,
+ * there are some struct copies in the npx handler and maybe in the clock
+ * handler that could be avoided by working more with pointers to frames
+ * instead of frames.
+ *
+ * XXX - should we do a cld on every system entry to avoid the requirement
+ * for scattered cld's?
+ *
+ * Coding notes for *.s:
+ *
+ * If possible, avoid operations that involve an operand size override.
+ * Word-sized operations might be smaller, but the operand size override
+ * makes them slower on 486's and no faster on 386's unless perhaps
+ * the instruction pipeline is depleted. E.g.,
+ *
+ * Use movl to seg regs instead of the equivalent but more descriptive
+ * movw - gas generates an irrelevant (slower) operand size override.
+ *
+ * Use movl to ordinary regs in preference to movw and especially
+ * in preference to movz[bw]l. Use unsigned (long) variables with the
+ * top bits clear instead of unsigned short variables to provide more
+ * opportunities for movl.
+ *
+ * If possible, use byte-sized operations. They are smaller and no slower.
+ *
+ * Use (%reg) instead of 0(%reg) - gas generates larger code for the latter.
+ *
+ * If the interrupt frame is made more flexible, INTR can push %eax first
+ * and decide the ipending case with less overhead, e.g., by avoiding
+ * loading segregs.
+ */
+
+#define FAST_INTR(unit, irq_num, id_num, handler, enable_icus) \
+ pushl %eax ; /* save only call-used registers */ \
+ pushl %ecx ; \
+ pushl %edx ; \
+ pushl %ds ; \
+ MAYBE_PUSHL_ES ; \
+ movl $KDSEL,%eax ; \
+ movl %ax,%ds ; \
+ MAYBE_MOVW_AX_ES ; \
+ FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
+ pushl $unit ; \
+ call handler ; /* do the work ASAP */ \
+ enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
+ addl $4,%esp ; \
+ incl _cnt+V_INTR ; /* book-keeping can wait */ \
+ incl _intrcnt_actv + (id_num) * 4 ; \
+ movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \
+ notl %eax ; \
+ andl _ipending,%eax ; \
+ jne 1f ; /* yes, handle them */ \
+ MEXITCOUNT ; \
+ MAYBE_POPL_ES ; \
+ popl %ds ; \
+ popl %edx ; \
+ popl %ecx ; \
+ popl %eax ; \
+ iret ; \
+; \
+ ALIGN_TEXT ; \
+1: ; \
+ movl _cpl,%eax ; \
+ movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \
+ sti ; /* ... to do this as early as possible */ \
+ MAYBE_POPL_ES ; /* discard most of thin frame ... */ \
+ popl %ecx ; /* ... original %ds ... */ \
+ popl %edx ; \
+ xchgl %eax,(1+ACTUALLY_PUSHED)*4(%esp) ; /* orig %eax; save cpl */ \
+ pushal ; /* build fat frame (grrr) ... */ \
+ pushl %ecx ; /* ... actually %ds ... */ \
+ pushl %es ; \
+ movl $KDSEL,%eax ; \
+ movl %ax,%es ; \
+ movl (2+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \
+ movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \
+ movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \
+ pushl %eax ; \
+ subl $4,%esp ; /* junk for unit number */ \
+ MEXITCOUNT ; \
+ jmp _doreti
+
+#define INTR(unit, irq_num, id_num, mask, handler, icu, enable_icus, reg, stray) \
+ pushl $0 ; /* dumby error code */ \
+ pushl $0 ; /* dumby trap type */ \
+ pushal ; \
+ pushl %ds ; /* save our data and extra segments ... */ \
+ pushl %es ; \
+ movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \
+ movl %ax,%ds ; /* ... early for obsolete reasons */ \
+ movl %ax,%es ; \
+ movb _imen + IRQ_BYTE(irq_num),%al ; \
+ orb $IRQ_BIT(irq_num),%al ; \
+ movb %al,_imen + IRQ_BYTE(irq_num) ; \
+ FASTER_NOP ; \
+ outb %al,$icu+1 ; \
+ enable_icus ; \
+ incl _cnt+V_INTR ; /* tally interrupts */ \
+ movl _cpl,%eax ; \
+ testb $IRQ_BIT(irq_num),%reg ; \
+ jne 2f ; \
+1: ; \
+ FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid double count */ \
+ incl _intrcnt_actv + (id_num) * 4 ; \
+ movl _cpl,%eax ; \
+ pushl %eax ; \
+ pushl $unit ; \
+ orl mask,%eax ; \
+ movl %eax,_cpl ; \
+ sti ; \
+ call handler ; \
+ movb _imen + IRQ_BYTE(irq_num),%al ; \
+ andb $~IRQ_BIT(irq_num),%al ; \
+ movb %al,_imen + IRQ_BYTE(irq_num) ; \
+ FASTER_NOP ; \
+ outb %al,$icu+1 ; \
+ MEXITCOUNT ; \
+ /* We could usually avoid the following jmp by inlining some of */ \
+ /* _doreti, but it's probably better to use less cache. */ \
+ jmp _doreti ; \
+; \
+ ALIGN_TEXT ; \
+2: ; \
+ /* XXX skip mcounting here to avoid double count */ \
+ movl $1b,%eax ; /* register resume address */ \
+ /* XXX - someday do it at attach time */ \
+ movl %eax,ihandlers + (irq_num) * 4 ; \
+ orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \
+ popl %es ; \
+ popl %ds ; \
+ popal ; \
+ addl $4+4,%esp ; \
+ iret
+
+/*
+ * vector.h has defined a macro 'BUILD_VECTORS' containing a big list of info
+ * about vectors, including a submacro 'BUILD_VECTOR' that operates on the
+ * info about each vector. We redefine 'BUILD_VECTOR' to expand the info
+ * in different ways. Here we expand it to a list of interrupt handlers.
+ * This order is of course unimportant. Elsewhere we expand it to inline
+ * linear search code for which the order is a little more important and
+ * concatenating the code with no holes is very important.
+ *
+ * XXX - now there is BUILD_FAST_VECTOR as well as BUILD_VECTOR.
+ *
+ * The info consists of the following items for each vector:
+ *
+ * name (identifier): name of the vector; used to build labels
+ * unit (expression): unit number to call the device driver with
+ * irq_num (number): number of the IRQ to be handled (0-15)
+ * id_num (number): unique numeric id for handler (assigned by config)
+ * mask (blank-ident): priority mask used
+ * handler (blank-ident): interrupt handler to call
+ * icu_num (number): (1 + irq_num / 8) converted for label building
+ * icu_enables (number): 1 for icu_num == 1, 1_AND_2 for icu_num == 2
+ * reg (blank-ident): al for icu_num == 1, ah for icu_num == 2
+ *
+ * 'irq_num' is converted in several ways at config time to get around
+ * limitations in cpp. The macros have blanks after commas iff they would
+ * not mess up identifiers and numbers.
+ */
+
+#undef BUILD_FAST_VECTOR
+#define BUILD_FAST_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .globl handler ; \
+ .text ; \
+ .globl _V/**/name ; \
+ SUPERALIGN_TEXT ; \
+_V/**/name: ; \
+ FAST_INTR(unit, irq_num,id_num, handler, ENABLE_ICU/**/icu_enables)
+
+#undef BUILD_VECTOR
+#define BUILD_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .globl handler ; \
+ .text ; \
+ .globl _V/**/name ; \
+ SUPERALIGN_TEXT ; \
+_V/**/name: ; \
+ INTR(unit,irq_num, id_num, mask, handler, IO_ICU/**/icu_num, \
+ ENABLE_ICU/**/icu_enables, reg,)
+
+MCOUNT_LABEL(bintr)
+ BUILD_VECTORS
+
+ /* hardware interrupt catcher (IDT 32 - 47) */
+ .globl _isa_strayintr
+
+#define STRAYINTR(irq_num, icu_num, icu_enables, reg) \
+IDTVEC(intr/**/irq_num) ; \
+ INTR(irq_num,irq_num,irq_num, _high_imask, _isa_strayintr, \
+ IO_ICU/**/icu_num, ENABLE_ICU/**/icu_enables, reg,stray)
+
+/*
+ * XXX - the mask (1 << 2) == IRQ_SLAVE will be generated for IRQ 2, instead
+ * of the mask IRQ2 (defined as IRQ9 == (1 << 9)). But IRQ 2 "can't happen".
+ * In fact, all stray interrupts "can't happen" except for bugs. The
+ * "stray" IRQ 7 is documented behaviour of the 8259. It happens when there
+ * is a glitch on any of its interrupt inputs. Does it really interrupt when
+ * IRQ 7 is masked?
+ *
+ * XXX - unpend doesn't work for these, it sends them to the real handler.
+ *
+ * XXX - the race bug during initialization may be because I changed the
+ * order of switching from the stray to the real interrupt handler to before
+ * enabling interrupts. The old order looked unsafe but maybe it is OK with
+ * the stray interrupt handler installed. But these handlers only reduce
+ * the window of vulnerability - it is still open at the end of
+ * isa_configure().
+ *
+ * XXX - many comments are stale.
+ */
+
+ STRAYINTR(0,1,1, al)
+ STRAYINTR(1,1,1, al)
+ STRAYINTR(2,1,1, al)
+ STRAYINTR(3,1,1, al)
+ STRAYINTR(4,1,1, al)
+ STRAYINTR(5,1,1, al)
+ STRAYINTR(6,1,1, al)
+ STRAYINTR(7,1,1, al)
+ STRAYINTR(8,2,1_AND_2, ah)
+ STRAYINTR(9,2,1_AND_2, ah)
+ STRAYINTR(10,2,1_AND_2, ah)
+ STRAYINTR(11,2,1_AND_2, ah)
+ STRAYINTR(12,2,1_AND_2, ah)
+ STRAYINTR(13,2,1_AND_2, ah)
+ STRAYINTR(14,2,1_AND_2, ah)
+ STRAYINTR(15,2,1_AND_2, ah)
+#if 0
+ INTRSTRAY(255, _highmask, 255) ; call _isa_strayintr ; INTREXIT2
+#endif
+MCOUNT_LABEL(eintr)
+
+/*
+ * These are the interrupt counters, I moved them here from icu.s so that
+ * they are with the name table. rgrimes
+ *
+ * There are now lots of counters, this has been redone to work with
+ * Bruce Evans' intr-0.1 code, which I modified some more to make it all
+ * work with vmstat.
+ */
+ .data
+ihandlers: /* addresses of interrupt handlers */
+ .space NHWI*4 /* actually resumption addresses for HWI's */
+ .long swi_tty, swi_net, 0, 0, 0, 0, 0, 0
+ .long 0, 0, 0, 0, 0, 0, swi_clock, swi_ast
+imasks: /* masks for interrupt handlers */
+ .space NHWI*4 /* padding; HWI masks are elsewhere */
+ .long SWI_TTY_MASK, SWI_NET_MASK, 0, 0, 0, 0, 0, 0
+ .long 0, 0, 0, 0, 0, 0, SWI_CLOCK_MASK, SWI_AST_MASK
+
+ .globl _intrcnt
+_intrcnt: /* used by vmstat to calc size of table */
+ .globl _intrcnt_bad7
+_intrcnt_bad7: .space 4 /* glitches on irq 7 */
+ .globl _intrcnt_bad15
+_intrcnt_bad15: .space 4 /* glitches on irq 15 */
+ .globl _intrcnt_stray
+_intrcnt_stray: .space 4 /* total count of stray interrupts */
+ .globl _intrcnt_actv
+_intrcnt_actv: .space NR_REAL_INT_HANDLERS * 4 /* active interrupts */
+ .globl _eintrcnt
+_eintrcnt: /* used by vmstat to calc size of table */
+
+/*
+ * Build the interrupt name table for vmstat
+ */
+
+#undef BUILD_FAST_VECTOR
+#define BUILD_FAST_VECTOR BUILD_VECTOR
+
+#undef BUILD_VECTOR
+#define BUILD_VECTOR(name, unit, irq_num, id_num, mask, handler, \
+ icu_num, icu_enables, reg) \
+ .ascii "name irq" ; \
+ .asciz "irq_num"
+/*
+ * XXX - use the __STRING and __CONCAT macros from <sys/cdefs.h> to stringize
+ * and concatenate names above and elsewhere. Note that __CONCAT doesn't
+ * work when nested.
+ */
+
+ .text
+ .globl _intrnames, _eintrnames
+_intrnames:
+ BUILD_VECTOR(bad,,7,,,,,,)
+ BUILD_VECTOR(bad,,15,,,,,,)
+ BUILD_VECTOR(stray,,,,,,,,)
+ BUILD_VECTORS
+
+_eintrnames:
OpenPOWER on IntegriCloud