author    dg <dg@FreeBSD.org>    1993-11-13 02:25:21 +0000
committer dg <dg@FreeBSD.org>    1993-11-13 02:25:21 +0000
commit    aa3ae6ef2a770093235bb79679c3028e026d5622 (patch)
tree      b33fc248f12eae8dd82b98b630cad31e781b9daa
parent    ec9c017c73df0e3495f05d244eeb3a58328bfdb8 (diff)
First steps in rewriting locore.s, and making info useful
when the machine panics.

i386/i386/locore.s:
    1) got rid of most .set directives that were being used like #define's,
       and replaced them with appropriate #define's in the appropriate header
       files (accessed via genassym).
    2) added comments to header inclusions, global definitions, and global
       variables.
    3) replaced some hardcoded constants with cpp defines (such as PDESIZE
       and others).
    4) aligned all comments to the same column to make them easier to read.
    5) moved macro definitions for ENTRY, ALIGN, NOP, etc. to
       /sys/i386/include/asmacros.h.
    6) added #ifdef BDE_DEBUGGER around all of Bruce's debugger code.
    7) added new global '_KERNend' to store last location+1 of kernel.
    8) cleaned up zeroing of bss so that only bss is zeroed.
    9) fixed zeroing of page tables so that it really does zero them all -
       not just if they follow the bss.
    10) rewrote page table initialization code so that it 1) works correctly
        and 2) write protects the kernel text by default.
    11) properly initialize the kernel page directory, upages, p0stack PT,
        and page tables. The previous scheme was more than a bit screwy.
    12) changed allocation of the virtual area of the IO hole so that it is
        fixed at KERNBASE + 0xa0000. The previous scheme put it right after
        the kernel page tables and then later expected it to be at
        KERNBASE + 0xa0000.
    13) changed multiple bogus settings of user read/write on various areas
        of kernel VM - including the IO hole; we should never be accessing
        the IO hole in user mode through the kernel page tables.
    14) split kernel support routines such as bcopy, bzero, copyin, copyout,
        etc. into a separate file 'support.s'.
    15) split swtch and related routines into a separate 'swtch.s'.
    16) split routines related to traps, syscalls, and interrupts into a
        separate file 'exception.s'.
    17) removed some unused global variables from locore that got inserted
        by Garrett when he pulled them out of some .h files.

i386/isa/icu.s:
    1) cleaned up global variable declarations.
    2) moved in declaration of astpending and netisr.

i386/i386/pmap.c:
    1) fixed calculation of virtual_avail. It previously was calculated to
       be right in the middle of the kernel page tables - not a good place
       to start allocating kernel VM.
    2) properly allocate kernel page dir/tables etc. out of the kernel map -
       previously only took out 2 pages.

i386/i386/machdep.c:
    1) modified boot() to print a warning that the system will reboot in
       PANIC_REBOOT_WAIT_TIME seconds, and let the user abort with a key on
       the console. The machine will wait forever if a key is typed before
       the reboot. The default is 15 seconds, but it can be set to 0 to mean
       don't wait at all, -1 to mean wait forever, or any positive value to
       wait for that many seconds (see the C sketch following this message).
    2) print "Rebooting..." just before doing it.

kern/subr_prf.c:
    1) removed PANICWAIT as it is deprecated by the change to machdep.c.

i386/i386/trap.c:
    1) added a table of trap type strings and use it to print a real
       trap/panic message rather than just a number. Lots of work to be done
       here, but this is the first step. Symbolic traceback is in the TODO.

i386/i386/Makefile.i386:
    1) added support to build support.s, exception.s, and swtch.s.

...and various changes to various header files to make all of the above
happen.
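For illustration only, here is a minimal userland C sketch of the boot()
wait/abort behaviour described above for machdep.c. It is not the actual
kernel code: console_key_pressed() is a placeholder for whatever console
polling machdep.c uses, and sleep() stands in for the kernel's delay loop.

#include <stdio.h>
#include <unistd.h>

/* >0: wait that many seconds, 0: don't wait at all, negative: wait forever */
#define PANIC_REBOOT_WAIT_TIME	15

/* placeholder for polling the console; not a real kernel interface */
static int
console_key_pressed(void)
{
	return 0;
}

static void
panic_reboot_wait(void)
{
	int i;

	if (PANIC_REBOOT_WAIT_TIME == 0)
		goto reboot;			/* don't wait at all */

	printf("Rebooting in %d seconds - press a key on the console to abort\n",
	    PANIC_REBOOT_WAIT_TIME);
	for (i = 0; PANIC_REBOOT_WAIT_TIME < 0 || i < PANIC_REBOOT_WAIT_TIME; i++) {
		if (console_key_pressed()) {
			for (;;)		/* key typed: wait forever */
				sleep(1);
		}
		sleep(1);
	}
reboot:
	printf("Rebooting...\n");
}

int
main(void)
{
	panic_reboot_wait();
	return 0;
}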
-rw-r--r--  sys/amd64/amd64/cpu_switch.S   435
-rw-r--r--  sys/amd64/amd64/exception.S    289
-rw-r--r--  sys/amd64/amd64/exception.s    289
-rw-r--r--  sys/amd64/amd64/genassym.c      17
-rw-r--r--  sys/amd64/amd64/locore.S      1998
-rw-r--r--  sys/amd64/amd64/locore.s      1998
-rw-r--r--  sys/amd64/amd64/machdep.c       31
-rw-r--r--  sys/amd64/amd64/pmap.c           8
-rw-r--r--  sys/amd64/amd64/support.S     1031
-rw-r--r--  sys/amd64/amd64/support.s     1031
-rw-r--r--  sys/amd64/amd64/swtch.s        435
-rw-r--r--  sys/amd64/amd64/trap.c          54
-rw-r--r--  sys/amd64/include/asmacros.h    43
-rw-r--r--  sys/amd64/include/pmap.h        14
-rw-r--r--  sys/conf/Makefile.i386          25
-rw-r--r--  sys/conf/Makefile.powerpc       25
-rw-r--r--  sys/i386/conf/Makefile.i386     25
-rw-r--r--  sys/i386/i386/exception.s      289
-rw-r--r--  sys/i386/i386/genassym.c        17
-rw-r--r--  sys/i386/i386/locore.s        1998
-rw-r--r--  sys/i386/i386/machdep.c         31
-rw-r--r--  sys/i386/i386/pmap.c             8
-rw-r--r--  sys/i386/i386/support.s       1031
-rw-r--r--  sys/i386/i386/swtch.s          435
-rw-r--r--  sys/i386/i386/trap.c            54
-rw-r--r--  sys/i386/include/asmacros.h     43
-rw-r--r--  sys/i386/include/param.h         4
-rw-r--r--  sys/i386/include/pmap.h         14
-rw-r--r--  sys/i386/isa/icu.s              22
-rw-r--r--  sys/kern/subr_trap.c            54
30 files changed, 6274 insertions, 5474 deletions
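Before the diff itself: the swtch.s / cpu_switch.S code added below keeps a
32-bit bitmap (_whichqs) recording which of the 32 run queues (_qs) are
non-empty, and maps the 0-127 priority range onto those queues by dividing
p->p_pri by 4. The following C fragment only illustrates that bookkeeping,
with hypothetical helper names; the real work is done in assembly by
setrq(), remrq(), and swtch().

#include <stdint.h>
#include <strings.h>			/* ffs() */

static uint32_t whichqs;		/* bit n set => run queue n is non-empty */

/* map a 0-127 priority onto one of 32 queues; the "shrl $2" in setrq() */
static int
queue_of(int p_pri)
{
	return (p_pri >> 2);
}

static void
mark_queue_nonempty(int q)		/* the "btsl %edx,_whichqs" in setrq() */
{
	whichqs |= 1u << q;
}

static void
mark_queue_empty(int q)			/* the "btrl %edx,_whichqs" in remrq() */
{
	whichqs &= ~(1u << q);
}

/* swtch(): find the lowest-numbered non-empty queue, or -1 to idle */
static int
pick_next_queue(void)
{
	if (whichqs == 0)
		return (-1);
	return (ffs((int)whichqs) - 1);	/* analogous to the "bsfl" instruction */
}

int
main(void)
{
	int q;

	mark_queue_nonempty(queue_of(50));	/* a priority-50 process becomes runnable */
	q = pick_next_queue();			/* -> 12, since 50 >> 2 == 12 */
	mark_queue_empty(q);			/* queue drained again */
	return (q == 12 && pick_next_queue() == -1 ? 0 : 1);
}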
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
new file mode 100644
index 0000000..ec6e8bc
--- /dev/null
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -0,0 +1,435 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "npx.h" /* for NNPX */
+#include "assym.s" /* for preprocessor defines */
+#include "errno.h" /* for error codes */
+
+#include "i386/isa/debug.h" /* for SHOW macros */
+#include "machine/asmacros.h" /* for miscellaneous assembly macros */
+
+/*****************************************************************************/
+/* Scheduling */
+/*****************************************************************************/
+
+/*
+ * The following primitives manipulate the run queues.
+ * _whichqs tells which of the 32 queues _qs
+ * have processes in them. Setrq puts processes into queues, Remrq
+ * removes them from queues. The running process is on no queue,
+ * other processes are on a queue related to p->p_pri, divided by 4
+ * actually to shrink the 0-127 range of priorities into the 32 available
+ * queues.
+ */
+ .data
+ .globl _curpcb, _whichqs
+_curpcb: .long 0 /* pointer to curproc's PCB area */
+_whichqs: .long 0 /* which run queues have data */
+
+ .globl _qs,_cnt,_panic
+ .comm _noproc,4
+ .comm _runrun,4
+
+ .globl _want_resched
+_want_resched: .long 0 /* we need to re-run the scheduler */
+
+ .text
+/*
+ * Setrq(p)
+ *
+ * Call should be made at spl6(), and p->p_stat should be SRUN
+ */
+ENTRY(setrq)
+ movl 4(%esp),%eax
+ cmpl $0,P_RLINK(%eax) /* should not be on q already */
+ je set1
+ pushl $set2
+ call _panic
+set1:
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btsl %edx,_whichqs /* set q full bit */
+ shll $3,%edx
+ addl $_qs,%edx /* locate q hdr */
+ movl %edx,P_LINK(%eax) /* link process on tail of q */
+ movl P_RLINK(%edx),%ecx
+ movl %ecx,P_RLINK(%eax)
+ movl %eax,P_RLINK(%edx)
+ movl %eax,P_LINK(%ecx)
+ ret
+
+set2: .asciz "setrq"
+
+/*
+ * Remrq(p)
+ *
+ * Call should be made at spl6().
+ */
+ENTRY(remrq)
+ movl 4(%esp),%eax
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btrl %edx,_whichqs /* clear full bit, panic if clear already */
+ jb rem1
+ pushl $rem3
+ call _panic
+rem1:
+ pushl %edx
+ movl P_LINK(%eax),%ecx /* unlink process */
+ movl P_RLINK(%eax),%edx
+ movl %edx,P_RLINK(%ecx)
+ movl P_RLINK(%eax),%ecx
+ movl P_LINK(%eax),%edx
+ movl %edx,P_LINK(%ecx)
+ popl %edx
+ movl $_qs,%ecx
+ shll $3,%edx
+ addl %edx,%ecx
+ cmpl P_LINK(%ecx),%ecx /* q still has something? */
+ je rem2
+ shrl $3,%edx /* yes, set bit as still full */
+ btsl %edx,_whichqs
+rem2:
+ movl $0,P_RLINK(%eax) /* zap reverse link to indicate off list */
+ ret
+
+rem3: .asciz "remrq"
+sw0: .asciz "swtch"
+
+/*
+ * When no processes are on the runq, Swtch branches to idle
+ * to wait for something to come ready.
+ */
+ ALIGN_TEXT
+Idle:
+ sti
+ SHOW_STI
+
+ ALIGN_TEXT
+idle_loop:
+ call _spl0
+ cmpl $0,_whichqs
+ jne sw1
+ hlt /* wait for interrupt */
+ jmp idle_loop
+
+badsw:
+ pushl $sw0
+ call _panic
+ /*NOTREACHED*/
+
+/*
+ * Swtch()
+ */
+ SUPERALIGN_TEXT /* so profiling doesn't lump Idle with swtch().. */
+ENTRY(swtch)
+
+ incl _cnt+V_SWTCH
+
+ /* switch to new process. first, save context as needed */
+
+ movl _curproc,%ecx
+
+ /* if no process to save, don't bother */
+ testl %ecx,%ecx
+ je sw1
+
+ movl P_ADDR(%ecx),%ecx
+
+ movl (%esp),%eax /* Hardware registers */
+ movl %eax,PCB_EIP(%ecx)
+ movl %ebx,PCB_EBX(%ecx)
+ movl %esp,PCB_ESP(%ecx)
+ movl %ebp,PCB_EBP(%ecx)
+ movl %esi,PCB_ESI(%ecx)
+ movl %edi,PCB_EDI(%ecx)
+
+#if NNPX > 0
+ /* have we used fp, and need a save? */
+ mov _curproc,%eax
+ cmp %eax,_npxproc
+ jne 1f
+ pushl %ecx /* h/w bugs make saving complicated */
+ leal PCB_SAVEFPU(%ecx),%eax
+ pushl %eax
+ call _npxsave /* do it in a big C function */
+ popl %eax
+ popl %ecx
+1:
+#endif /* NNPX > 0 */
+
+ movl _CMAP2,%eax /* save temporary map PTE */
+ movl %eax,PCB_CMAP2(%ecx) /* in our context */
+ movl $0,_curproc /* out of process */
+
+# movw _cpl,%ax
+# movw %ax,PCB_IML(%ecx) /* save ipl */
+
+ /* save is done, now choose a new process or idle */
+sw1:
+ cli
+ SHOW_CLI
+ movl _whichqs,%edi
+2:
+ /* XXX - bsf is sloow */
+ bsfl %edi,%eax /* find a full q */
+ je Idle /* if none, idle */
+ /* XX update whichqs? */
+swfnd:
+ btrl %eax,%edi /* clear q full status */
+ jnb 2b /* if it was clear, look for another */
+ movl %eax,%ebx /* save which one we are using */
+
+ shll $3,%eax
+ addl $_qs,%eax /* select q */
+ movl %eax,%esi
+
+#ifdef DIAGNOSTIC
+ cmpl P_LINK(%eax),%eax /* linked to self? (e.g. not on list) */
+ je badsw /* not possible */
+#endif
+
+ movl P_LINK(%eax),%ecx /* unlink from front of process q */
+ movl P_LINK(%ecx),%edx
+ movl %edx,P_LINK(%eax)
+ movl P_RLINK(%ecx),%eax
+ movl %eax,P_RLINK(%edx)
+
+ cmpl P_LINK(%ecx),%esi /* q empty */
+ je 3f
+ btsl %ebx,%edi /* nope, set to indicate full */
+3:
+ movl %edi,_whichqs /* update q status */
+
+ movl $0,%eax
+ movl %eax,_want_resched
+
+#ifdef DIAGNOSTIC
+ cmpl %eax,P_WCHAN(%ecx)
+ jne badsw
+ cmpb $SRUN,P_STAT(%ecx)
+ jne badsw
+#endif
+
+ movl %eax,P_RLINK(%ecx) /* isolate process to run */
+ movl P_ADDR(%ecx),%edx
+ movl PCB_CR3(%edx),%ebx
+
+ /* switch address space */
+ movl %ebx,%cr3
+
+ /* restore context */
+ movl PCB_EBX(%edx),%ebx
+ movl PCB_ESP(%edx),%esp
+ movl PCB_EBP(%edx),%ebp
+ movl PCB_ESI(%edx),%esi
+ movl PCB_EDI(%edx),%edi
+ movl PCB_EIP(%edx),%eax
+ movl %eax,(%esp)
+
+ movl PCB_CMAP2(%edx),%eax /* get temporary map */
+ movl %eax,_CMAP2 /* reload temporary map PTE */
+
+ movl %ecx,_curproc /* into next process */
+ movl %edx,_curpcb
+
+ pushl %edx /* save p to return */
+/*
+ * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
+ * I think restoring the cpl is unnecessary, but we must turn off the cli
+ * now that spl*() don't do it as a side affect.
+ */
+ pushl PCB_IML(%edx)
+ sti
+ SHOW_STI
+#if 0
+ call _splx
+#endif
+ addl $4,%esp
+/*
+ * XXX - 0.0 gets here via swtch_to_inactive(). I think 0.1 gets here in the
+ * same way. Better return a value.
+ */
+ popl %eax /* return(p); */
+ ret
+
+ENTRY(mvesp)
+ movl %esp,%eax
+ ret
+/*
+ * struct proc *swtch_to_inactive(p) ; struct proc *p;
+ *
+ * At exit of a process, move off the address space of the
+ * process and onto a "safe" one. Then, on a temporary stack
+ * return and run code that disposes of the old state.
+ * Since this code requires a parameter from the "old" stack,
+ * pass it back as a return value.
+ */
+ENTRY(swtch_to_inactive)
+ popl %edx /* old pc */
+ popl %eax /* arg, our return value */
+ movl _IdlePTD,%ecx
+ movl %ecx,%cr3 /* good bye address space */
+ #write buffer?
+ movl $tmpstk-4,%esp /* temporary stack, compensated for call */
+ jmp %edx /* return, execute remainder of cleanup */
+
+/*
+ * savectx(pcb, altreturn)
+ * Update pcb, saving current processor state and arranging
+ * for alternate return ala longjmp in swtch if altreturn is true.
+ */
+ENTRY(savectx)
+ movl 4(%esp),%ecx
+ movw _cpl,%ax
+ movw %ax,PCB_IML(%ecx)
+ movl (%esp),%eax
+ movl %eax,PCB_EIP(%ecx)
+ movl %ebx,PCB_EBX(%ecx)
+ movl %esp,PCB_ESP(%ecx)
+ movl %ebp,PCB_EBP(%ecx)
+ movl %esi,PCB_ESI(%ecx)
+ movl %edi,PCB_EDI(%ecx)
+
+#if NNPX > 0
+ /*
+ * If npxproc == NULL, then the npx h/w state is irrelevant and the
+ * state had better already be in the pcb. This is true for forks
+ * but not for dumps (the old book-keeping with FP flags in the pcb
+ * always lost for dumps because the dump pcb has 0 flags).
+ *
+ * If npxproc != NULL, then we have to save the npx h/w state to
+ * npxproc's pcb and copy it to the requested pcb, or save to the
+ * requested pcb and reload. Copying is easier because we would
+ * have to handle h/w bugs for reloading. We used to lose the
+ * parent's npx state for forks by forgetting to reload.
+ */
+ mov _npxproc,%eax
+ testl %eax,%eax
+ je 1f
+
+ pushl %ecx
+ movl P_ADDR(%eax),%eax
+ leal PCB_SAVEFPU(%eax),%eax
+ pushl %eax
+ pushl %eax
+ call _npxsave
+ popl %eax
+ popl %eax
+ popl %ecx
+
+ pushl %ecx
+ pushl $108+8*2 /* XXX h/w state size + padding */
+ leal PCB_SAVEFPU(%ecx),%ecx
+ pushl %ecx
+ pushl %eax
+ call _bcopy
+ addl $12,%esp
+ popl %ecx
+1:
+#endif /* NNPX > 0 */
+
+ movl _CMAP2,%edx /* save temporary map PTE */
+ movl %edx,PCB_CMAP2(%ecx) /* in our context */
+
+ cmpl $0,8(%esp)
+ je 1f
+ movl %esp,%edx /* relocate current sp relative to pcb */
+ subl $_kstack,%edx /* (sp is relative to kstack): */
+ addl %edx,%ecx /* pcb += sp - kstack; */
+ movl %eax,(%ecx) /* write return pc at (relocated) sp@ */
+
+/* this mess deals with replicating register state gcc hides */
+ movl 12(%esp),%eax
+ movl %eax,12(%ecx)
+ movl 16(%esp),%eax
+ movl %eax,16(%ecx)
+ movl 20(%esp),%eax
+ movl %eax,20(%ecx)
+ movl 24(%esp),%eax
+ movl %eax,24(%ecx)
+1:
+ xorl %eax,%eax /* return 0 */
+ ret
+
+/*
+ * addupc(int pc, struct uprof *up, int ticks):
+ * update profiling information for the user process.
+ */
+ENTRY(addupc)
+ pushl %ebp
+ movl %esp,%ebp
+ movl 12(%ebp),%edx /* up */
+ movl 8(%ebp),%eax /* pc */
+
+ subl PR_OFF(%edx),%eax /* pc -= up->pr_off */
+ jl L1 /* if (pc < 0) return */
+
+ shrl $1,%eax /* praddr = pc >> 1 */
+ imull PR_SCALE(%edx),%eax /* praddr *= up->pr_scale */
+ shrl $15,%eax /* praddr = praddr << 15 */
+ andl $-2,%eax /* praddr &= ~1 */
+
+ cmpl PR_SIZE(%edx),%eax /* if (praddr > up->pr_size) return */
+ ja L1
+
+/* addl %eax,%eax /* praddr -> word offset */
+ addl PR_BASE(%edx),%eax /* praddr += up-> pr_base */
+ movl 16(%ebp),%ecx /* ticks */
+
+ movl _curpcb,%edx
+ movl $proffault,PCB_ONFAULT(%edx)
+ addl %ecx,(%eax) /* storage location += ticks */
+ movl $0,PCB_ONFAULT(%edx)
+L1:
+ leave
+ ret
+
+ ALIGN_TEXT
+proffault:
+ /* if we get a fault, then kill profiling all together */
+ movl $0,PCB_ONFAULT(%edx) /* squish the fault handler */
+ movl 12(%ebp),%ecx
+ movl $0,PR_SCALE(%ecx) /* up->pr_scale = 0 */
+ leave
+ ret
+
+/* To be done: */
+ENTRY(astoff)
+ ret
+
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
new file mode 100644
index 0000000..d6de874
--- /dev/null
+++ b/sys/amd64/amd64/exception.S
@@ -0,0 +1,289 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "npx.h" /* NNPX */
+
+#include "assym.s" /* system defines */
+
+#include "errno.h" /* error return codes */
+
+#include "i386/isa/debug.h" /* BDE debugging macros */
+
+#include "machine/trap.h" /* trap codes */
+#include "syscall.h" /* syscall numbers */
+
+#include "machine/asmacros.h" /* miscellaneous macros */
+
+#define KDSEL 0x10 /* kernel data selector */
+#define SEL_RPL_MASK 0x0003
+#define TRAPF_CS_OFF (13 * 4)
+
+ .text
+
+/*****************************************************************************/
+/* Trap handling */
+/*****************************************************************************/
+/*
+ * Trap and fault vector routines
+ *
+ * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
+ * control. The sti's give the standard losing behaviour for ddb and kgdb.
+ */
+#define IDTVEC(name) ALIGN_TEXT; .globl _X/**/name; _X/**/name:
+#define TRAP(a) pushl $(a) ; jmp alltraps
+#ifdef KGDB
+# define BPTTRAP(a) sti; pushl $(a) ; jmp bpttraps
+#else
+# define BPTTRAP(a) sti; TRAP(a)
+#endif
+
+IDTVEC(div)
+ pushl $0; TRAP(T_DIVIDE)
+IDTVEC(dbg)
+#if defined(BDE_DEBUGGER) && defined(BDBTRAP)
+ BDBTRAP(dbg)
+#endif
+ pushl $0; BPTTRAP(T_TRCTRAP)
+IDTVEC(nmi)
+ pushl $0; TRAP(T_NMI)
+IDTVEC(bpt)
+#if defined(BDE_DEBUGGER) && defined(BDBTRAP)
+ BDBTRAP(bpt)
+#endif
+ pushl $0; BPTTRAP(T_BPTFLT)
+IDTVEC(ofl)
+ pushl $0; TRAP(T_OFLOW)
+IDTVEC(bnd)
+ pushl $0; TRAP(T_BOUND)
+IDTVEC(ill)
+ pushl $0; TRAP(T_PRIVINFLT)
+IDTVEC(dna)
+ pushl $0; TRAP(T_DNA)
+IDTVEC(dble)
+ TRAP(T_DOUBLEFLT)
+IDTVEC(fpusegm)
+ pushl $0; TRAP(T_FPOPFLT)
+IDTVEC(tss)
+ TRAP(T_TSSFLT)
+IDTVEC(missing)
+ TRAP(T_SEGNPFLT)
+IDTVEC(stk)
+ TRAP(T_STKFLT)
+IDTVEC(prot)
+ TRAP(T_PROTFLT)
+IDTVEC(page)
+ TRAP(T_PAGEFLT)
+IDTVEC(rsvd)
+ pushl $0; TRAP(T_RESERVED)
+IDTVEC(fpu)
+#if NNPX > 0
+ /*
+ * Handle like an interrupt so that we can call npxintr to clear the
+ * error. It would be better to handle npx interrupts as traps but
+ * this is difficult for nested interrupts.
+ */
+ pushl $0 /* dummy error code */
+ pushl $T_ASTFLT
+ pushal
+ nop /* silly, the bug is for popal and it only
+ * bites when the next instruction has a
+ * complicated address mode */
+ pushl %ds
+ pushl %es /* now the stack frame is a trap frame */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ pushl _cpl
+ pushl $0 /* dummy unit to finish building intr frame */
+ incl _cnt+V_TRAP
+ call _npxintr
+ jmp doreti
+#else /* NNPX > 0 */
+ pushl $0; TRAP(T_ARITHTRAP)
+#endif /* NNPX > 0 */
+ /* 17 - 31 reserved for future exp */
+IDTVEC(rsvd0)
+ pushl $0; TRAP(17)
+IDTVEC(rsvd1)
+ pushl $0; TRAP(18)
+IDTVEC(rsvd2)
+ pushl $0; TRAP(19)
+IDTVEC(rsvd3)
+ pushl $0; TRAP(20)
+IDTVEC(rsvd4)
+ pushl $0; TRAP(21)
+IDTVEC(rsvd5)
+ pushl $0; TRAP(22)
+IDTVEC(rsvd6)
+ pushl $0; TRAP(23)
+IDTVEC(rsvd7)
+ pushl $0; TRAP(24)
+IDTVEC(rsvd8)
+ pushl $0; TRAP(25)
+IDTVEC(rsvd9)
+ pushl $0; TRAP(26)
+IDTVEC(rsvd10)
+ pushl $0; TRAP(27)
+IDTVEC(rsvd11)
+ pushl $0; TRAP(28)
+IDTVEC(rsvd12)
+ pushl $0; TRAP(29)
+IDTVEC(rsvd13)
+ pushl $0; TRAP(30)
+IDTVEC(rsvd14)
+ pushl $0; TRAP(31)
+
+ SUPERALIGN_TEXT
+alltraps:
+ pushal
+ nop
+ pushl %ds
+ pushl %es
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+calltrap:
+ incl _cnt+V_TRAP
+ call _trap
+ /*
+ * Return through doreti to handle ASTs. Have to change trap frame
+ * to interrupt frame.
+ */
+ movl $T_ASTFLT,4+4+32(%esp) /* new trap type (err code not used) */
+ pushl _cpl
+ pushl $0 /* dummy unit */
+ jmp doreti
+
+#ifdef KGDB
+/*
+ * This code checks for a kgdb trap, then falls through
+ * to the regular trap code.
+ */
+ SUPERALIGN_TEXT
+bpttraps:
+ pushal
+ nop
+ pushl %es
+ pushl %ds
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp) /* non-kernel mode? */
+ jne calltrap /* yes */
+ call _kgdb_trap_glue
+ jmp calltrap
+#endif
+
+/*
+ * Call gate entry for syscall
+ */
+ SUPERALIGN_TEXT
+IDTVEC(syscall)
+ pushfl /* only for stupid carry bit and more stupid wait3 cc kludge */
+ /* XXX - also for direction flag (bzero, etc. clear it) */
+ pushal /* only need eax,ecx,edx - trap resaves others */
+ nop
+ movl $KDSEL,%eax /* switch to kernel segments */
+ movl %ax,%ds
+ movl %ax,%es
+ incl _cnt+V_SYSCALL
+ call _syscall
+ /*
+ * Return through doreti to handle ASTs. Have to change syscall frame
+ * to interrupt frame.
+ *
+ * XXX - we should have set up the frame earlier to avoid the
+ * following popal/pushal (not much can be done to avoid shuffling
+ * the flags). Consistent frames would simplify things all over.
+ */
+ movl 32+0(%esp),%eax /* old flags, shuffle to above cs:eip */
+ movl 32+4(%esp),%ebx /* `int' frame should have been ef, eip, cs */
+ movl 32+8(%esp),%ecx
+ movl %ebx,32+0(%esp)
+ movl %ecx,32+4(%esp)
+ movl %eax,32+8(%esp)
+ popal
+ nop
+ pushl $0 /* dummy error code */
+ pushl $T_ASTFLT
+ pushal
+ nop
+ movl __udatasel,%eax /* switch back to user segments */
+ pushl %eax /* XXX - better to preserve originals? */
+ pushl %eax
+ pushl _cpl
+ pushl $0
+ jmp doreti
+
+#ifdef SHOW_A_LOT
+/*
+ * 'show_bits' was too big when defined as a macro. The line length for some
+ * enclosing macro was too big for gas. Perhaps the code would have blown
+ * the cache anyway.
+ */
+ ALIGN_TEXT
+show_bits:
+ pushl %eax
+ SHOW_BIT(0)
+ SHOW_BIT(1)
+ SHOW_BIT(2)
+ SHOW_BIT(3)
+ SHOW_BIT(4)
+ SHOW_BIT(5)
+ SHOW_BIT(6)
+ SHOW_BIT(7)
+ SHOW_BIT(8)
+ SHOW_BIT(9)
+ SHOW_BIT(10)
+ SHOW_BIT(11)
+ SHOW_BIT(12)
+ SHOW_BIT(13)
+ SHOW_BIT(14)
+ SHOW_BIT(15)
+ popl %eax
+ ret
+
+ .data
+bit_colors:
+ .byte GREEN,RED,0,0
+ .text
+
+#endif /* SHOW_A_LOT */
+
+/*
+ * include generated interrupt vectors and ISA intr code
+ */
+#include "i386/isa/vector.s"
+#include "i386/isa/icu.s"
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
new file mode 100644
index 0000000..d6de874
--- /dev/null
+++ b/sys/amd64/amd64/exception.s
@@ -0,0 +1,289 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "npx.h" /* NNPX */
+
+#include "assym.s" /* system defines */
+
+#include "errno.h" /* error return codes */
+
+#include "i386/isa/debug.h" /* BDE debugging macros */
+
+#include "machine/trap.h" /* trap codes */
+#include "syscall.h" /* syscall numbers */
+
+#include "machine/asmacros.h" /* miscellaneous macros */
+
+#define KDSEL 0x10 /* kernel data selector */
+#define SEL_RPL_MASK 0x0003
+#define TRAPF_CS_OFF (13 * 4)
+
+ .text
+
+/*****************************************************************************/
+/* Trap handling */
+/*****************************************************************************/
+/*
+ * Trap and fault vector routines
+ *
+ * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
+ * control. The sti's give the standard losing behaviour for ddb and kgdb.
+ */
+#define IDTVEC(name) ALIGN_TEXT; .globl _X/**/name; _X/**/name:
+#define TRAP(a) pushl $(a) ; jmp alltraps
+#ifdef KGDB
+# define BPTTRAP(a) sti; pushl $(a) ; jmp bpttraps
+#else
+# define BPTTRAP(a) sti; TRAP(a)
+#endif
+
+IDTVEC(div)
+ pushl $0; TRAP(T_DIVIDE)
+IDTVEC(dbg)
+#if defined(BDE_DEBUGGER) && defined(BDBTRAP)
+ BDBTRAP(dbg)
+#endif
+ pushl $0; BPTTRAP(T_TRCTRAP)
+IDTVEC(nmi)
+ pushl $0; TRAP(T_NMI)
+IDTVEC(bpt)
+#if defined(BDE_DEBUGGER) && defined(BDBTRAP)
+ BDBTRAP(bpt)
+#endif
+ pushl $0; BPTTRAP(T_BPTFLT)
+IDTVEC(ofl)
+ pushl $0; TRAP(T_OFLOW)
+IDTVEC(bnd)
+ pushl $0; TRAP(T_BOUND)
+IDTVEC(ill)
+ pushl $0; TRAP(T_PRIVINFLT)
+IDTVEC(dna)
+ pushl $0; TRAP(T_DNA)
+IDTVEC(dble)
+ TRAP(T_DOUBLEFLT)
+IDTVEC(fpusegm)
+ pushl $0; TRAP(T_FPOPFLT)
+IDTVEC(tss)
+ TRAP(T_TSSFLT)
+IDTVEC(missing)
+ TRAP(T_SEGNPFLT)
+IDTVEC(stk)
+ TRAP(T_STKFLT)
+IDTVEC(prot)
+ TRAP(T_PROTFLT)
+IDTVEC(page)
+ TRAP(T_PAGEFLT)
+IDTVEC(rsvd)
+ pushl $0; TRAP(T_RESERVED)
+IDTVEC(fpu)
+#if NNPX > 0
+ /*
+ * Handle like an interrupt so that we can call npxintr to clear the
+ * error. It would be better to handle npx interrupts as traps but
+ * this is difficult for nested interrupts.
+ */
+ pushl $0 /* dummy error code */
+ pushl $T_ASTFLT
+ pushal
+ nop /* silly, the bug is for popal and it only
+ * bites when the next instruction has a
+ * complicated address mode */
+ pushl %ds
+ pushl %es /* now the stack frame is a trap frame */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ pushl _cpl
+ pushl $0 /* dummy unit to finish building intr frame */
+ incl _cnt+V_TRAP
+ call _npxintr
+ jmp doreti
+#else /* NNPX > 0 */
+ pushl $0; TRAP(T_ARITHTRAP)
+#endif /* NNPX > 0 */
+ /* 17 - 31 reserved for future exp */
+IDTVEC(rsvd0)
+ pushl $0; TRAP(17)
+IDTVEC(rsvd1)
+ pushl $0; TRAP(18)
+IDTVEC(rsvd2)
+ pushl $0; TRAP(19)
+IDTVEC(rsvd3)
+ pushl $0; TRAP(20)
+IDTVEC(rsvd4)
+ pushl $0; TRAP(21)
+IDTVEC(rsvd5)
+ pushl $0; TRAP(22)
+IDTVEC(rsvd6)
+ pushl $0; TRAP(23)
+IDTVEC(rsvd7)
+ pushl $0; TRAP(24)
+IDTVEC(rsvd8)
+ pushl $0; TRAP(25)
+IDTVEC(rsvd9)
+ pushl $0; TRAP(26)
+IDTVEC(rsvd10)
+ pushl $0; TRAP(27)
+IDTVEC(rsvd11)
+ pushl $0; TRAP(28)
+IDTVEC(rsvd12)
+ pushl $0; TRAP(29)
+IDTVEC(rsvd13)
+ pushl $0; TRAP(30)
+IDTVEC(rsvd14)
+ pushl $0; TRAP(31)
+
+ SUPERALIGN_TEXT
+alltraps:
+ pushal
+ nop
+ pushl %ds
+ pushl %es
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+calltrap:
+ incl _cnt+V_TRAP
+ call _trap
+ /*
+ * Return through doreti to handle ASTs. Have to change trap frame
+ * to interrupt frame.
+ */
+ movl $T_ASTFLT,4+4+32(%esp) /* new trap type (err code not used) */
+ pushl _cpl
+ pushl $0 /* dummy unit */
+ jmp doreti
+
+#ifdef KGDB
+/*
+ * This code checks for a kgdb trap, then falls through
+ * to the regular trap code.
+ */
+ SUPERALIGN_TEXT
+bpttraps:
+ pushal
+ nop
+ pushl %es
+ pushl %ds
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp) /* non-kernel mode? */
+ jne calltrap /* yes */
+ call _kgdb_trap_glue
+ jmp calltrap
+#endif
+
+/*
+ * Call gate entry for syscall
+ */
+ SUPERALIGN_TEXT
+IDTVEC(syscall)
+ pushfl /* only for stupid carry bit and more stupid wait3 cc kludge */
+ /* XXX - also for direction flag (bzero, etc. clear it) */
+ pushal /* only need eax,ecx,edx - trap resaves others */
+ nop
+ movl $KDSEL,%eax /* switch to kernel segments */
+ movl %ax,%ds
+ movl %ax,%es
+ incl _cnt+V_SYSCALL
+ call _syscall
+ /*
+ * Return through doreti to handle ASTs. Have to change syscall frame
+ * to interrupt frame.
+ *
+ * XXX - we should have set up the frame earlier to avoid the
+ * following popal/pushal (not much can be done to avoid shuffling
+ * the flags). Consistent frames would simplify things all over.
+ */
+ movl 32+0(%esp),%eax /* old flags, shuffle to above cs:eip */
+ movl 32+4(%esp),%ebx /* `int' frame should have been ef, eip, cs */
+ movl 32+8(%esp),%ecx
+ movl %ebx,32+0(%esp)
+ movl %ecx,32+4(%esp)
+ movl %eax,32+8(%esp)
+ popal
+ nop
+ pushl $0 /* dummy error code */
+ pushl $T_ASTFLT
+ pushal
+ nop
+ movl __udatasel,%eax /* switch back to user segments */
+ pushl %eax /* XXX - better to preserve originals? */
+ pushl %eax
+ pushl _cpl
+ pushl $0
+ jmp doreti
+
+#ifdef SHOW_A_LOT
+/*
+ * 'show_bits' was too big when defined as a macro. The line length for some
+ * enclosing macro was too big for gas. Perhaps the code would have blown
+ * the cache anyway.
+ */
+ ALIGN_TEXT
+show_bits:
+ pushl %eax
+ SHOW_BIT(0)
+ SHOW_BIT(1)
+ SHOW_BIT(2)
+ SHOW_BIT(3)
+ SHOW_BIT(4)
+ SHOW_BIT(5)
+ SHOW_BIT(6)
+ SHOW_BIT(7)
+ SHOW_BIT(8)
+ SHOW_BIT(9)
+ SHOW_BIT(10)
+ SHOW_BIT(11)
+ SHOW_BIT(12)
+ SHOW_BIT(13)
+ SHOW_BIT(14)
+ SHOW_BIT(15)
+ popl %eax
+ ret
+
+ .data
+bit_colors:
+ .byte GREEN,RED,0,0
+ .text
+
+#endif /* SHOW_A_LOT */
+
+/*
+ * include generated interrupt vectors and ISA intr code
+ */
+#include "i386/isa/vector.s"
+#include "i386/isa/icu.s"
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
index 381f3df..84c212a 100644
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
- * $Id: genassym.c,v 1.4 1993/10/12 15:33:18 rgrimes Exp $
+ * $Id: genassym.c,v 1.5 1993/10/15 10:34:17 rgrimes Exp $
*/
#include "sys/param.h"
@@ -96,10 +96,14 @@ main()
printf("#define\tCLSIZE %d\n", CLSIZE);
printf("#define\tNBPG %d\n", NBPG);
printf("#define\tNPTEPG %d\n", NPTEPG);
+ printf("#define\tPDESIZE %d\n", PDESIZE);
+ printf("#define\tPTESIZE %d\n", PTESIZE);
printf("#define\tNKPDE %d\n", NKPDE);
- printf("#define\tKPTDI %d\n", KPTDI);
- printf("#define\tPTDPTDI %d\n", PTDPTDI);
- printf("#define\tAPTDPTDI %d\n", APTDPTDI);
+ printf("#define\tKPTDI 0x%x\n", KPTDI);
+ printf("#define\tKSTKPTDI 0x%x\n", KSTKPTDI);
+ printf("#define\tKSTKPTEOFF 0x%x\n", KSTKPTEOFF);
+ printf("#define\tPTDPTDI 0x%x\n", PTDPTDI);
+ printf("#define\tAPTDPTDI 0x%x\n", APTDPTDI);
printf("#define\tPGSHIFT %d\n", PGSHIFT);
printf("#define\tPDRSHIFT %d\n", PDRSHIFT);
printf("#define\tSYSPTSIZE %d\n", SYSPTSIZE);
@@ -108,9 +112,8 @@ main()
#ifdef SYSVSHM
printf("#define\tSHMMAXPGS %d\n", SHMMAXPGS);
#endif
- printf("#define\tUSRSTACK %d\n", USRSTACK);
- printf("#define\tKERNBASE %d\n", KERNBASE);
- printf("#define\tKERNSIZE %d\n", KERNSIZE);
+ printf("#define\tUSRSTACK 0x%x\n", USRSTACK);
+ printf("#define\tKERNBASE 0x%x\n", KERNBASE);
printf("#define\tMSGBUFPTECNT %d\n", btoc(sizeof (struct msgbuf)));
printf("#define\tNMBCLUSTERS %d\n", NMBCLUSTERS);
printf("#define\tMCLBYTES %d\n", MCLBYTES);
diff --git a/sys/amd64/amd64/locore.S b/sys/amd64/amd64/locore.S
index d808571..4b4e36f 100644
--- a/sys/amd64/amd64/locore.S
+++ b/sys/amd64/amd64/locore.S
@@ -34,86 +34,41 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
- * $Id: locore.s,v 1.8 1993/10/15 10:34:19 rgrimes Exp $
+ * $Id$
*/
-
/*
- * locore.s: 4BSD machine support for the Intel 386
- * Preliminary version
- * Written by William F. Jolitz, 386BSD Project
+ * locore.s: FreeBSD machine support for the Intel 386
+ * originally from: locore.s, by William F. Jolitz
+ *
+ * Substantially rewritten by David Greenman, Rod Grimes,
+ * Bruce Evans, Wolfgang Solfrank, and many others.
*/
-#include "npx.h"
+#include "npx.h" /* for NNPX */
-#include "assym.s"
-#include "machine/psl.h"
-#include "machine/pte.h"
+#include "assym.s" /* system definitions */
+#include "machine/psl.h" /* processor status longword defs */
+#include "machine/pte.h" /* page table entry definitions */
-#include "errno.h"
+#include "errno.h" /* error return codes */
-#include "machine/trap.h"
+#include "machine/specialreg.h" /* x86 special registers */
+#include "i386/isa/debug.h" /* BDE debugging macros */
+#include "machine/cputypes.h" /* x86 cpu type definitions */
-#include "machine/specialreg.h"
-#include "i386/isa/debug.h"
-#include "machine/cputypes.h"
+#include "syscall.h" /* system call numbers */
-#define KDSEL 0x10
-#define SEL_RPL_MASK 0x0003
-#define TRAPF_CS_OFF (13 * 4)
+#include "machine/asmacros.h" /* miscellaneous asm macros */
/*
+ * XXX
+ *
* Note: This version greatly munged to avoid various assembler errors
* that may be fixed in newer versions of gas. Perhaps newer versions
* will have more pleasant appearance.
*/
- .set IDXSHIFT,10
-
-#define ALIGN_DATA .align 2
-#define ALIGN_TEXT .align 2,0x90 /* 4-byte boundaries, NOP-filled */
-#define SUPERALIGN_TEXT .align 4,0x90 /* 16-byte boundaries better for 486 */
-
-#define GEN_ENTRY(name) ALIGN_TEXT; .globl name; name:
-#define NON_GPROF_ENTRY(name) GEN_ENTRY(_/**/name)
-
-#ifdef GPROF
-/*
- * ALTENTRY() must be before a corresponding ENTRY() so that it can jump
- * over the mcounting.
- */
-#define ALTENTRY(name) GEN_ENTRY(_/**/name); MCOUNT; jmp 2f
-#define ENTRY(name) GEN_ENTRY(_/**/name); MCOUNT; 2:
-/*
- * The call to mcount supports the usual (bad) conventions. We allocate
- * some data and pass a pointer to it although the 386BSD doesn't use
- * the data. We set up a frame before calling mcount because that is
- * the standard convention although it makes work for both mcount and
- * callers.
- */
-#define MCOUNT .data; ALIGN_DATA; 1:; .long 0; .text; \
- pushl %ebp; movl %esp,%ebp; \
- movl $1b,%eax; call mcount; popl %ebp
-#else
-/*
- * ALTENTRY() has to align because it is before a corresponding ENTRY().
- * ENTRY() has to align to because there may be no ALTENTRY() before it.
- * If there is a previous ALTENTRY() then the alignment code is empty.
- */
-#define ALTENTRY(name) GEN_ENTRY(_/**/name)
-#define ENTRY(name) GEN_ENTRY(_/**/name)
-#endif
-
-/* NB: NOP now preserves registers so NOPs can be inserted anywhere */
-/* XXX: NOP and FASTER_NOP are misleadingly named */
-#ifdef DUMMY_NOPS /* this will break some older machines */
-#define FASTER_NOP
-#define NOP
-#else
-#define FASTER_NOP pushl %eax ; inb $0x84,%al ; popl %eax
-#define NOP pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
-#endif
-
/*
* PTmap is recursive pagemap at top of virtual address space.
* Within PTmap, the page directory can be found (third indirection).
@@ -121,8 +76,9 @@
.globl _PTmap,_PTD,_PTDpde,_Sysmap
.set _PTmap,PTDPTDI << PDRSHIFT
.set _PTD,_PTmap + (PTDPTDI * NBPG)
- .set _PTDpde,_PTD + (PTDPTDI * 4) /* XXX 4=sizeof pde */
+ .set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
+/* Sysmap is the base address of the kernel page tables */
.set _Sysmap,_PTmap + (KPTDI * NBPG)
/*
@@ -132,7 +88,7 @@
.globl _APTmap,_APTD,_APTDpde
.set _APTmap,APTDPTDI << PDRSHIFT
.set _APTD,_APTmap + (APTDPTDI * NBPG)
- .set _APTDpde,_PTD + (APTDPTDI * 4) /* XXX 4=sizeof pde */
+ .set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
/*
* Access to each processes kernel stack is via a region of
@@ -141,44 +97,41 @@
*/
.set _kstack,USRSTACK
.globl _kstack
- .set PPDROFF,0x3F6
- .set PPTEOFF,0x400-UPAGES /* 0x3FE */
-
/*
* Globals
*/
.data
.globl _esym
-_esym: .long 0 /* ptr to end of syms */
+_esym: .long 0 /* ptr to end of syms */
.globl _boothowto,_bootdev,_curpcb
.globl _cpu,_cold,_atdevbase
-_cpu: .long 0 /* are we 386, 386sx, or 486 */
-_cold: .long 1 /* cold till we are not */
-_atdevbase: .long 0 /* location of start of iomem in virtual */
-_atdevphys: .long 0 /* location of device mapping ptes (phys) */
+_cpu: .long 0 /* are we 386, 386sx, or 486 */
+_cold: .long 1 /* cold till we are not */
+_atdevbase: .long 0 /* location of start of iomem in virtual */
+_atdevphys: .long 0 /* location of device mapping ptes (phys) */
+
+ .globl _KERNend
+_KERNend: .long 0 /* phys addr end of kernel (just after bss) */
.globl _IdlePTD,_KPTphys
-_IdlePTD: .long 0
-_KPTphys: .long 0
+_IdlePTD: .long 0 /* phys addr of kernel PTD */
+_KPTphys: .long 0 /* phys addr of kernel page tables */
- .globl _curpcb, _whichqs
-_curpcb: .long 0 /* pointer to curproc's PCB area */
-_whichqs: .long 0 /* which run queues have data */
+ .globl _cyloffset
+_cyloffset: .long 0 /* cylinder offset from boot blocks */
- .globl _cyloffset,_proc0paddr
-_cyloffset: .long 0
-_proc0paddr: .long 0
+ .globl _proc0paddr
+_proc0paddr: .long 0 /* address of proc 0 address space */
- /* Stuff for network ASTs */
- .globl _softem,_netisr,_astpending,_want_resched
-_softem: .long 0 /* WFJ only knows... */
-_netisr: .long 0 /* set with bits for which queue to service */
-_astpending: .long 0 /* tells us an AST needs to be taken */
-_want_resched: .long 0 /* we need to re-schedule */
+#ifdef BDE_DEBUGGER
+ .globl _bdb_exists /* flag to indicate BDE debugger is available */
+ .long 0
+#endif
+ .globl tmpstk
.space 512
tmpstk:
@@ -193,9 +146,9 @@ tmpstk:
* Also the entry point (jumped to directly from the boot blocks).
*/
ENTRY(btext)
- movw $0x1234,0x472 /* warm boot */
+ movw $0x1234,0x472 /* warm boot */
jmp 1f
- .space 0x500 /* skip over warm boot shit */
+ .space 0x500 /* skip over warm boot shit */
/*
* pass parameters on stack (howto, bootdev, unit, cyloffset, esym)
@@ -249,53 +202,60 @@ ENTRY(btext)
* Oops, the gdt is in the carcass of the boot program so clearing
* the rest of memory is still not possible.
*/
- movl $tmpstk-KERNBASE,%esp /* bootstrap stack end location */
+ movl $tmpstk-KERNBASE,%esp /* bootstrap stack end location */
/*
* Virtual address space of kernel:
*
* text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
- * 0 1 2 3 4
+ * pages: 1 UPAGES (2) 1 NKPDE (7)
*/
/* find end of kernel image */
movl $_end-KERNBASE,%ecx
- addl $NBPG-1,%ecx /* page align up */
+ addl $NBPG-1,%ecx /* page align up */
andl $~(NBPG-1),%ecx
- movl %ecx,%esi /* esi=start of tables */
+ movl %ecx,%esi /* esi=start of tables */
+ movl %ecx,_KERNend-KERNBASE /* save end of kernel */
-/* clear bss and memory for bootstrap pagetables. */
+/* clear bss */
movl $_edata-KERNBASE,%edi
- subl %edi,%ecx
- addl $(UPAGES+5)*NBPG,%ecx /* size of tables */
-
- xorl %eax,%eax /* pattern */
+ subl %edi,%ecx /* get mount to clear */
+ xorl %eax,%eax /* specify zero fill */
cld
rep
stosb
/*
- * If we are loaded at 0x0 check to see if we have space for the
- * page tables pages after the kernel and before the 640K ISA memory
- * hole. If we do not have space relocate the page table pages and
- * the kernel stack to start at 1MB. The value that ends up in esi
- * is used by the rest of locore to build the tables. Locore adjusts
- * esi each time it allocates a structure and then passes the final
- * value to init386(first) as the value first. esi should ALWAYS
- * be page aligned!!
- */
- movl %esi,%ecx /* Get current first availiable address */
- cmpl $0x100000,%ecx /* Lets see if we are already above 1MB */
- jge 1f /* yep, don't need to check for room */
- addl $(NKPDE + 4) * NBPG,%ecx /* XXX the 4 is for kstack */
- /* space for kstack, PTD and PTE's */
- cmpl $(640*1024),%ecx
- /* see if it fits in low memory */
- jle 1f /* yep, don't need to relocate it */
- movl $0x100000,%esi /* won't fit, so start it at 1MB */
+ * If we are loaded at 0x0 check to see if we have space for the
+ * page dir/tables and stack area after the kernel and before the 640K
+ * ISA memory hole. If we do not have space relocate the page directory,
+ * UPAGES, proc 0 stack, and page table pages to start at 1MB. The value
+ * that ends up in esi, which points to the kernel page directory, is
+ * used by the rest of locore to build the tables.
+ * esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPDE(number of kernel
+ * page table pages) is then passed on the stack to init386(first) as
+ * the value first. esi should ALWAYS be page aligned!!
+ */
+ movl %esi,%ecx /* Get current first availiable address */
+ cmpl $0x100000,%ecx /* Lets see if we are already above 1MB */
+ jge 1f /* yep, don't need to check for room */
+ addl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* XXX the 4 is for kstack */
+ /* space for kstack, PTD and PTE's */
+ cmpl $(640*1024),%ecx /* see if it fits in low memory */
+ jle 1f /* yep, don't need to relocate it */
+ movl $0x100000,%esi /* won't fit, so start it at 1MB */
1:
-/* physical address of Idle Address space */
+/* clear pagetables, page directory, stack, etc... */
+ movl %esi,%edi /* base (page directory) */
+ movl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* amount to clear */
+ xorl %eax,%eax /* specify zero fill */
+ cld
+ rep
+ stosb
+
+/* physical address of Idle proc/kernel page directory */
movl %esi,_IdlePTD-KERNBASE
/*
@@ -312,69 +272,100 @@ ENTRY(btext)
/*
* Map Kernel
- * N.B. don't bother with making kernel text RO, as 386
- * ignores R/W AND U/S bits on kernel access (only v works) !
*
* First step - build page tables
*/
- movl %esi,%ecx /* this much memory, */
- shrl $PGSHIFT,%ecx /* for this many pte s */
- addl $UPAGES+4,%ecx /* including our early context */
- cmpl $0xa0,%ecx /* XXX - cover debugger pages */
+#if defined (KGDB) || defined (BDE_DEBUGGER)
+ movl _KERNend-KERNBASE,%ecx /* this much memory, */
+ shrl $PGSHIFT,%ecx /* for this many PTEs */
+#ifdef BDE_DEBUGGER
+ cmpl $0xa0,%ecx /* XXX - cover debugger pages */
jae 1f
movl $0xa0,%ecx
1:
- movl $PG_V|PG_KW,%eax /* having these bits set, */
- lea (4*NBPG)(%esi),%ebx /* physical address of KPT in proc 0, */
- movl %ebx,_KPTphys-KERNBASE /* in the kernel page table, */
+#endif /* BDE_DEBUGGER */
+ movl $PG_V|PG_KW,%eax /* having these bits set, */
+ lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
+ movl %ebx,_KPTphys-KERNBASE /* save in global */
fillkpt
+#else /* !KGDB && !BDE_DEBUGGER */
+ /* write protect kernel text (doesn't do a thing for 386's - only 486's) */
+ movl $_etext-KERNBASE,%ecx /* get size of text */
+ shrl $PGSHIFT,%ecx /* for this many PTEs */
+ movl $PG_V|PG_KR,%eax /* specify read only */
+ lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
+ movl %ebx,_KPTphys-KERNBASE /* save in global */
+ fillkpt
+
+ /* data and bss are r/w */
+ andl $PG_FRAME,%eax /* strip to just addr of bss */
+ movl _KERNend-KERNBASE,%ecx /* calculate size */
+ subl %eax,%ecx
+ shrl $PGSHIFT,%ecx
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ fillkpt
+#endif
+
+/* now initialize the page dir, upages, p0stack PT, and page tables */
+
+ movl $(1+UPAGES+1+NKPDE),%ecx /* number of PTEs */
+ movl %esi,%eax /* phys address of PTD */
+ andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ movl %esi,%ebx /* calculate pte offset to ptd */
+ shrl $PGSHIFT-2,%ebx
+ addl %esi,%ebx /* address of page directory */
+ addl $((1+UPAGES+1)*NBPG),%ebx /* offset to kernel page tables */
+ fillkpt
+
/* map I/O memory map */
- movl $0x100-0xa0,%ecx /* for this many pte s, */
- movl $(0xa0000|PG_V|PG_UW),%eax /* having these bits set,(perhaps URW?) XXX 06 Aug 92 */
- movl %ebx,_atdevphys-KERNBASE /* remember phys addr of ptes */
+ movl _KPTphys-KERNBASE,%ebx /* base of kernel page tables */
+ lea (0xa0 * PTESIZE)(%ebx),%ebx /* hardwire ISA hole at KERNBASE + 0xa0000 */
+ movl $0x100-0xa0,%ecx /* for this many pte s, */
+ movl $(0xa0000|PG_V|PG_KW),%eax /* valid, kernel read/write */
+ movl %ebx,_atdevphys-KERNBASE /* save phys addr of ptes */
fillkpt
/* map proc 0's kernel stack into user page table page */
- movl $UPAGES,%ecx /* for this many pte s, */
- lea (1*NBPG)(%esi),%eax /* physical address in proc 0 */
- lea (KERNBASE)(%eax),%edx
- movl %edx,_proc0paddr-KERNBASE
- /* remember VA for 0th process init */
- orl $PG_V|PG_KW,%eax /* having these bits set, */
- lea (3*NBPG)(%esi),%ebx /* physical address of stack pt in proc 0 */
- addl $(PPTEOFF*4),%ebx
+ movl $UPAGES,%ecx /* for this many pte s, */
+ lea (1*NBPG)(%esi),%eax /* physical address in proc 0 */
+ lea (KERNBASE)(%eax),%edx /* change into virtual addr */
+ movl %edx,_proc0paddr-KERNBASE /* save VA for proc 0 init */
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ lea ((1+UPAGES)*NBPG)(%esi),%ebx /* addr of stack page table in proc 0 */
+ addl $(KSTKPTEOFF * PTESIZE),%ebx /* offset to kernel stack PTE */
fillkpt
/*
- * Construct a page table directory
- * (of page directory elements - pde's)
+ * Initialize kernel page table directory
*/
/* install a pde for temporary double map of bottom of VA */
- lea (4*NBPG)(%esi),%eax /* physical address of kernel page table */
- orl $PG_V|PG_UW,%eax /* pde entry is valid XXX 06 Aug 92 */
- movl %eax,(%esi) /* which is where temp maps! */
+ movl _KPTphys-KERNBASE,%eax
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ movl %eax,(%esi) /* which is where temp maps! */
- /* kernel pde's */
- movl $(NKPDE),%ecx /* for this many pde s, */
- lea (KPTDI*4)(%esi),%ebx /* offset of pde for kernel */
+ /* initialize kernel pde's */
+ movl $(NKPDE),%ecx /* for this many PDEs */
+ lea (KPTDI*PDESIZE)(%esi),%ebx /* offset of pde for kernel */
fillkpt
/* install a pde recursively mapping page directory as a page table! */
- movl %esi,%eax /* phys address of ptd in proc 0 */
- orl $PG_V|PG_UW,%eax /* pde entry is valid XXX 06 Aug 92 */
- movl %eax,PTDPTDI*4(%esi) /* which is where PTmap maps! */
+ movl %esi,%eax /* phys address of ptd in proc 0 */
+ orl $PG_V|PG_KW,%eax /* pde entry is valid */
+ movl %eax,PTDPTDI*PDESIZE(%esi) /* which is where PTmap maps! */
/* install a pde to map kernel stack for proc 0 */
- lea (3*NBPG)(%esi),%eax /* physical address of pt in proc 0 */
- orl $PG_V|PG_KW,%eax /* pde entry is valid */
- movl %eax,PPDROFF*4(%esi) /* which is where kernel stack maps! */
+ lea ((1+UPAGES)*NBPG)(%esi),%eax /* physical address of pt in proc 0 */
+ orl $PG_V|PG_KW,%eax /* pde entry is valid */
+ movl %eax,KSTKPTDI*PDESIZE(%esi) /* which is where kernel stack maps! */
+#ifdef BDE_DEBUGGER
/* copy and convert stuff from old gdt and idt for debugger */
- cmpl $0x0375c339,0x96104 /* XXX - debugger signature */
+ cmpl $0x0375c339,0x96104 /* XXX - debugger signature */
jne 1f
movb $1,_bdb_exists-KERNBASE
1:
@@ -382,23 +373,23 @@ ENTRY(btext)
subl $2*6,%esp
sgdt (%esp)
- movl 2(%esp),%esi /* base address of current gdt */
+ movl 2(%esp),%esi /* base address of current gdt */
movl $_gdt-KERNBASE,%edi
movl %edi,2(%esp)
movl $8*18/4,%ecx
- rep /* copy gdt */
+ rep /* copy gdt */
movsl
movl $_gdt-KERNBASE,-8+2(%edi) /* adjust gdt self-ptr */
movb $0x92,-8+5(%edi)
sidt 6(%esp)
- movl 6+2(%esp),%esi /* base address of current idt */
- movl 8+4(%esi),%eax /* convert dbg descriptor to ... */
+ movl 6+2(%esp),%esi /* base address of current idt */
+ movl 8+4(%esi),%eax /* convert dbg descriptor to ... */
movw 8(%esi),%ax
movl %eax,bdb_dbg_ljmp+1-KERNBASE /* ... immediate offset ... */
movl 8+2(%esi),%eax
movw %ax,bdb_dbg_ljmp+5-KERNBASE /* ... and selector for ljmp */
- movl 24+4(%esi),%eax /* same for bpt descriptor */
+ movl 24+4(%esi),%eax /* same for bpt descriptor */
movw 24(%esi),%ax
movl %eax,bdb_bpt_ljmp+1-KERNBASE
movl 24+2(%esi),%eax
@@ -407,7 +398,7 @@ ENTRY(btext)
movl $_idt-KERNBASE,%edi
movl %edi,6+2(%esp)
movl $8*4/4,%ecx
- rep /* copy idt */
+ rep /* copy idt */
movsl
lgdt (%esp)
@@ -415,12 +406,13 @@ ENTRY(btext)
addl $2*6,%esp
popal
+#endif
/* load base of page directory and enable mapping */
- movl %esi,%eax /* phys address of ptd in proc 0 */
+ movl %esi,%eax /* phys address of ptd in proc 0 */
orl $I386_CR3PAT,%eax
- movl %eax,%cr3 /* load ptd addr into mmu */
- movl %cr0,%eax /* get control word */
+ movl %eax,%cr3 /* load ptd addr into mmu */
+ movl %cr0,%eax /* get control word */
/*
* XXX it is now safe to always (attempt to) set CR0_WP and to set up
* the page tables assuming it works, so USE_486_WRITE_PROTECT will go
@@ -430,58 +422,59 @@ ENTRY(btext)
#ifdef USE_486_WRITE_PROTECT
orl $CR0_PE|CR0_PG|CR0_WP,%eax /* enable paging */
#else
- orl $CR0_PE|CR0_PG,%eax /* enable paging */
+ orl $CR0_PE|CR0_PG,%eax /* enable paging */
#endif
- movl %eax,%cr0 /* and let's page NOW! */
+ movl %eax,%cr0 /* and let's page NOW! */
- pushl $begin /* jump to high mem */
+ pushl $begin /* jump to high mem */
ret
begin: /* now running relocated at KERNBASE where the system is linked to run */
- .globl _Crtat /* XXX - locore should not know about */
- movl _Crtat,%eax /* variables of device drivers (pccons)! */
+ .globl _Crtat /* XXX - locore should not know about */
+ movl _Crtat,%eax /* variables of device drivers (pccons)! */
subl $(KERNBASE+0xA0000),%eax
- movl _atdevphys,%edx /* get pte PA */
- subl _KPTphys,%edx /* remove base of ptes, now have phys offset */
- shll $PGSHIFT-2,%edx /* corresponding to virt offset */
- addl $KERNBASE,%edx /* add virtual base */
+ movl _atdevphys,%edx /* get pte PA */
+ subl _KPTphys,%edx /* remove base of ptes, now have phys offset */
+ shll $PGSHIFT-2,%edx /* corresponding to virt offset */
+ addl $KERNBASE,%edx /* add virtual base */
movl %edx,_atdevbase
addl %eax,%edx
movl %edx,_Crtat
- /* set up bootstrap stack */
+ /* set up bootstrap stack - 48 bytes */
movl $_kstack+UPAGES*NBPG-4*12,%esp /* bootstrap stack end location */
- xorl %eax,%eax /* mark end of frames */
+ xorl %eax,%eax /* mark end of frames */
movl %eax,%ebp
movl _proc0paddr,%eax
movl %esi,PCB_CR3(%eax)
+#ifdef BDE_DEBUGGER
/* relocate debugger gdt entries */
- movl $_gdt+8*9,%eax /* adjust slots 9-17 */
+ movl $_gdt+8*9,%eax /* adjust slots 9-17 */
movl $9,%ecx
reloc_gdt:
- movb $0xfe,7(%eax) /* top byte of base addresses, was 0, */
- addl $8,%eax /* now KERNBASE>>24 */
+ movb $0xfe,7(%eax) /* top byte of base addresses, was 0, */
+ addl $8,%eax /* now KERNBASE>>24 */
loop reloc_gdt
cmpl $0,_bdb_exists
je 1f
int $3
1:
+#endif
/*
* Skip over the page tables and the kernel stack
- * XXX 4 is kstack size
*/
- lea (NKPDE + 4) * NBPG(%esi),%esi
+ lea ((1+UPAGES+1+NKPDE)*NBPG)(%esi),%esi
- pushl %esi /* value of first for init386(first) */
- call _init386 /* wire 386 chip for unix operation */
+ pushl %esi /* value of first for init386(first) */
+ call _init386 /* wire 386 chip for unix operation */
movl $0,_PTD
- call _main /* autoconfiguration, mountroot etc */
+ call _main /* autoconfiguration, mountroot etc */
popl %esi
/*
@@ -495,25 +488,22 @@ reloc_gdt:
movl __ucodesel,%eax
movl __udatasel,%ecx
/* build outer stack frame */
- pushl %ecx /* user ss */
- pushl $USRSTACK /* user esp */
- pushl %eax /* user cs */
- pushl $0 /* user ip */
+ pushl %ecx /* user ss */
+ pushl $USRSTACK /* user esp */
+ pushl %eax /* user cs */
+ pushl $0 /* user ip */
movl %cx,%ds
movl %cx,%es
- movl %ax,%fs /* double map cs to fs */
- movl %cx,%gs /* and ds to gs */
- lret /* goto user! */
+ movl %ax,%fs /* double map cs to fs */
+ movl %cx,%gs /* and ds to gs */
+ lret /* goto user! */
- pushl $lretmsg1 /* "should never get here!" */
+ pushl $lretmsg1 /* "should never get here!" */
call _panic
lretmsg1:
.asciz "lret: toinit\n"
- .set exec,59
- .set exit,1
-
#define LCALL(x,y) .byte 0x9a ; .long y; .word x
/*
* Icode is copied out to process 1 and executed in user mode:
@@ -521,36 +511,36 @@ lretmsg1:
* If the execve fails, process 1 exits and the system panics.
*/
NON_GPROF_ENTRY(icode)
- pushl $0 /* envp for execve() */
+ pushl $0 /* envp for execve() */
-# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
+# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
movl $argv,%eax
subl $_icode,%eax
- pushl %eax /* argp for execve() */
+ pushl %eax /* argp for execve() */
# pushl $init-_icode
movl $init,%eax
subl $_icode,%eax
- pushl %eax /* fname for execve() */
+ pushl %eax /* fname for execve() */
- pushl %eax /* dummy return address */
+ pushl %eax /* dummy return address */
- movl $exec,%eax
+ movl $SYS_execve,%eax
LCALL(0x7,0x0)
/* exit if something botches up in the above execve() */
- pushl %eax /* execve failed, the errno will do for an */
- /* exit code because errnos are < 128 */
- pushl %eax /* dummy return address */
- movl $exit,%eax
+ pushl %eax /* execve failed, the errno will do for an */
+ /* exit code because errnos are < 128 */
+ pushl %eax /* dummy return address */
+ movl $SYS_exit,%eax
LCALL(0x7,0x0)
init:
.asciz "/sbin/init"
ALIGN_DATA
argv:
- .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
- .long eicode-_icode /* argv[1] follows icode after copyout */
+ .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
+ .long eicode-_icode /* argv[1] follows icode after copyout */
.long 0
eicode:
@@ -560,1604 +550,14 @@ _szicode:
NON_GPROF_ENTRY(sigcode)
call SIGF_HANDLER(%esp)
- lea SIGF_SC(%esp),%eax /* scp (the call may have clobbered the */
- /* copy at 8(%esp)) */
+ lea SIGF_SC(%esp),%eax /* scp (the call may have clobbered the */
+ /* copy at 8(%esp)) */
pushl %eax
- pushl %eax /* junk to fake return address */
- movl $103,%eax /* XXX sigreturn() */
- LCALL(0x7,0) /* enter kernel with args on stack */
- hlt /* never gets here */
+ pushl %eax /* junk to fake return address */
+ movl $103,%eax /* XXX sigreturn() */
+ LCALL(0x7,0) /* enter kernel with args on stack */
+ hlt /* never gets here */
.globl _szsigcode
_szsigcode:
.long _szsigcode-_sigcode
-
-/*
- * Support routines for GCC, general C-callable functions
- */
-ENTRY(__udivsi3)
- movl 4(%esp),%eax
- xorl %edx,%edx
- divl 8(%esp)
- ret
-
-ENTRY(__divsi3)
- movl 4(%esp),%eax
- cltd
- idivl 8(%esp)
- ret
-
- /*
- * I/O bus instructions via C
- */
-ENTRY(inb) /* val = inb(port) */
- movl 4(%esp),%edx
- subl %eax,%eax
- NOP
- inb %dx,%al
- ret
-
-ENTRY(inw) /* val = inw(port) */
- movl 4(%esp),%edx
- subl %eax,%eax
- NOP
- inw %dx,%ax
- ret
-
-ENTRY(insb) /* insb(port, addr, cnt) */
- pushl %edi
- movw 8(%esp),%dx
- movl 12(%esp),%edi
- movl 16(%esp),%ecx
- cld
- NOP
- rep
- insb
- NOP
- movl %edi,%eax
- popl %edi
- ret
-
-ENTRY(insw) /* insw(port, addr, cnt) */
- pushl %edi
- movw 8(%esp),%dx
- movl 12(%esp),%edi
- movl 16(%esp),%ecx
- cld
- NOP
- rep
- insw
- NOP
- movl %edi,%eax
- popl %edi
- ret
-
-ENTRY(rtcin) /* rtcin(val) */
- movl 4(%esp),%eax
- outb %al,$0x70
- subl %eax,%eax
- inb $0x71,%al
- ret
-
-ENTRY(outb) /* outb(port, val) */
- movl 4(%esp),%edx
- NOP
- movl 8(%esp),%eax
- outb %al,%dx
- NOP
- ret
-
-ENTRY(outw) /* outw(port, val) */
- movl 4(%esp),%edx
- NOP
- movl 8(%esp),%eax
- outw %ax,%dx
- NOP
- ret
-
-ENTRY(outsb) /* outsb(port, addr, cnt) */
- pushl %esi
- movw 8(%esp),%dx
- movl 12(%esp),%esi
- movl 16(%esp),%ecx
- cld
- NOP
- rep
- outsb
- NOP
- movl %esi,%eax
- popl %esi
- ret
-
-ENTRY(outsw) /* outsw(port, addr, cnt) */
- pushl %esi
- movw 8(%esp),%dx
- movl 12(%esp),%esi
- movl 16(%esp),%ecx
- cld
- NOP
- rep
- outsw
- NOP
- movl %esi,%eax
- popl %esi
- ret
-
- /*
- * bcopy family
- */
-ENTRY(bzero) /* void bzero(void *base, u_int cnt) */
- pushl %edi
- movl 8(%esp),%edi
- movl 12(%esp),%ecx
- xorl %eax,%eax
- shrl $2,%ecx
- cld
- rep
- stosl
- movl 12(%esp),%ecx
- andl $3,%ecx
- rep
- stosb
- popl %edi
- ret
-
-ENTRY(fillw) /* fillw(pat, base, cnt) */
- pushl %edi
- movl 8(%esp),%eax
- movl 12(%esp),%edi
- movl 16(%esp),%ecx
- cld
- rep
- stosw
- popl %edi
- ret
-
-ENTRY(bcopyb)
-bcopyb:
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi
- movl 16(%esp),%edi
- movl 20(%esp),%ecx
- cmpl %esi,%edi /* potentially overlapping? */
- jnb 1f
- cld /* nope, copy forwards */
- rep
- movsb
- popl %edi
- popl %esi
- ret
-
- ALIGN_TEXT
-1:
- addl %ecx,%edi /* copy backwards. */
- addl %ecx,%esi
- std
- decl %edi
- decl %esi
- rep
- movsb
- popl %edi
- popl %esi
- cld
- ret
-
-ENTRY(bcopyw)
-bcopyw:
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi
- movl 16(%esp),%edi
- movl 20(%esp),%ecx
- cmpl %esi,%edi /* potentially overlapping? */
- jnb 1f
- cld /* nope, copy forwards */
- shrl $1,%ecx /* copy by 16-bit words */
- rep
- movsw
- adc %ecx,%ecx /* any bytes left? */
- rep
- movsb
- popl %edi
- popl %esi
- ret
-
- ALIGN_TEXT
-1:
- addl %ecx,%edi /* copy backwards */
- addl %ecx,%esi
- std
- andl $1,%ecx /* any fractional bytes? */
- decl %edi
- decl %esi
- rep
- movsb
- movl 20(%esp),%ecx /* copy remainder by 16-bit words */
- shrl $1,%ecx
- decl %esi
- decl %edi
- rep
- movsw
- popl %edi
- popl %esi
- cld
- ret
-
-ENTRY(bcopyx)
- movl 16(%esp),%eax
- cmpl $2,%eax
- je bcopyw /* not _bcopyw, to avoid multiple mcounts */
- cmpl $4,%eax
- je bcopy
- jmp bcopyb
-
- /*
- * (ov)bcopy(src, dst, cnt)
- * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
- */
-ALTENTRY(ovbcopy)
-ENTRY(bcopy)
-bcopy:
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi
- movl 16(%esp),%edi
- movl 20(%esp),%ecx
- cmpl %esi,%edi /* potentially overlapping? */
- jnb 1f
- cld /* nope, copy forwards */
- shrl $2,%ecx /* copy by 32-bit words */
- rep
- movsl
- movl 20(%esp),%ecx
- andl $3,%ecx /* any bytes left? */
- rep
- movsb
- popl %edi
- popl %esi
- ret
-
- ALIGN_TEXT
-1:
- addl %ecx,%edi /* copy backwards */
- addl %ecx,%esi
- std
- andl $3,%ecx /* any fractional bytes? */
- decl %edi
- decl %esi
- rep
- movsb
- movl 20(%esp),%ecx /* copy remainder by 32-bit words */
- shrl $2,%ecx
- subl $3,%esi
- subl $3,%edi
- rep
- movsl
- popl %edi
- popl %esi
- cld
- ret
-
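The overlap test above (cmpl %esi,%edi / jnb 1f) is the heart of (ov)bcopy: copy forwards when the destination sits below the source, backwards otherwise, so overlapping moves stay safe. A rough user-level C sketch of that decision (illustrative names; the bare pointer comparison is a simplification):

    #include <stddef.h>

    /* Mirror of the direction choice made by bcopy above. */
    static void sk_bcopy(const char *src, char *dst, size_t len)
    {
            size_t i;

            if (dst < src) {                        /* cmpl %esi,%edi; jnb 1f */
                    for (i = 0; i < len; i++)       /* cld; rep movsl/movsb */
                            dst[i] = src[i];
            } else {                                /* copy backwards */
                    for (i = len; i > 0; i--)       /* std; rep movsb/movsl */
                            dst[i - 1] = src[i - 1];
            }
    }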
-ALTENTRY(ntohl)
-ENTRY(htonl)
- movl 4(%esp),%eax
-#ifdef i486
- /* XXX */
- /* Since Gas 1.38 does not grok bswap this has been coded as the
- * equivalent bytes. This can be changed back to bswap when we
- * upgrade to a newer version of Gas */
- /* bswap %eax */
- .byte 0x0f
- .byte 0xc8
-#else
- xchgb %al,%ah
- roll $16,%eax
- xchgb %al,%ah
-#endif
- ret
-
-ALTENTRY(ntohs)
-ENTRY(htons)
- movzwl 4(%esp),%eax
- xchgb %al,%ah
- ret
-
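The xchgb/roll sequence (and the hand-encoded bswap for the 486) is an ordinary 32-bit byte reversal, and htons swaps the two bytes of a 16-bit value. The same operations in C, using stdint.h types that are newer than this code:

    #include <stdint.h>

    static uint32_t sk_htonl(uint32_t x)            /* byte-reverse 32 bits */
    {
            return (x >> 24) | ((x >> 8) & 0x0000ff00UL) |
                   ((x << 8) & 0x00ff0000UL) | (x << 24);
    }

    static uint16_t sk_htons(uint16_t x)            /* xchgb %al,%ah */
    {
            return (uint16_t)((x >> 8) | (x << 8));
    }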
-/*****************************************************************************/
-/* copyout and fubyte family */
-/*****************************************************************************/
-/*
- * Access user memory from inside the kernel. These routines and possibly
- * the math- and DOS emulators should be the only places that do this.
- *
- * We have to access the memory with user's permissions, so use a segment
- * selector with RPL 3. For writes to user space we have to additionally
- * check the PTE for write permission, because the 386 does not check
- * write permissions when we are executing with EPL 0. The 486 does check
- * this if the WP bit is set in CR0, so we can use a simpler version here.
- *
- * These routines set curpcb->onfault for the time they execute. When a
- * protection violation occurs inside the functions, the trap handler
- * returns to *curpcb->onfault instead of the function.
- */
-
-
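The curpcb->onfault protocol described above works much like setjmp/longjmp: the copy routine records a recovery point, and the trap handler resumes there if a protection fault hits mid-copy. A rough user-level analogy in C (sketch only; the sk_* names and the handler are stand-ins for the kernel's trap path):

    #include <errno.h>
    #include <setjmp.h>
    #include <string.h>

    static jmp_buf sk_onfault;              /* plays the role of pcb_onfault */

    void sk_fault_handler(void)             /* what trap() does when onfault is set */
    {
            longjmp(sk_onfault, 1);
    }

    int sk_copyout(const void *from, void *to, size_t len)
    {
            if (setjmp(sk_onfault) != 0)
                    return (EFAULT);        /* resumed here after a fault */
            memcpy(to, from, len);          /* may fault part way through */
            return (0);
    }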
-ENTRY(copyout) /* copyout(from_kernel, to_user, len) */
- movl _curpcb,%eax
- movl $copyout_fault,PCB_ONFAULT(%eax)
- pushl %esi
- pushl %edi
- pushl %ebx
- movl 16(%esp),%esi
- movl 20(%esp),%edi
- movl 24(%esp),%ebx
- orl %ebx,%ebx /* anything to do? */
- jz done_copyout
-
- /*
- * Check explicitly for non-user addresses. If 486 write protection
- * is being used, this check is essential because we are in kernel
- * mode so the h/w does not provide any protection against writing
- * kernel addresses.
- *
- * Otherwise, it saves having to load and restore %es to get the
- * usual segment-based protection (the destination segment for movs
- * is always %es). The other explicit checks for user-writability
- * are not quite sufficient. They fail for the user area because
- * we mapped the user area read/write to avoid having an #ifdef in
- * vm_machdep.c. They fail for user PTEs and/or PTDs! (107
- * addresses including 0xff800000 and 0xfc000000). I'm not sure if
- * this can be fixed. Marking the PTEs supervisor mode and the
- * PDE's user mode would almost work, but there may be a problem
- * with the self-referential PDE.
- */
- movl %edi,%eax
- addl %ebx,%eax
- jc copyout_fault
-#define VM_END_USER_ADDRESS 0xFDBFE000 /* XXX */
- cmpl $VM_END_USER_ADDRESS,%eax
- ja copyout_fault
-
-#ifndef USE_486_WRITE_PROTECT
- /*
- * We have to check each PTE for user write permission.
- * The checking may cause a page fault, so it is important to set
- * up everything for return via copyout_fault before here.
- */
- /* compute number of pages */
- movl %edi,%ecx
- andl $NBPG-1,%ecx
- addl %ebx,%ecx
- decl %ecx
- shrl $IDXSHIFT+2,%ecx
- incl %ecx
-
- /* compute PTE offset for start address */
- movl %edi,%edx
- shrl $IDXSHIFT,%edx
- andb $0xfc,%dl
-
-1: /* check PTE for each page */
- movb _PTmap(%edx),%al
- andb $0x07,%al /* Pages must be VALID + USERACC + WRITABLE */
- cmpb $0x07,%al
- je 2f
-
- /* simulate a trap */
- pushl %edx
- pushl %ecx
- shll $IDXSHIFT,%edx
- pushl %edx
- call _trapwrite /* trapwrite(addr) */
- popl %edx
- popl %ecx
- popl %edx
-
- orl %eax,%eax /* if not ok, return EFAULT */
- jnz copyout_fault
-
-2:
- addl $4,%edx
- decl %ecx
- jnz 1b /* check next page */
-#endif /* ndef USE_486_WRITE_PROTECT */
-
- /* bcopy(%esi, %edi, %ebx) */
- cld
- movl %ebx,%ecx
- shrl $2,%ecx
- rep
- movsl
- movb %bl,%cl
- andb $3,%cl /* XXX can we trust the rest of %ecx on clones? */
- rep
- movsb
-
-done_copyout:
- popl %ebx
- popl %edi
- popl %esi
- xorl %eax,%eax
- movl _curpcb,%edx
- movl %eax,PCB_ONFAULT(%edx)
- ret
-
- ALIGN_TEXT
-copyout_fault:
- popl %ebx
- popl %edi
- popl %esi
- movl _curpcb,%edx
- movl $0,PCB_ONFAULT(%edx)
- movl $EFAULT,%eax
- ret
-
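Without 486 write protection, copyout above must walk every page of the destination range and require a PTE that is valid, user-accessible and writable, calling trapwrite() to simulate the fault otherwise. The page count and the check, spelled out in C (constants and the two helpers are stand-ins, not the kernel's):

    #include <stddef.h>

    #define SK_NBPG         4096u
    #define SK_PG_V         0x01u           /* valid */
    #define SK_PG_RW        0x02u           /* writable */
    #define SK_PG_USER      0x04u           /* user accessible */

    extern unsigned int sk_pte_of(unsigned long va); /* read the PTE via PTmap */
    extern int sk_trapwrite(unsigned long va);       /* 0 = page now writable */

    static int sk_user_write_ok(unsigned long va, size_t len)
    {
            /* number of pages touched by [va, va+len), len > 0 */
            size_t npages = ((va & (SK_NBPG - 1)) + len - 1) / SK_NBPG + 1;
            size_t i;

            for (i = 0; i < npages; i++, va += SK_NBPG) {
                    unsigned int pte = sk_pte_of(va);

                    if ((pte & 0x07u) != (SK_PG_V | SK_PG_RW | SK_PG_USER) &&
                        sk_trapwrite(va) != 0)
                            return (-1);    /* copyout returns EFAULT */
            }
            return (0);
    }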
-ENTRY(copyin) /* copyin(from_user, to_kernel, len) */
- movl _curpcb,%eax
- movl $copyin_fault,PCB_ONFAULT(%eax)
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi /* caddr_t from */
- movl 16(%esp),%edi /* caddr_t to */
- movl 20(%esp),%ecx /* size_t len */
-
- movb %cl,%al
- shrl $2,%ecx /* copy longword-wise */
- cld
- gs
- rep
- movsl
- movb %al,%cl
- andb $3,%cl /* copy remaining bytes */
- gs
- rep
- movsb
-
- popl %edi
- popl %esi
- xorl %eax,%eax
- movl _curpcb,%edx
- movl %eax,PCB_ONFAULT(%edx)
- ret
-
- ALIGN_TEXT
-copyin_fault:
- popl %edi
- popl %esi
- movl _curpcb,%edx
- movl $0,PCB_ONFAULT(%edx)
- movl $EFAULT,%eax
- ret
-
- /*
- * fu{byte,sword,word} : fetch a byte(sword, word) from user memory
- */
-ALTENTRY(fuiword)
-ENTRY(fuword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- gs
- movl (%edx),%eax
- movl $0,PCB_ONFAULT(%ecx)
- ret
-
-ENTRY(fusword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- gs
- movzwl (%edx),%eax
- movl $0,PCB_ONFAULT(%ecx)
- ret
-
-ALTENTRY(fuibyte)
-ENTRY(fubyte)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- gs
- movzbl (%edx),%eax
- movl $0,PCB_ONFAULT(%ecx)
- ret
-
- ALIGN_TEXT
-fusufault:
- movl _curpcb,%ecx
- xorl %eax,%eax
- movl %eax,PCB_ONFAULT(%ecx)
- decl %eax
- ret
-
- /*
- * su{byte,sword,word}: write a byte(word, longword) to user memory
- */
-#ifdef USE_486_WRITE_PROTECT
- /*
- * we only have to set the right segment selector.
- */
-ALTENTRY(suiword)
-ENTRY(suword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movl 8(%esp),%eax
- gs
- movl %eax,(%edx)
- xorl %eax,%eax
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-ENTRY(susword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movw 8(%esp),%ax
- gs
- movw %ax,(%edx)
- xorl %eax,%eax
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-ALTENTRY(suibyte)
-ENTRY(subyte)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movb 8(%esp),%al
- gs
- movb %al,(%edx)
- xorl %eax,%eax
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-
-#else /* USE_486_WRITE_PROTECT */
- /*
- * here starts the trouble again: check PTE, twice if word crosses
- * a page boundary.
- */
- /* XXX - page boundary crossing is not handled yet */
-
-ALTENTRY(suibyte)
-ENTRY(subyte)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movl %edx,%eax
- shrl $IDXSHIFT,%edx
- andb $0xfc,%dl
- movb _PTmap(%edx),%dl
- andb $0x7,%dl /* must be VALID + USERACC + WRITE */
- cmpb $0x7,%dl
- je 1f
- /* simulate a trap */
- pushl %eax
- call _trapwrite
- popl %edx
- orl %eax,%eax
- jnz fusufault
-1:
- movl 4(%esp),%edx
- movl 8(%esp),%eax
- gs
- movb %al,(%edx)
- xorl %eax,%eax
- movl _curpcb,%ecx
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-ENTRY(susword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movl %edx,%eax
- shrl $IDXSHIFT,%edx
- andb $0xfc,%dl
- movb _PTmap(%edx),%dl
- andb $0x7,%dl /* must be VALID + USERACC + WRITE */
- cmpb $0x7,%dl
- je 1f
- /* simulate a trap */
- pushl %eax
- call _trapwrite
- popl %edx
- orl %eax,%eax
- jnz fusufault
-1:
- movl 4(%esp),%edx
- movl 8(%esp),%eax
- gs
- movw %ax,(%edx)
- xorl %eax,%eax
- movl _curpcb,%ecx
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-ALTENTRY(suiword)
-ENTRY(suword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movl %edx,%eax
- shrl $IDXSHIFT,%edx
- andb $0xfc,%dl
- movb _PTmap(%edx),%dl
- andb $0x7,%dl /* must be VALID + USERACC + WRITE */
- cmpb $0x7,%dl
- je 1f
- /* simulate a trap */
- pushl %eax
- call _trapwrite
- popl %edx
- orl %eax,%eax
- jnz fusufault
-1:
- movl 4(%esp),%edx
- movl 8(%esp),%eax
- gs
- movl %eax,0(%edx)
- xorl %eax,%eax
- movl _curpcb,%ecx
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-#endif /* USE_486_WRITE_PROTECT */
-
-/*
- * copyoutstr(from, to, maxlen, int *lencopied)
- * copy a string from from to to, stop when a 0 character is reached.
- * return ENAMETOOLONG if string is longer than maxlen, and
- * EFAULT on protection violations. If lencopied is non-zero,
- * return the actual length in *lencopied.
- */
-#ifdef USE_486_WRITE_PROTECT
-
-ENTRY(copyoutstr)
- pushl %esi
- pushl %edi
- movl _curpcb,%ecx
- movl $cpystrflt,PCB_ONFAULT(%ecx)
-
- movl 12(%esp),%esi /* %esi = from */
- movl 16(%esp),%edi /* %edi = to */
- movl 20(%esp),%edx /* %edx = maxlen */
- incl %edx
-
-1:
- decl %edx
- jz 4f
- /*
- * gs override doesn't work for stosb. Use the same explicit check
- * as in copyout(). It's much slower now because it is per-char.
- * XXX - however, it would be faster to rewrite this function to use
- * strlen() and copyout().
- */
- cmpl $VM_END_USER_ADDRESS,%edi
- jae cpystrflt
- lodsb
- gs
- stosb
- orb %al,%al
- jnz 1b
- /* Success -- 0 byte reached */
- decl %edx
- xorl %eax,%eax
- jmp 6f
-4:
- /* edx is zero -- return ENAMETOOLONG */
- movl $ENAMETOOLONG,%eax
- jmp 6f
-
-#else /* ndef USE_486_WRITE_PROTECT */
-
-ENTRY(copyoutstr)
- pushl %esi
- pushl %edi
- movl _curpcb,%ecx
- movl $cpystrflt,PCB_ONFAULT(%ecx)
-
- movl 12(%esp),%esi /* %esi = from */
- movl 16(%esp),%edi /* %edi = to */
- movl 20(%esp),%edx /* %edx = maxlen */
-1:
- /*
- * It suffices to check that the first byte is in user space, because
- * we look at a page at a time and the end address is on a page
- * boundary.
- */
- cmpl $VM_END_USER_ADDRESS,%edi
- jae cpystrflt
- movl %edi,%eax
- shrl $IDXSHIFT,%eax
- andb $0xfc,%al
- movb _PTmap(%eax),%al
- andb $7,%al
- cmpb $7,%al
- je 2f
-
- /* simulate trap */
- pushl %edx
- pushl %edi
- call _trapwrite
- popl %edi
- popl %edx
- orl %eax,%eax
- jnz cpystrflt
-
-2: /* copy up to end of this page */
- movl %edi,%eax
- andl $NBPG-1,%eax
- movl $NBPG,%ecx
- subl %eax,%ecx /* ecx = NBPG - (src % NBPG) */
- cmpl %ecx,%edx
- jge 3f
- movl %edx,%ecx /* ecx = min(ecx, edx) */
-3:
- orl %ecx,%ecx
- jz 4f
- decl %ecx
- decl %edx
- lodsb
- stosb
- orb %al,%al
- jnz 3b
-
- /* Success -- 0 byte reached */
- decl %edx
- xorl %eax,%eax
- jmp 6f
-
-4: /* next page */
- orl %edx,%edx
- jnz 1b
- /* edx is zero -- return ENAMETOOLONG */
- movl $ENAMETOOLONG,%eax
- jmp 6f
-
-#endif /* USE_486_WRITE_PROTECT */
-
-/*
- * copyinstr(from, to, maxlen, int *lencopied)
- * copy a string from from to to, stop when a 0 character is reached.
- * return ENAMETOOLONG if string is longer than maxlen, and
- * EFAULT on protection violations. If lencopied is non-zero,
- * return the actual length in *lencopied.
- */
-ENTRY(copyinstr)
- pushl %esi
- pushl %edi
- movl _curpcb,%ecx
- movl $cpystrflt,PCB_ONFAULT(%ecx)
-
- movl 12(%esp),%esi /* %esi = from */
- movl 16(%esp),%edi /* %edi = to */
- movl 20(%esp),%edx /* %edx = maxlen */
- incl %edx
-
-1:
- decl %edx
- jz 4f
- gs
- lodsb
- stosb
- orb %al,%al
- jnz 1b
- /* Success -- 0 byte reached */
- decl %edx
- xorl %eax,%eax
- jmp 6f
-4:
- /* edx is zero -- return ENAMETOOLONG */
- movl $ENAMETOOLONG,%eax
- jmp 6f
-
-cpystrflt:
- movl $EFAULT,%eax
-6: /* set *lencopied and return %eax */
- movl _curpcb,%ecx
- movl $0,PCB_ONFAULT(%ecx)
- movl 20(%esp),%ecx
- subl %edx,%ecx
- movl 24(%esp),%edx
- orl %edx,%edx
- jz 7f
- movl %ecx,(%edx)
-7:
- popl %edi
- popl %esi
- ret
-
-
-/*
- * copystr(from, to, maxlen, int *lencopied)
- */
-ENTRY(copystr)
- pushl %esi
- pushl %edi
-
- movl 12(%esp),%esi /* %esi = from */
- movl 16(%esp),%edi /* %edi = to */
- movl 20(%esp),%edx /* %edx = maxlen */
- incl %edx
-
-1:
- decl %edx
- jz 4f
- lodsb
- stosb
- orb %al,%al
- jnz 1b
- /* Success -- 0 byte reached */
- decl %edx
- xorl %eax,%eax
- jmp 6f
-4:
- /* edx is zero -- return ENAMETOOLONG */
- movl $ENAMETOOLONG,%eax
-
-6: /* set *lencopied and return %eax */
- movl 20(%esp),%ecx
- subl %edx,%ecx
- movl 24(%esp),%edx
- orl %edx,%edx
- jz 7f
- movl %ecx,(%edx)
-7:
- popl %edi
- popl %esi
- ret
-
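The copy*str routines above share one contract, spelled out in their block comments: at most maxlen bytes are copied including the terminating NUL, ENAMETOOLONG is returned when the string does not fit, EFAULT on a protection fault, and the copied length is reported through lencopied when it is non-NULL. The plain in-kernel case (copystr) rendered in C, with illustrative names:

    #include <errno.h>
    #include <stddef.h>

    static int sk_copystr(const char *from, char *to, size_t maxlen,
                          size_t *lencopied)
    {
            size_t i;
            int error = ENAMETOOLONG;

            for (i = 0; i < maxlen; i++) {
                    if ((to[i] = from[i]) == '\0') {
                            i++;            /* count the NUL we just copied */
                            error = 0;
                            break;
                    }
            }
            if (lencopied != NULL)
                    *lencopied = i;
            return (error);
    }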
-/*
- * Handling of special 386 registers and descriptor tables etc
- */
-ENTRY(lgdt) /* void lgdt(struct region_descriptor *rdp); */
- /* reload the descriptor table */
- movl 4(%esp),%eax
- lgdt (%eax)
- /* flush the prefetch q */
- jmp 1f
- nop
-1:
- /* reload "stale" selectors */
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
- movl %ax,%ss
-
- /* reload code selector by turning return into intersegmental return */
- movl (%esp),%eax
- pushl %eax
-# movl $KCSEL,4(%esp)
- movl $8,4(%esp)
- lret
-
- /*
- * void lidt(struct region_descriptor *rdp);
- */
-ENTRY(lidt)
- movl 4(%esp),%eax
- lidt (%eax)
- ret
-
- /*
- * void lldt(u_short sel)
- */
-ENTRY(lldt)
- lldt 4(%esp)
- ret
-
- /*
- * void ltr(u_short sel)
- */
-ENTRY(ltr)
- ltr 4(%esp)
- ret
-
-ENTRY(ssdtosd) /* ssdtosd(*ssdp,*sdp) */
- pushl %ebx
- movl 8(%esp),%ecx
- movl 8(%ecx),%ebx
- shll $16,%ebx
- movl (%ecx),%edx
- roll $16,%edx
- movb %dh,%bl
- movb %dl,%bh
- rorl $8,%ebx
- movl 4(%ecx),%eax
- movw %ax,%dx
- andl $0xf0000,%eax
- orl %eax,%ebx
- movl 12(%esp),%ecx
- movl %edx,(%ecx)
- movl %ebx,4(%ecx)
- popl %ebx
- ret
-
-
-ENTRY(tlbflush) /* tlbflush() */
- movl %cr3,%eax
- orl $I386_CR3PAT,%eax
- movl %eax,%cr3
- ret
-
-
-ENTRY(load_cr0) /* load_cr0(cr0) */
- movl 4(%esp),%eax
- movl %eax,%cr0
- ret
-
-
-ENTRY(rcr0) /* rcr0() */
- movl %cr0,%eax
- ret
-
-
-ENTRY(rcr2) /* rcr2() */
- movl %cr2,%eax
- ret
-
-
-ENTRY(rcr3) /* rcr3() */
- movl %cr3,%eax
- ret
-
-
-ENTRY(load_cr3) /* void load_cr3(caddr_t cr3) */
- movl 4(%esp),%eax
- orl $I386_CR3PAT,%eax
- movl %eax,%cr3
- ret
-
-
-/*****************************************************************************/
-/* setjump, longjump */
-/*****************************************************************************/
-
-ENTRY(setjmp)
- movl 4(%esp),%eax
- movl %ebx,(%eax) /* save ebx */
- movl %esp,4(%eax) /* save esp */
- movl %ebp,8(%eax) /* save ebp */
- movl %esi,12(%eax) /* save esi */
- movl %edi,16(%eax) /* save edi */
- movl (%esp),%edx /* get rta */
- movl %edx,20(%eax) /* save eip */
- xorl %eax,%eax /* return(0); */
- ret
-
-ENTRY(longjmp)
- movl 4(%esp),%eax
- movl (%eax),%ebx /* restore ebx */
- movl 4(%eax),%esp /* restore esp */
- movl 8(%eax),%ebp /* restore ebp */
- movl 12(%eax),%esi /* restore esi */
- movl 16(%eax),%edi /* restore edi */
- movl 20(%eax),%edx /* get rta */
- movl %edx,(%esp) /* put in return frame */
- xorl %eax,%eax /* return(1); */
- incl %eax
- ret
-
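setjmp/longjmp above save and restore only the callee-saved registers plus the stack pointer and return address; this kernel longjmp also always makes setjmp return 1, regardless of any argument. A short usage sketch with the identically shaped libc pair:

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf sk_env;

    static void sk_fail(void)
    {
            longjmp(sk_env, 1);             /* unwind back to the setjmp */
    }

    int main(void)
    {
            if (setjmp(sk_env) == 0) {
                    sk_fail();
                    printf("not reached\n");
            } else
                    printf("recovered via longjmp\n");
            return 0;
    }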
-
-/*****************************************************************************/
-/* Scheduling */
-/*****************************************************************************/
-
-/*
- * The following primitives manipulate the run queues.
- * _whichqs tells which of the 32 queues _qs
- * have processes in them. Setrq puts processes into queues, Remrq
- * removes them from queues. The running process is on no queue,
- * other processes are on a queue related to p->p_pri, divided by 4
- * actually to shrink the 0-127 range of priorities into the 32 available
- * queues.
- */
-
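The comment above is dense: whichqs is a 32-bit summary of which of the 32 circular run queues hold processes, and a process of priority p_pri lands on queue p_pri / 4. A rough C sketch of setrq's tail insertion and of the bsfl-style queue pick that swtch performs (names, types and the self-linked headers are illustrative stand-ins):

    #include <strings.h>                    /* ffs() */

    struct sk_proc {
            struct sk_proc *p_link;         /* forward link */
            struct sk_proc *p_rlink;        /* back link */
            int             p_pri;          /* priority, 0..127 */
    };

    static struct sk_proc sk_qs[32];        /* queue headers */
    static unsigned int   sk_whichqs;       /* bit i set => sk_qs[i] non-empty */

    static void sk_rqinit(void)             /* headers start self-linked */
    {
            int i;

            for (i = 0; i < 32; i++)
                    sk_qs[i].p_link = sk_qs[i].p_rlink = &sk_qs[i];
    }

    static void sk_setrq(struct sk_proc *p) /* tail-insert, like setrq above */
    {
            int q = p->p_pri >> 2;          /* 0..127 -> 0..31 */
            struct sk_proc *hdr = &sk_qs[q];

            sk_whichqs |= 1u << q;          /* btsl %edx,_whichqs */
            p->p_link = hdr;
            p->p_rlink = hdr->p_rlink;
            hdr->p_rlink->p_link = p;
            hdr->p_rlink = p;
    }

    static int sk_pick_queue(void)          /* swtch's "bsfl %edi,%eax" */
    {
            return (sk_whichqs ? ffs((int)sk_whichqs) - 1 : -1);
    }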
- .globl _whichqs,_qs,_cnt,_panic
- .comm _noproc,4
- .comm _runrun,4
-
-/*
- * Setrq(p)
- *
- * Call should be made at spl6(), and p->p_stat should be SRUN
- */
-ENTRY(setrq)
- movl 4(%esp),%eax
- cmpl $0,P_RLINK(%eax) /* should not be on q already */
- je set1
- pushl $set2
- call _panic
-set1:
- movzbl P_PRI(%eax),%edx
- shrl $2,%edx
- btsl %edx,_whichqs /* set q full bit */
- shll $3,%edx
- addl $_qs,%edx /* locate q hdr */
- movl %edx,P_LINK(%eax) /* link process on tail of q */
- movl P_RLINK(%edx),%ecx
- movl %ecx,P_RLINK(%eax)
- movl %eax,P_RLINK(%edx)
- movl %eax,P_LINK(%ecx)
- ret
-
-set2: .asciz "setrq"
-
-/*
- * Remrq(p)
- *
- * Call should be made at spl6().
- */
-ENTRY(remrq)
- movl 4(%esp),%eax
- movzbl P_PRI(%eax),%edx
- shrl $2,%edx
- btrl %edx,_whichqs /* clear full bit, panic if clear already */
- jb rem1
- pushl $rem3
- call _panic
-rem1:
- pushl %edx
- movl P_LINK(%eax),%ecx /* unlink process */
- movl P_RLINK(%eax),%edx
- movl %edx,P_RLINK(%ecx)
- movl P_RLINK(%eax),%ecx
- movl P_LINK(%eax),%edx
- movl %edx,P_LINK(%ecx)
- popl %edx
- movl $_qs,%ecx
- shll $3,%edx
- addl %edx,%ecx
- cmpl P_LINK(%ecx),%ecx /* q still has something? */
- je rem2
- shrl $3,%edx /* yes, set bit as still full */
- btsl %edx,_whichqs
-rem2:
- movl $0,P_RLINK(%eax) /* zap reverse link to indicate off list */
- ret
-
-rem3: .asciz "remrq"
-sw0: .asciz "swtch"
-
-/*
- * When no processes are on the runq, Swtch branches to idle
- * to wait for something to come ready.
- */
- ALIGN_TEXT
-Idle:
- sti
- SHOW_STI
-
- ALIGN_TEXT
-idle_loop:
- call _spl0
- cmpl $0,_whichqs
- jne sw1
- hlt /* wait for interrupt */
- jmp idle_loop
-
-badsw:
- pushl $sw0
- call _panic
- /*NOTREACHED*/
-
-/*
- * Swtch()
- */
- SUPERALIGN_TEXT /* so profiling doesn't lump Idle with swtch().. */
-ENTRY(swtch)
-
- incl _cnt+V_SWTCH
-
- /* switch to new process. first, save context as needed */
-
- movl _curproc,%ecx
-
- /* if no process to save, don't bother */
- testl %ecx,%ecx
- je sw1
-
- movl P_ADDR(%ecx),%ecx
-
- movl (%esp),%eax /* Hardware registers */
- movl %eax,PCB_EIP(%ecx)
- movl %ebx,PCB_EBX(%ecx)
- movl %esp,PCB_ESP(%ecx)
- movl %ebp,PCB_EBP(%ecx)
- movl %esi,PCB_ESI(%ecx)
- movl %edi,PCB_EDI(%ecx)
-
-#if NNPX > 0
- /* have we used fp, and need a save? */
- mov _curproc,%eax
- cmp %eax,_npxproc
- jne 1f
- pushl %ecx /* h/w bugs make saving complicated */
- leal PCB_SAVEFPU(%ecx),%eax
- pushl %eax
- call _npxsave /* do it in a big C function */
- popl %eax
- popl %ecx
-1:
-#endif /* NNPX > 0 */
-
- movl _CMAP2,%eax /* save temporary map PTE */
- movl %eax,PCB_CMAP2(%ecx) /* in our context */
- movl $0,_curproc /* out of process */
-
-# movw _cpl,%ax
-# movw %ax,PCB_IML(%ecx) /* save ipl */
-
- /* save is done, now choose a new process or idle */
-sw1:
- cli
- SHOW_CLI
- movl _whichqs,%edi
-2:
- /* XXX - bsf is sloow */
- bsfl %edi,%eax /* find a full q */
- je Idle /* if none, idle */
- /* XX update whichqs? */
-swfnd:
- btrl %eax,%edi /* clear q full status */
- jnb 2b /* if it was clear, look for another */
- movl %eax,%ebx /* save which one we are using */
-
- shll $3,%eax
- addl $_qs,%eax /* select q */
- movl %eax,%esi
-
-#ifdef DIAGNOSTIC
- cmpl P_LINK(%eax),%eax /* linked to self? (e.g. not on list) */
- je badsw /* not possible */
-#endif
-
- movl P_LINK(%eax),%ecx /* unlink from front of process q */
- movl P_LINK(%ecx),%edx
- movl %edx,P_LINK(%eax)
- movl P_RLINK(%ecx),%eax
- movl %eax,P_RLINK(%edx)
-
- cmpl P_LINK(%ecx),%esi /* q empty */
- je 3f
- btsl %ebx,%edi /* nope, set to indicate full */
-3:
- movl %edi,_whichqs /* update q status */
-
- movl $0,%eax
- movl %eax,_want_resched
-
-#ifdef DIAGNOSTIC
- cmpl %eax,P_WCHAN(%ecx)
- jne badsw
- cmpb $SRUN,P_STAT(%ecx)
- jne badsw
-#endif
-
- movl %eax,P_RLINK(%ecx) /* isolate process to run */
- movl P_ADDR(%ecx),%edx
- movl PCB_CR3(%edx),%ebx
-
- /* switch address space */
- movl %ebx,%cr3
-
- /* restore context */
- movl PCB_EBX(%edx),%ebx
- movl PCB_ESP(%edx),%esp
- movl PCB_EBP(%edx),%ebp
- movl PCB_ESI(%edx),%esi
- movl PCB_EDI(%edx),%edi
- movl PCB_EIP(%edx),%eax
- movl %eax,(%esp)
-
- movl PCB_CMAP2(%edx),%eax /* get temporary map */
- movl %eax,_CMAP2 /* reload temporary map PTE */
-
- movl %ecx,_curproc /* into next process */
- movl %edx,_curpcb
-
- pushl %edx /* save p to return */
-/*
- * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
- * I think restoring the cpl is unnecessary, but we must turn off the cli
- * now that spl*() don't do it as a side effect.
- */
- pushl PCB_IML(%edx)
- sti
- SHOW_STI
-#if 0
- call _splx
-#endif
- addl $4,%esp
-/*
- * XXX - 0.0 gets here via swtch_to_inactive(). I think 0.1 gets here in the
- * same way. Better return a value.
- */
- popl %eax /* return(p); */
- ret
-
-ENTRY(mvesp)
- movl %esp,%eax
- ret
-/*
- * struct proc *swtch_to_inactive(p) ; struct proc *p;
- *
- * At exit of a process, move off the address space of the
- * process and onto a "safe" one. Then, on a temporary stack
- * return and run code that disposes of the old state.
- * Since this code requires a parameter from the "old" stack,
- * pass it back as a return value.
- */
-ENTRY(swtch_to_inactive)
- popl %edx /* old pc */
- popl %eax /* arg, our return value */
- movl _IdlePTD,%ecx
- movl %ecx,%cr3 /* good bye address space */
- #write buffer?
- movl $tmpstk-4,%esp /* temporary stack, compensated for call */
- jmp %edx /* return, execute remainder of cleanup */
-
-/*
- * savectx(pcb, altreturn)
- * Update pcb, saving current processor state and arranging
- * for alternate return ala longjmp in swtch if altreturn is true.
- */
-ENTRY(savectx)
- movl 4(%esp),%ecx
- movw _cpl,%ax
- movw %ax,PCB_IML(%ecx)
- movl (%esp),%eax
- movl %eax,PCB_EIP(%ecx)
- movl %ebx,PCB_EBX(%ecx)
- movl %esp,PCB_ESP(%ecx)
- movl %ebp,PCB_EBP(%ecx)
- movl %esi,PCB_ESI(%ecx)
- movl %edi,PCB_EDI(%ecx)
-
-#if NNPX > 0
- /*
- * If npxproc == NULL, then the npx h/w state is irrelevant and the
- * state had better already be in the pcb. This is true for forks
- * but not for dumps (the old book-keeping with FP flags in the pcb
- * always lost for dumps because the dump pcb has 0 flags).
- *
- * If npxproc != NULL, then we have to save the npx h/w state to
- * npxproc's pcb and copy it to the requested pcb, or save to the
- * requested pcb and reload. Copying is easier because we would
- * have to handle h/w bugs for reloading. We used to lose the
- * parent's npx state for forks by forgetting to reload.
- */
- mov _npxproc,%eax
- testl %eax,%eax
- je 1f
-
- pushl %ecx
- movl P_ADDR(%eax),%eax
- leal PCB_SAVEFPU(%eax),%eax
- pushl %eax
- pushl %eax
- call _npxsave
- popl %eax
- popl %eax
- popl %ecx
-
- pushl %ecx
- pushl $108+8*2 /* XXX h/w state size + padding */
- leal PCB_SAVEFPU(%ecx),%ecx
- pushl %ecx
- pushl %eax
- call _bcopy
- addl $12,%esp
- popl %ecx
-1:
-#endif /* NNPX > 0 */
-
- movl _CMAP2,%edx /* save temporary map PTE */
- movl %edx,PCB_CMAP2(%ecx) /* in our context */
-
- cmpl $0,8(%esp)
- je 1f
- movl %esp,%edx /* relocate current sp relative to pcb */
- subl $_kstack,%edx /* (sp is relative to kstack): */
- addl %edx,%ecx /* pcb += sp - kstack; */
- movl %eax,(%ecx) /* write return pc at (relocated) sp@ */
- /* this mess deals with replicating register state gcc hides */
- movl 12(%esp),%eax
- movl %eax,12(%ecx)
- movl 16(%esp),%eax
- movl %eax,16(%ecx)
- movl 20(%esp),%eax
- movl %eax,20(%ecx)
- movl 24(%esp),%eax
- movl %eax,24(%ecx)
-1:
- xorl %eax,%eax /* return 0 */
- ret
-
-/*
- * addupc(int pc, struct uprof *up, int ticks):
- * update profiling information for the user process.
- */
-ENTRY(addupc)
- pushl %ebp
- movl %esp,%ebp
- movl 12(%ebp),%edx /* up */
- movl 8(%ebp),%eax /* pc */
-
- subl PR_OFF(%edx),%eax /* pc -= up->pr_off */
- jl L1 /* if (pc < 0) return */
-
- shrl $1,%eax /* praddr = pc >> 1 */
- imull PR_SCALE(%edx),%eax /* praddr *= up->pr_scale */
- shrl $15,%eax /* praddr = praddr >> 15 */
- andl $-2,%eax /* praddr &= ~1 */
-
- cmpl PR_SIZE(%edx),%eax /* if (praddr > up->pr_size) return */
- ja L1
-
-/* addl %eax,%eax /* praddr -> word offset */
- addl PR_BASE(%edx),%eax /* praddr += up-> pr_base */
- movl 16(%ebp),%ecx /* ticks */
-
- movl _curpcb,%edx
- movl $proffault,PCB_ONFAULT(%edx)
- addl %ecx,(%eax) /* storage location += ticks */
- movl $0,PCB_ONFAULT(%edx)
-L1:
- leave
- ret
-
- ALIGN_TEXT
-proffault:
- /* if we get a fault, then kill profiling all together */
- movl $0,PCB_ONFAULT(%edx) /* squish the fault handler */
- movl 12(%ebp),%ecx
- movl $0,PR_SCALE(%ecx) /* up->pr_scale = 0 */
- leave
- ret
-
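addupc above is a small piece of fixed-point arithmetic: the pc offset is halved, multiplied by pr_scale, shifted down 15 bits and rounded to an even offset before ticks is added into the profiling buffer; proffault above disables profiling if that add faults. The same computation in C, with a simplified stand-in for struct uprof:

    struct sk_uprof {
            char         *pr_base;          /* profiling buffer */
            unsigned int  pr_size;          /* buffer size in bytes */
            unsigned int  pr_off;           /* lowest profiled pc */
            unsigned int  pr_scale;         /* fixed-point scale factor */
    };

    static void sk_addupc(unsigned int pc, struct sk_uprof *up, int ticks)
    {
            unsigned int off, slot;

            if (pc < up->pr_off)            /* below profiled range */
                    return;
            off = pc - up->pr_off;
            slot = (((off >> 1) * up->pr_scale) >> 15) & ~1u;
            if (slot > up->pr_size)         /* past end of buffer */
                    return;
            /* may fault; the kernel then squashes profiling, see proffault */
            *(int *)(up->pr_base + slot) += ticks;
    }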
-/* To be done: */
-ENTRY(astoff)
- ret
-
-
-/*****************************************************************************/
-/* Trap handling */
-/*****************************************************************************/
-/*
- * Trap and fault vector routines
- *
- * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
- * control. The sti's give the standard losing behaviour for ddb and kgdb.
- */
-#define IDTVEC(name) ALIGN_TEXT; .globl _X/**/name; _X/**/name:
-#define TRAP(a) pushl $(a) ; jmp alltraps
-#ifdef KGDB
-# define BPTTRAP(a) sti; pushl $(a) ; jmp bpttraps
-#else
-# define BPTTRAP(a) sti; TRAP(a)
-#endif
-
-IDTVEC(div)
- pushl $0; TRAP(T_DIVIDE)
-IDTVEC(dbg)
-#ifdef BDBTRAP
- BDBTRAP(dbg)
-#endif
- pushl $0; BPTTRAP(T_TRCTRAP)
-IDTVEC(nmi)
- pushl $0; TRAP(T_NMI)
-IDTVEC(bpt)
-#ifdef BDBTRAP
- BDBTRAP(bpt)
-#endif
- pushl $0; BPTTRAP(T_BPTFLT)
-IDTVEC(ofl)
- pushl $0; TRAP(T_OFLOW)
-IDTVEC(bnd)
- pushl $0; TRAP(T_BOUND)
-IDTVEC(ill)
- pushl $0; TRAP(T_PRIVINFLT)
-IDTVEC(dna)
- pushl $0; TRAP(T_DNA)
-IDTVEC(dble)
- TRAP(T_DOUBLEFLT)
- /*PANIC("Double Fault");*/
-IDTVEC(fpusegm)
- pushl $0; TRAP(T_FPOPFLT)
-IDTVEC(tss)
- TRAP(T_TSSFLT)
- /*PANIC("TSS not valid");*/
-IDTVEC(missing)
- TRAP(T_SEGNPFLT)
-IDTVEC(stk)
- TRAP(T_STKFLT)
-IDTVEC(prot)
- TRAP(T_PROTFLT)
-IDTVEC(page)
- TRAP(T_PAGEFLT)
-IDTVEC(rsvd)
- pushl $0; TRAP(T_RESERVED)
-IDTVEC(fpu)
-#if NNPX > 0
- /*
- * Handle like an interrupt so that we can call npxintr to clear the
- * error. It would be better to handle npx interrupts as traps but
- * this is difficult for nested interrupts.
- */
- pushl $0 /* dummy error code */
- pushl $T_ASTFLT
- pushal
- nop /* silly, the bug is for popal and it only
- * bites when the next instruction has a
- * complicated address mode */
- pushl %ds
- pushl %es /* now the stack frame is a trap frame */
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
- pushl _cpl
- pushl $0 /* dummy unit to finish building intr frame */
- incl _cnt+V_TRAP
- call _npxintr
- jmp doreti
-#else /* NNPX > 0 */
- pushl $0; TRAP(T_ARITHTRAP)
-#endif /* NNPX > 0 */
- /* 17 - 31 reserved for future exp */
-IDTVEC(rsvd0)
- pushl $0; TRAP(17)
-IDTVEC(rsvd1)
- pushl $0; TRAP(18)
-IDTVEC(rsvd2)
- pushl $0; TRAP(19)
-IDTVEC(rsvd3)
- pushl $0; TRAP(20)
-IDTVEC(rsvd4)
- pushl $0; TRAP(21)
-IDTVEC(rsvd5)
- pushl $0; TRAP(22)
-IDTVEC(rsvd6)
- pushl $0; TRAP(23)
-IDTVEC(rsvd7)
- pushl $0; TRAP(24)
-IDTVEC(rsvd8)
- pushl $0; TRAP(25)
-IDTVEC(rsvd9)
- pushl $0; TRAP(26)
-IDTVEC(rsvd10)
- pushl $0; TRAP(27)
-IDTVEC(rsvd11)
- pushl $0; TRAP(28)
-IDTVEC(rsvd12)
- pushl $0; TRAP(29)
-IDTVEC(rsvd13)
- pushl $0; TRAP(30)
-IDTVEC(rsvd14)
- pushl $0; TRAP(31)
-
- SUPERALIGN_TEXT
-alltraps:
- pushal
- nop
- pushl %ds
- pushl %es
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
-calltrap:
- incl _cnt+V_TRAP
- call _trap
- /*
- * Return through doreti to handle ASTs. Have to change trap frame
- * to interrupt frame.
- */
- movl $T_ASTFLT,4+4+32(%esp) /* new trap type (err code not used) */
- pushl _cpl
- pushl $0 /* dummy unit */
- jmp doreti
-
-#ifdef KGDB
-/*
- * This code checks for a kgdb trap, then falls through
- * to the regular trap code.
- */
- SUPERALIGN_TEXT
-bpttraps:
- pushal
- nop
- pushl %es
- pushl %ds
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
- testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
- /* non-kernel mode? */
- jne calltrap /* yes */
- call _kgdb_trap_glue
- jmp calltrap
-#endif
-
-/*
- * Call gate entry for syscall
- */
- SUPERALIGN_TEXT
-IDTVEC(syscall)
- pushfl /* only for stupid carry bit and more stupid wait3 cc kludge */
- /* XXX - also for direction flag (bzero, etc. clear it) */
- pushal /* only need eax,ecx,edx - trap resaves others */
- nop
- movl $KDSEL,%eax /* switch to kernel segments */
- movl %ax,%ds
- movl %ax,%es
- incl _cnt+V_SYSCALL /* kml 3/25/93 */
- call _syscall
- /*
- * Return through doreti to handle ASTs. Have to change syscall frame
- * to interrupt frame.
- *
- * XXX - we should have set up the frame earlier to avoid the
- * following popal/pushal (not much can be done to avoid shuffling
- * the flags). Consistent frames would simplify things all over.
- */
- movl 32+0(%esp),%eax /* old flags, shuffle to above cs:eip */
- movl 32+4(%esp),%ebx /* `int' frame should have been ef, eip, cs */
- movl 32+8(%esp),%ecx
- movl %ebx,32+0(%esp)
- movl %ecx,32+4(%esp)
- movl %eax,32+8(%esp)
- popal
- nop
- pushl $0 /* dummy error code */
- pushl $T_ASTFLT
- pushal
- nop
- movl __udatasel,%eax /* switch back to user segments */
- pushl %eax /* XXX - better to preserve originals? */
- pushl %eax
- pushl _cpl
- pushl $0
- jmp doreti
-
-#ifdef SHOW_A_LOT
-/*
- * 'show_bits' was too big when defined as a macro. The line length for some
- * enclosing macro was too big for gas. Perhaps the code would have blown
- * the cache anyway.
- */
- ALIGN_TEXT
-show_bits:
- pushl %eax
- SHOW_BIT(0)
- SHOW_BIT(1)
- SHOW_BIT(2)
- SHOW_BIT(3)
- SHOW_BIT(4)
- SHOW_BIT(5)
- SHOW_BIT(6)
- SHOW_BIT(7)
- SHOW_BIT(8)
- SHOW_BIT(9)
- SHOW_BIT(10)
- SHOW_BIT(11)
- SHOW_BIT(12)
- SHOW_BIT(13)
- SHOW_BIT(14)
- SHOW_BIT(15)
- popl %eax
- ret
-
- .data
-bit_colors:
- .byte GREEN,RED,0,0
- .text
-
-#endif /* SHOW_A_LOT */
-
-
-/*
- * include generated interrupt vectors and ISA intr code
- */
-#include "i386/isa/vector.s"
-#include "i386/isa/icu.s"
diff --git a/sys/amd64/amd64/locore.s b/sys/amd64/amd64/locore.s
index d808571..4b4e36f 100644
--- a/sys/amd64/amd64/locore.s
+++ b/sys/amd64/amd64/locore.s
@@ -34,86 +34,41 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
- * $Id: locore.s,v 1.8 1993/10/15 10:34:19 rgrimes Exp $
+ * $Id$
*/
-
/*
- * locore.s: 4BSD machine support for the Intel 386
- * Preliminary version
- * Written by William F. Jolitz, 386BSD Project
+ * locore.s: FreeBSD machine support for the Intel 386
+ * originally from: locore.s, by William F. Jolitz
+ *
+ * Substantially rewritten by David Greenman, Rod Grimes,
+ * Bruce Evans, Wolfgang Solfrank, and many others.
*/
-#include "npx.h"
+#include "npx.h" /* for NNPX */
-#include "assym.s"
-#include "machine/psl.h"
-#include "machine/pte.h"
+#include "assym.s" /* system definitions */
+#include "machine/psl.h" /* processor status longword defs */
+#include "machine/pte.h" /* page table entry definitions */
-#include "errno.h"
+#include "errno.h" /* error return codes */
-#include "machine/trap.h"
+#include "machine/specialreg.h" /* x86 special registers */
+#include "i386/isa/debug.h" /* BDE debugging macros */
+#include "machine/cputypes.h" /* x86 cpu type definitions */
-#include "machine/specialreg.h"
-#include "i386/isa/debug.h"
-#include "machine/cputypes.h"
+#include "syscall.h" /* system call numbers */
-#define KDSEL 0x10
-#define SEL_RPL_MASK 0x0003
-#define TRAPF_CS_OFF (13 * 4)
+#include "machine/asmacros.h" /* miscellaneous asm macros */
/*
+ * XXX
+ *
* Note: This version greatly munged to avoid various assembler errors
* that may be fixed in newer versions of gas. Perhaps newer versions
* will have more pleasant appearance.
*/
- .set IDXSHIFT,10
-
-#define ALIGN_DATA .align 2
-#define ALIGN_TEXT .align 2,0x90 /* 4-byte boundaries, NOP-filled */
-#define SUPERALIGN_TEXT .align 4,0x90 /* 16-byte boundaries better for 486 */
-
-#define GEN_ENTRY(name) ALIGN_TEXT; .globl name; name:
-#define NON_GPROF_ENTRY(name) GEN_ENTRY(_/**/name)
-
-#ifdef GPROF
-/*
- * ALTENTRY() must be before a corresponding ENTRY() so that it can jump
- * over the mcounting.
- */
-#define ALTENTRY(name) GEN_ENTRY(_/**/name); MCOUNT; jmp 2f
-#define ENTRY(name) GEN_ENTRY(_/**/name); MCOUNT; 2:
-/*
- * The call to mcount supports the usual (bad) conventions. We allocate
- * some data and pass a pointer to it although the 386BSD doesn't use
- * the data. We set up a frame before calling mcount because that is
- * the standard convention although it makes work for both mcount and
- * callers.
- */
-#define MCOUNT .data; ALIGN_DATA; 1:; .long 0; .text; \
- pushl %ebp; movl %esp,%ebp; \
- movl $1b,%eax; call mcount; popl %ebp
-#else
-/*
- * ALTENTRY() has to align because it is before a corresponding ENTRY().
- * ENTRY() has to align too because there may be no ALTENTRY() before it.
- * If there is a previous ALTENTRY() then the alignment code is empty.
- */
-#define ALTENTRY(name) GEN_ENTRY(_/**/name)
-#define ENTRY(name) GEN_ENTRY(_/**/name)
-#endif
-
-/* NB: NOP now preserves registers so NOPs can be inserted anywhere */
-/* XXX: NOP and FASTER_NOP are misleadingly named */
-#ifdef DUMMY_NOPS /* this will break some older machines */
-#define FASTER_NOP
-#define NOP
-#else
-#define FASTER_NOP pushl %eax ; inb $0x84,%al ; popl %eax
-#define NOP pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
-#endif
-
/*
* PTmap is recursive pagemap at top of virtual address space.
* Within PTmap, the page directory can be found (third indirection).
@@ -121,8 +76,9 @@
.globl _PTmap,_PTD,_PTDpde,_Sysmap
.set _PTmap,PTDPTDI << PDRSHIFT
.set _PTD,_PTmap + (PTDPTDI * NBPG)
- .set _PTDpde,_PTD + (PTDPTDI * 4) /* XXX 4=sizeof pde */
+ .set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
+/* Sysmap is the base address of the kernel page tables */
.set _Sysmap,_PTmap + (KPTDI * NBPG)
/*
@@ -132,7 +88,7 @@
.globl _APTmap,_APTD,_APTDpde
.set _APTmap,APTDPTDI << PDRSHIFT
.set _APTD,_APTmap + (APTDPTDI * NBPG)
- .set _APTDpde,_PTD + (APTDPTDI * 4) /* XXX 4=sizeof pde */
+ .set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
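The PTmap/PTD/PTDpde definitions rely on one page directory entry (index PTDPTDI) pointing back at the page directory itself, so all PTEs appear as one flat array at a fixed virtual address and the directory shows up inside that array. The resulting address arithmetic in C (constants are the usual i386 values; the slot number shown is only illustrative, the kernel's PTDPTDI comes from pmap.h):

    #include <stdint.h>

    #define SK_PGSHIFT      12
    #define SK_PDRSHIFT     22
    #define SK_NBPG         4096u
    #define SK_PTESIZE      4u
    #define SK_PTDPTDI      0x3f7u          /* illustrative slot number */

    #define SK_PTMAP        ((uintptr_t)SK_PTDPTDI << SK_PDRSHIFT)
    #define SK_PTD          (SK_PTMAP + SK_PTDPTDI * SK_NBPG)

    /* virtual address of the PTE that maps va (what indexing _PTmap does) */
    static uintptr_t sk_vtopte(uintptr_t va)
    {
            return SK_PTMAP + (va >> SK_PGSHIFT) * SK_PTESIZE;
    }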
/*
 * Access to each process's kernel stack is via a region of
@@ -141,44 +97,41 @@
*/
.set _kstack,USRSTACK
.globl _kstack
- .set PPDROFF,0x3F6
- .set PPTEOFF,0x400-UPAGES /* 0x3FE */
-
/*
* Globals
*/
.data
.globl _esym
-_esym: .long 0 /* ptr to end of syms */
+_esym: .long 0 /* ptr to end of syms */
.globl _boothowto,_bootdev,_curpcb
.globl _cpu,_cold,_atdevbase
-_cpu: .long 0 /* are we 386, 386sx, or 486 */
-_cold: .long 1 /* cold till we are not */
-_atdevbase: .long 0 /* location of start of iomem in virtual */
-_atdevphys: .long 0 /* location of device mapping ptes (phys) */
+_cpu: .long 0 /* are we 386, 386sx, or 486 */
+_cold: .long 1 /* cold till we are not */
+_atdevbase: .long 0 /* location of start of iomem in virtual */
+_atdevphys: .long 0 /* location of device mapping ptes (phys) */
+
+ .globl _KERNend
+_KERNend: .long 0 /* phys addr end of kernel (just after bss) */
.globl _IdlePTD,_KPTphys
-_IdlePTD: .long 0
-_KPTphys: .long 0
+_IdlePTD: .long 0 /* phys addr of kernel PTD */
+_KPTphys: .long 0 /* phys addr of kernel page tables */
- .globl _curpcb, _whichqs
-_curpcb: .long 0 /* pointer to curproc's PCB area */
-_whichqs: .long 0 /* which run queues have data */
+ .globl _cyloffset
+_cyloffset: .long 0 /* cylinder offset from boot blocks */
- .globl _cyloffset,_proc0paddr
-_cyloffset: .long 0
-_proc0paddr: .long 0
+ .globl _proc0paddr
+_proc0paddr: .long 0 /* address of proc 0 address space */
- /* Stuff for network ASTs */
- .globl _softem,_netisr,_astpending,_want_resched
-_softem: .long 0 /* WFJ only knows... */
-_netisr: .long 0 /* set with bits for which queue to service */
-_astpending: .long 0 /* tells us an AST needs to be taken */
-_want_resched: .long 0 /* we need to re-schedule */
+#ifdef BDE_DEBUGGER
+ .globl _bdb_exists /* flag to indicate BDE debugger is available */
+ .long 0
+#endif
+ .globl tmpstk
.space 512
tmpstk:
@@ -193,9 +146,9 @@ tmpstk:
* Also the entry point (jumped to directly from the boot blocks).
*/
ENTRY(btext)
- movw $0x1234,0x472 /* warm boot */
+ movw $0x1234,0x472 /* warm boot */
jmp 1f
- .space 0x500 /* skip over warm boot shit */
+ .space 0x500 /* skip over warm boot shit */
/*
* pass parameters on stack (howto, bootdev, unit, cyloffset, esym)
@@ -249,53 +202,60 @@ ENTRY(btext)
* Oops, the gdt is in the carcass of the boot program so clearing
* the rest of memory is still not possible.
*/
- movl $tmpstk-KERNBASE,%esp /* bootstrap stack end location */
+ movl $tmpstk-KERNBASE,%esp /* bootstrap stack end location */
/*
* Virtual address space of kernel:
*
* text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
- * 0 1 2 3 4
+ * pages: 1 UPAGES (2) 1 NKPDE (7)
*/
/* find end of kernel image */
movl $_end-KERNBASE,%ecx
- addl $NBPG-1,%ecx /* page align up */
+ addl $NBPG-1,%ecx /* page align up */
andl $~(NBPG-1),%ecx
- movl %ecx,%esi /* esi=start of tables */
+ movl %ecx,%esi /* esi=start of tables */
+ movl %ecx,_KERNend-KERNBASE /* save end of kernel */
-/* clear bss and memory for bootstrap pagetables. */
+/* clear bss */
movl $_edata-KERNBASE,%edi
- subl %edi,%ecx
- addl $(UPAGES+5)*NBPG,%ecx /* size of tables */
-
- xorl %eax,%eax /* pattern */
+ subl %edi,%ecx /* get amount to clear */
+ xorl %eax,%eax /* specify zero fill */
cld
rep
stosb
/*
- * If we are loaded at 0x0 check to see if we have space for the
- * page tables pages after the kernel and before the 640K ISA memory
- * hole. If we do not have space relocate the page table pages and
- * the kernel stack to start at 1MB. The value that ends up in esi
- * is used by the rest of locore to build the tables. Locore adjusts
- * esi each time it allocates a structure and then passes the final
- * value to init386(first) as the value first. esi should ALWAYS
- * be page aligned!!
- */
- movl %esi,%ecx /* Get current first available address */
- cmpl $0x100000,%ecx /* Lets see if we are already above 1MB */
- jge 1f /* yep, don't need to check for room */
- addl $(NKPDE + 4) * NBPG,%ecx /* XXX the 4 is for kstack */
- /* space for kstack, PTD and PTE's */
- cmpl $(640*1024),%ecx
- /* see if it fits in low memory */
- jle 1f /* yep, don't need to relocate it */
- movl $0x100000,%esi /* won't fit, so start it at 1MB */
+ * If we are loaded at 0x0 check to see if we have space for the
+ * page dir/tables and stack area after the kernel and before the 640K
+ * ISA memory hole. If we do not have space, relocate the page directory,
+ * UPAGES, proc 0 stack, and page table pages to start at 1MB. The value
+ * that ends up in esi, which points to the kernel page directory, is
+ * used by the rest of locore to build the tables.
+ * esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPDE(number of kernel
+ * page table pages) is then passed on the stack to init386(first) as
+ * the value first. esi should ALWAYS be page aligned!!
+ */
+ movl %esi,%ecx /* Get current first available address */
+ cmpl $0x100000,%ecx /* Lets see if we are already above 1MB */
+ jge 1f /* yep, don't need to check for room */
+ addl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* add size of tables: */
+ /* space for kstack, PTD and PTE's */
+ cmpl $(640*1024),%ecx /* see if it fits in low memory */
+ jle 1f /* yep, don't need to relocate it */
+ movl $0x100000,%esi /* won't fit, so start it at 1MB */
1:
-/* physical address of Idle Address space */
+/* clear pagetables, page directory, stack, etc... */
+ movl %esi,%edi /* base (page directory) */
+ movl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* amount to clear */
+ xorl %eax,%eax /* specify zero fill */
+ cld
+ rep
+ stosb
+
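The page counts in the comments above (one page directory page, UPAGES = 2 for the proc 0 u-area and stack, one user-stack-map page, NKPDE = 7 kernel page table pages) drive both the 640K fit test and the value of first later pushed to init386(). A rough C rendering of that arithmetic, under those assumptions and with illustrative helper names:

    #include <stdint.h>

    #define SK_NBPG    4096u
    #define SK_UPAGES  2u
    #define SK_NKPDE   7u

    /* round the end of the kernel image up to a page boundary */
    static uint32_t sk_page_align(uint32_t kern_end)
    {
            return (kern_end + SK_NBPG - 1) & ~(SK_NBPG - 1);
    }

    /* "first" handed to init386(): first byte past the bootstrap tables */
    static uint32_t sk_first_free(uint32_t pagedir_pa)
    {
            return pagedir_pa + (1 + SK_UPAGES + 1 + SK_NKPDE) * SK_NBPG;
    }

    /* the fit test: move to 1MB if the tables would run into the 640K hole */
    static uint32_t sk_choose_base(uint32_t kern_end)
    {
            uint32_t base = sk_page_align(kern_end);

            if (base < 0x100000 && sk_first_free(base) > 640 * 1024)
                    base = 0x100000;
            return base;
    }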
+/* physical address of Idle proc/kernel page directory */
movl %esi,_IdlePTD-KERNBASE
/*
@@ -312,69 +272,100 @@ ENTRY(btext)
/*
* Map Kernel
- * N.B. don't bother with making kernel text RO, as 386
- * ignores R/W AND U/S bits on kernel access (only v works) !
*
* First step - build page tables
*/
- movl %esi,%ecx /* this much memory, */
- shrl $PGSHIFT,%ecx /* for this many pte s */
- addl $UPAGES+4,%ecx /* including our early context */
- cmpl $0xa0,%ecx /* XXX - cover debugger pages */
+#if defined (KGDB) || defined (BDE_DEBUGGER)
+ movl _KERNend-KERNBASE,%ecx /* this much memory, */
+ shrl $PGSHIFT,%ecx /* for this many PTEs */
+#ifdef BDE_DEBUGGER
+ cmpl $0xa0,%ecx /* XXX - cover debugger pages */
jae 1f
movl $0xa0,%ecx
1:
- movl $PG_V|PG_KW,%eax /* having these bits set, */
- lea (4*NBPG)(%esi),%ebx /* physical address of KPT in proc 0, */
- movl %ebx,_KPTphys-KERNBASE /* in the kernel page table, */
+#endif /* BDE_DEBUGGER */
+ movl $PG_V|PG_KW,%eax /* having these bits set, */
+ lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
+ movl %ebx,_KPTphys-KERNBASE /* save in global */
fillkpt
+#else /* !KGDB && !BDE_DEBUGGER */
+ /* write protect kernel text (doesn't do a thing for 386's - only 486's) */
+ movl $_etext-KERNBASE,%ecx /* get size of text */
+ shrl $PGSHIFT,%ecx /* for this many PTEs */
+ movl $PG_V|PG_KR,%eax /* specify read only */
+ lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
+ movl %ebx,_KPTphys-KERNBASE /* save in global */
+ fillkpt
+
+ /* data and bss are r/w */
+ andl $PG_FRAME,%eax /* strip to just addr of bss */
+ movl _KERNend-KERNBASE,%ecx /* calculate size */
+ subl %eax,%ecx
+ shrl $PGSHIFT,%ecx
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ fillkpt
+#endif
+
+/* now initialize the page dir, upages, p0stack PT, and page tables */
+
+ movl $(1+UPAGES+1+NKPDE),%ecx /* number of PTEs */
+ movl %esi,%eax /* phys address of PTD */
+ andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ movl %esi,%ebx /* calculate pte offset to ptd */
+ shrl $PGSHIFT-2,%ebx
+ addl %esi,%ebx /* address of page directory */
+ addl $((1+UPAGES+1)*NBPG),%ebx /* offset to kernel page tables */
+ fillkpt
+
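The fillkpt calls above map kernel text with PG_KR (read-only) and data/bss with PG_KW (read/write), which is what lets a 486 with CR0_WP write-protect the kernel text; a 386 ignores the RW bit in supervisor mode, as the comment notes. A rough C equivalent of what fillkpt accomplishes here (fillkpt itself is a macro defined earlier in locore.s; bit values and page alignment are simplified):

    #include <stdint.h>

    #define SK_NBPG    4096u
    #define SK_PG_V    0x001u
    #define SK_PG_KR   0x000u               /* kernel read-only: RW bit clear */
    #define SK_PG_KW   0x002u               /* kernel read/write: RW bit set */

    /* store 'count' PTEs mapping successive pages starting at 'pa' */
    static void sk_fillkpt(uint32_t *pte, uint32_t pa, uint32_t prot,
                           uint32_t count)
    {
            while (count--) {
                    *pte++ = pa | prot | SK_PG_V;
                    pa += SK_NBPG;
            }
    }

    /* map [0, etext) read-only and [etext, kernend) read/write */
    static void sk_map_kernel(uint32_t *kpt, uint32_t etext, uint32_t kernend)
    {
            sk_fillkpt(kpt, 0, SK_PG_KR, etext / SK_NBPG);
            sk_fillkpt(kpt + etext / SK_NBPG, etext, SK_PG_KW,
                       (kernend - etext) / SK_NBPG);
    }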
/* map I/O memory map */
- movl $0x100-0xa0,%ecx /* for this many pte s, */
- movl $(0xa0000|PG_V|PG_UW),%eax /* having these bits set,(perhaps URW?) XXX 06 Aug 92 */
- movl %ebx,_atdevphys-KERNBASE /* remember phys addr of ptes */
+ movl _KPTphys-KERNBASE,%ebx /* base of kernel page tables */
+ lea (0xa0 * PTESIZE)(%ebx),%ebx /* hardwire ISA hole at KERNBASE + 0xa0000 */
+ movl $0x100-0xa0,%ecx /* for this many PTEs, */
+ movl $(0xa0000|PG_V|PG_KW),%eax /* valid, kernel read/write */
+ movl %ebx,_atdevphys-KERNBASE /* save phys addr of ptes */
fillkpt
/* map proc 0's kernel stack into user page table page */
- movl $UPAGES,%ecx /* for this many pte s, */
- lea (1*NBPG)(%esi),%eax /* physical address in proc 0 */
- lea (KERNBASE)(%eax),%edx
- movl %edx,_proc0paddr-KERNBASE
- /* remember VA for 0th process init */
- orl $PG_V|PG_KW,%eax /* having these bits set, */
- lea (3*NBPG)(%esi),%ebx /* physical address of stack pt in proc 0 */
- addl $(PPTEOFF*4),%ebx
+ movl $UPAGES,%ecx /* for this many PTEs, */
+ lea (1*NBPG)(%esi),%eax /* physical address in proc 0 */
+ lea (KERNBASE)(%eax),%edx /* change into virtual addr */
+ movl %edx,_proc0paddr-KERNBASE /* save VA for proc 0 init */
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ lea ((1+UPAGES)*NBPG)(%esi),%ebx /* addr of stack page table in proc 0 */
+ addl $(KSTKPTEOFF * PTESIZE),%ebx /* offset to kernel stack PTE */
fillkpt
/*
- * Construct a page table directory
- * (of page directory elements - pde's)
+ * Initialize kernel page table directory
*/
/* install a pde for temporary double map of bottom of VA */
- lea (4*NBPG)(%esi),%eax /* physical address of kernel page table */
- orl $PG_V|PG_UW,%eax /* pde entry is valid XXX 06 Aug 92 */
- movl %eax,(%esi) /* which is where temp maps! */
+ movl _KPTphys-KERNBASE,%eax
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ movl %eax,(%esi) /* which is where temp maps! */
- /* kernel pde's */
- movl $(NKPDE),%ecx /* for this many pde s, */
- lea (KPTDI*4)(%esi),%ebx /* offset of pde for kernel */
+ /* initialize kernel pde's */
+ movl $(NKPDE),%ecx /* for this many PDEs */
+ lea (KPTDI*PDESIZE)(%esi),%ebx /* offset of pde for kernel */
fillkpt
/* install a pde recursively mapping page directory as a page table! */
- movl %esi,%eax /* phys address of ptd in proc 0 */
- orl $PG_V|PG_UW,%eax /* pde entry is valid XXX 06 Aug 92 */
- movl %eax,PTDPTDI*4(%esi) /* which is where PTmap maps! */
+ movl %esi,%eax /* phys address of ptd in proc 0 */
+ orl $PG_V|PG_KW,%eax /* pde entry is valid */
+ movl %eax,PTDPTDI*PDESIZE(%esi) /* which is where PTmap maps! */
/* install a pde to map kernel stack for proc 0 */
- lea (3*NBPG)(%esi),%eax /* physical address of pt in proc 0 */
- orl $PG_V|PG_KW,%eax /* pde entry is valid */
- movl %eax,PPDROFF*4(%esi) /* which is where kernel stack maps! */
+ lea ((1+UPAGES)*NBPG)(%esi),%eax /* physical address of pt in proc 0 */
+ orl $PG_V|PG_KW,%eax /* pde entry is valid */
+ movl %eax,KSTKPTDI*PDESIZE(%esi) /* which is where kernel stack maps! */
+#ifdef BDE_DEBUGGER
/* copy and convert stuff from old gdt and idt for debugger */
- cmpl $0x0375c339,0x96104 /* XXX - debugger signature */
+ cmpl $0x0375c339,0x96104 /* XXX - debugger signature */
jne 1f
movb $1,_bdb_exists-KERNBASE
1:
@@ -382,23 +373,23 @@ ENTRY(btext)
subl $2*6,%esp
sgdt (%esp)
- movl 2(%esp),%esi /* base address of current gdt */
+ movl 2(%esp),%esi /* base address of current gdt */
movl $_gdt-KERNBASE,%edi
movl %edi,2(%esp)
movl $8*18/4,%ecx
- rep /* copy gdt */
+ rep /* copy gdt */
movsl
movl $_gdt-KERNBASE,-8+2(%edi) /* adjust gdt self-ptr */
movb $0x92,-8+5(%edi)
sidt 6(%esp)
- movl 6+2(%esp),%esi /* base address of current idt */
- movl 8+4(%esi),%eax /* convert dbg descriptor to ... */
+ movl 6+2(%esp),%esi /* base address of current idt */
+ movl 8+4(%esi),%eax /* convert dbg descriptor to ... */
movw 8(%esi),%ax
movl %eax,bdb_dbg_ljmp+1-KERNBASE /* ... immediate offset ... */
movl 8+2(%esi),%eax
movw %ax,bdb_dbg_ljmp+5-KERNBASE /* ... and selector for ljmp */
- movl 24+4(%esi),%eax /* same for bpt descriptor */
+ movl 24+4(%esi),%eax /* same for bpt descriptor */
movw 24(%esi),%ax
movl %eax,bdb_bpt_ljmp+1-KERNBASE
movl 24+2(%esi),%eax
@@ -407,7 +398,7 @@ ENTRY(btext)
movl $_idt-KERNBASE,%edi
movl %edi,6+2(%esp)
movl $8*4/4,%ecx
- rep /* copy idt */
+ rep /* copy idt */
movsl
lgdt (%esp)
@@ -415,12 +406,13 @@ ENTRY(btext)
addl $2*6,%esp
popal
+#endif
/* load base of page directory and enable mapping */
- movl %esi,%eax /* phys address of ptd in proc 0 */
+ movl %esi,%eax /* phys address of ptd in proc 0 */
orl $I386_CR3PAT,%eax
- movl %eax,%cr3 /* load ptd addr into mmu */
- movl %cr0,%eax /* get control word */
+ movl %eax,%cr3 /* load ptd addr into mmu */
+ movl %cr0,%eax /* get control word */
/*
* XXX it is now safe to always (attempt to) set CR0_WP and to set up
* the page tables assuming it works, so USE_486_WRITE_PROTECT will go
@@ -430,58 +422,59 @@ ENTRY(btext)
#ifdef USE_486_WRITE_PROTECT
orl $CR0_PE|CR0_PG|CR0_WP,%eax /* enable paging */
#else
- orl $CR0_PE|CR0_PG,%eax /* enable paging */
+ orl $CR0_PE|CR0_PG,%eax /* enable paging */
#endif
- movl %eax,%cr0 /* and let's page NOW! */
+ movl %eax,%cr0 /* and let's page NOW! */
- pushl $begin /* jump to high mem */
+ pushl $begin /* jump to high mem */
ret
begin: /* now running relocated at KERNBASE where the system is linked to run */
- .globl _Crtat /* XXX - locore should not know about */
- movl _Crtat,%eax /* variables of device drivers (pccons)! */
+ .globl _Crtat /* XXX - locore should not know about */
+ movl _Crtat,%eax /* variables of device drivers (pccons)! */
subl $(KERNBASE+0xA0000),%eax
- movl _atdevphys,%edx /* get pte PA */
- subl _KPTphys,%edx /* remove base of ptes, now have phys offset */
- shll $PGSHIFT-2,%edx /* corresponding to virt offset */
- addl $KERNBASE,%edx /* add virtual base */
+ movl _atdevphys,%edx /* get pte PA */
+ subl _KPTphys,%edx /* remove base of ptes, now have phys offset */
+ shll $PGSHIFT-2,%edx /* corresponding to virt offset */
+ addl $KERNBASE,%edx /* add virtual base */
movl %edx,_atdevbase
addl %eax,%edx
movl %edx,_Crtat
- /* set up bootstrap stack */
+ /* set up bootstrap stack - 48 bytes */
movl $_kstack+UPAGES*NBPG-4*12,%esp /* bootstrap stack end location */
- xorl %eax,%eax /* mark end of frames */
+ xorl %eax,%eax /* mark end of frames */
movl %eax,%ebp
movl _proc0paddr,%eax
movl %esi,PCB_CR3(%eax)
+#ifdef BDE_DEBUGGER
/* relocate debugger gdt entries */
- movl $_gdt+8*9,%eax /* adjust slots 9-17 */
+ movl $_gdt+8*9,%eax /* adjust slots 9-17 */
movl $9,%ecx
reloc_gdt:
- movb $0xfe,7(%eax) /* top byte of base addresses, was 0, */
- addl $8,%eax /* now KERNBASE>>24 */
+ movb $0xfe,7(%eax) /* top byte of base addresses, was 0, */
+ addl $8,%eax /* now KERNBASE>>24 */
loop reloc_gdt
cmpl $0,_bdb_exists
je 1f
int $3
1:
+#endif
/*
* Skip over the page tables and the kernel stack
- * XXX 4 is kstack size
*/
- lea (NKPDE + 4) * NBPG(%esi),%esi
+ lea ((1+UPAGES+1+NKPDE)*NBPG)(%esi),%esi
- pushl %esi /* value of first for init386(first) */
- call _init386 /* wire 386 chip for unix operation */
+ pushl %esi /* value of first for init386(first) */
+ call _init386 /* wire 386 chip for unix operation */
movl $0,_PTD
- call _main /* autoconfiguration, mountroot etc */
+ call _main /* autoconfiguration, mountroot etc */
popl %esi
/*
@@ -495,25 +488,22 @@ reloc_gdt:
movl __ucodesel,%eax
movl __udatasel,%ecx
/* build outer stack frame */
- pushl %ecx /* user ss */
- pushl $USRSTACK /* user esp */
- pushl %eax /* user cs */
- pushl $0 /* user ip */
+ pushl %ecx /* user ss */
+ pushl $USRSTACK /* user esp */
+ pushl %eax /* user cs */
+ pushl $0 /* user ip */
movl %cx,%ds
movl %cx,%es
- movl %ax,%fs /* double map cs to fs */
- movl %cx,%gs /* and ds to gs */
- lret /* goto user! */
+ movl %ax,%fs /* double map cs to fs */
+ movl %cx,%gs /* and ds to gs */
+ lret /* goto user! */
- pushl $lretmsg1 /* "should never get here!" */
+ pushl $lretmsg1 /* "should never get here!" */
call _panic
lretmsg1:
.asciz "lret: toinit\n"
- .set exec,59
- .set exit,1
-
#define LCALL(x,y) .byte 0x9a ; .long y; .word x
/*
* Icode is copied out to process 1 and executed in user mode:
@@ -521,36 +511,36 @@ lretmsg1:
* If the execve fails, process 1 exits and the system panics.
*/
NON_GPROF_ENTRY(icode)
- pushl $0 /* envp for execve() */
+ pushl $0 /* envp for execve() */
-# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
+# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
movl $argv,%eax
subl $_icode,%eax
- pushl %eax /* argp for execve() */
+ pushl %eax /* argp for execve() */
# pushl $init-_icode
movl $init,%eax
subl $_icode,%eax
- pushl %eax /* fname for execve() */
+ pushl %eax /* fname for execve() */
- pushl %eax /* dummy return address */
+ pushl %eax /* dummy return address */
- movl $exec,%eax
+ movl $SYS_execve,%eax
LCALL(0x7,0x0)
/* exit if something botches up in the above execve() */
- pushl %eax /* execve failed, the errno will do for an */
- /* exit code because errnos are < 128 */
- pushl %eax /* dummy return address */
- movl $exit,%eax
+ pushl %eax /* execve failed, the errno will do for an */
+ /* exit code because errnos are < 128 */
+ pushl %eax /* dummy return address */
+ movl $SYS_exit,%eax
LCALL(0x7,0x0)
init:
.asciz "/sbin/init"
ALIGN_DATA
argv:
- .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
- .long eicode-_icode /* argv[1] follows icode after copyout */
+ .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
+ .long eicode-_icode /* argv[1] follows icode after copyout */
.long 0
eicode:
@@ -560,1604 +550,14 @@ _szicode:
NON_GPROF_ENTRY(sigcode)
call SIGF_HANDLER(%esp)
- lea SIGF_SC(%esp),%eax /* scp (the call may have clobbered the */
- /* copy at 8(%esp)) */
+ lea SIGF_SC(%esp),%eax /* scp (the call may have clobbered the */
+ /* copy at 8(%esp)) */
pushl %eax
- pushl %eax /* junk to fake return address */
- movl $103,%eax /* XXX sigreturn() */
- LCALL(0x7,0) /* enter kernel with args on stack */
- hlt /* never gets here */
+ pushl %eax /* junk to fake return address */
+ movl $103,%eax /* XXX sigreturn() */
+ LCALL(0x7,0) /* enter kernel with args on stack */
+ hlt /* never gets here */
.globl _szsigcode
_szsigcode:
.long _szsigcode-_sigcode
-
-/*
- * Support routines for GCC, general C-callable functions
- */
-ENTRY(__udivsi3)
- movl 4(%esp),%eax
- xorl %edx,%edx
- divl 8(%esp)
- ret
-
-ENTRY(__divsi3)
- movl 4(%esp),%eax
- cltd
- idivl 8(%esp)
- ret
-
- /*
- * I/O bus instructions via C
- */
-ENTRY(inb) /* val = inb(port) */
- movl 4(%esp),%edx
- subl %eax,%eax
- NOP
- inb %dx,%al
- ret
-
-ENTRY(inw) /* val = inw(port) */
- movl 4(%esp),%edx
- subl %eax,%eax
- NOP
- inw %dx,%ax
- ret
-
-ENTRY(insb) /* insb(port, addr, cnt) */
- pushl %edi
- movw 8(%esp),%dx
- movl 12(%esp),%edi
- movl 16(%esp),%ecx
- cld
- NOP
- rep
- insb
- NOP
- movl %edi,%eax
- popl %edi
- ret
-
-ENTRY(insw) /* insw(port, addr, cnt) */
- pushl %edi
- movw 8(%esp),%dx
- movl 12(%esp),%edi
- movl 16(%esp),%ecx
- cld
- NOP
- rep
- insw
- NOP
- movl %edi,%eax
- popl %edi
- ret
-
-ENTRY(rtcin) /* rtcin(val) */
- movl 4(%esp),%eax
- outb %al,$0x70
- subl %eax,%eax
- inb $0x71,%al
- ret
-
-ENTRY(outb) /* outb(port, val) */
- movl 4(%esp),%edx
- NOP
- movl 8(%esp),%eax
- outb %al,%dx
- NOP
- ret
-
-ENTRY(outw) /* outw(port, val) */
- movl 4(%esp),%edx
- NOP
- movl 8(%esp),%eax
- outw %ax,%dx
- NOP
- ret
-
-ENTRY(outsb) /* outsb(port, addr, cnt) */
- pushl %esi
- movw 8(%esp),%dx
- movl 12(%esp),%esi
- movl 16(%esp),%ecx
- cld
- NOP
- rep
- outsb
- NOP
- movl %esi,%eax
- popl %esi
- ret
-
-ENTRY(outsw) /* outsw(port, addr, cnt) */
- pushl %esi
- movw 8(%esp),%dx
- movl 12(%esp),%esi
- movl 16(%esp),%ecx
- cld
- NOP
- rep
- outsw
- NOP
- movl %esi,%eax
- popl %esi
- ret
-
- /*
- * bcopy family
- */
-ENTRY(bzero) /* void bzero(void *base, u_int cnt) */
- pushl %edi
- movl 8(%esp),%edi
- movl 12(%esp),%ecx
- xorl %eax,%eax
- shrl $2,%ecx
- cld
- rep
- stosl
- movl 12(%esp),%ecx
- andl $3,%ecx
- rep
- stosb
- popl %edi
- ret
-
-ENTRY(fillw) /* fillw(pat, base, cnt) */
- pushl %edi
- movl 8(%esp),%eax
- movl 12(%esp),%edi
- movl 16(%esp),%ecx
- cld
- rep
- stosw
- popl %edi
- ret
-
-ENTRY(bcopyb)
-bcopyb:
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi
- movl 16(%esp),%edi
- movl 20(%esp),%ecx
- cmpl %esi,%edi /* potentially overlapping? */
- jnb 1f
- cld /* nope, copy forwards */
- rep
- movsb
- popl %edi
- popl %esi
- ret
-
- ALIGN_TEXT
-1:
- addl %ecx,%edi /* copy backwards. */
- addl %ecx,%esi
- std
- decl %edi
- decl %esi
- rep
- movsb
- popl %edi
- popl %esi
- cld
- ret
-
-ENTRY(bcopyw)
-bcopyw:
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi
- movl 16(%esp),%edi
- movl 20(%esp),%ecx
- cmpl %esi,%edi /* potentially overlapping? */
- jnb 1f
- cld /* nope, copy forwards */
- shrl $1,%ecx /* copy by 16-bit words */
- rep
- movsw
- adc %ecx,%ecx /* any bytes left? */
- rep
- movsb
- popl %edi
- popl %esi
- ret
-
- ALIGN_TEXT
-1:
- addl %ecx,%edi /* copy backwards */
- addl %ecx,%esi
- std
- andl $1,%ecx /* any fractional bytes? */
- decl %edi
- decl %esi
- rep
- movsb
- movl 20(%esp),%ecx /* copy remainder by 16-bit words */
- shrl $1,%ecx
- decl %esi
- decl %edi
- rep
- movsw
- popl %edi
- popl %esi
- cld
- ret
-
-ENTRY(bcopyx)
- movl 16(%esp),%eax
- cmpl $2,%eax
- je bcopyw /* not _bcopyw, to avoid multiple mcounts */
- cmpl $4,%eax
- je bcopy
- jmp bcopyb
-
- /*
- * (ov)bcopy(src, dst, cnt)
- * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
- */
-ALTENTRY(ovbcopy)
-ENTRY(bcopy)
-bcopy:
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi
- movl 16(%esp),%edi
- movl 20(%esp),%ecx
- cmpl %esi,%edi /* potentially overlapping? */
- jnb 1f
- cld /* nope, copy forwards */
- shrl $2,%ecx /* copy by 32-bit words */
- rep
- movsl
- movl 20(%esp),%ecx
- andl $3,%ecx /* any bytes left? */
- rep
- movsb
- popl %edi
- popl %esi
- ret
-
- ALIGN_TEXT
-1:
- addl %ecx,%edi /* copy backwards */
- addl %ecx,%esi
- std
- andl $3,%ecx /* any fractional bytes? */
- decl %edi
- decl %esi
- rep
- movsb
- movl 20(%esp),%ecx /* copy remainder by 32-bit words */
- shrl $2,%ecx
- subl $3,%esi
- subl $3,%edi
- rep
- movsl
- popl %edi
- popl %esi
- cld
- ret
-
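
The cmpl/jnb pair above is what decides between a forward and a backward copy. A minimal C sketch of that decision, for illustration only (the kernel routine additionally copies in 32-bit words where it can); the name bcopy_sketch is invented here:

	#include <stddef.h>

	/*
	 * If the destination starts at or above the source, copy backwards
	 * so an overlapping region is not clobbered before it is read.
	 */
	static void
	bcopy_sketch(const void *src0, void *dst0, size_t len)
	{
		const char *src = src0;
		char *dst = dst0;

		if (dst < src) {			/* cld; rep movsb */
			while (len--)
				*dst++ = *src++;
		} else {				/* std; ...; rep movsb */
			src += len;
			dst += len;
			while (len--)
				*--dst = *--src;
		}
	}
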
-ALTENTRY(ntohl)
-ENTRY(htonl)
- movl 4(%esp),%eax
-#ifdef i486
- /* XXX */
- /* Since Gas 1.38 does not grok bswap this has been coded as the
- * equivalent bytes. This can be changed back to bswap when we
- * upgrade to a newer version of Gas */
- /* bswap %eax */
- .byte 0x0f
- .byte 0xc8
-#else
- xchgb %al,%ah
- roll $16,%eax
- xchgb %al,%ah
-#endif
- ret
-
-ALTENTRY(ntohs)
-ENTRY(htons)
- movzwl 4(%esp),%eax
- xchgb %al,%ah
- ret
-
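
For readers who do not think in rotates: the pre-486 path above byte-swaps a 32-bit word with two 8-bit exchanges around a 16-bit rotate. The same sequence in C, purely as an illustration (htonl_sketch is an invented name, not the libc routine):

	#include <stdint.h>

	static uint32_t
	htonl_sketch(uint32_t x)
	{
		/* xchgb %al,%ah: swap the two low-order bytes */
		x = (x & 0xffff0000u) | ((x & 0x00ffu) << 8) | ((x & 0xff00u) >> 8);
		/* roll $16,%eax: exchange the 16-bit halves */
		x = (x << 16) | (x >> 16);
		/* xchgb %al,%ah: swap the (new) two low-order bytes */
		x = (x & 0xffff0000u) | ((x & 0x00ffu) << 8) | ((x & 0xff00u) >> 8);
		return (x);		/* 0x11223344 -> 0x44332211 */
	}
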
-/*****************************************************************************/
-/* copyout and fubyte family */
-/*****************************************************************************/
-/*
- * Access user memory from inside the kernel. These routines and possibly
- * the math- and DOS emulators should be the only places that do this.
- *
- * We have to access the memory with user's permissions, so use a segment
- * selector with RPL 3. For writes to user space we have to additionally
- * check the PTE for write permission, because the 386 does not check
- * write permissions when we are executing with EPL 0. The 486 does check
- * this if the WP bit is set in CR0, so we can use a simpler version here.
- *
- * These routines set curpcb->onfault for the time they execute. When a
- * protection violation occurs inside the functions, the trap handler
- * returns to *curpcb->onfault instead of the function.
- */
-
-
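
The onfault protocol described above is easier to see in C. The following is only a userland analogy (signal() plus sigsetjmp() standing in for curpcb->onfault and the kernel trap handler); copyin_demo and segv_handler are invented names for the illustration:

	#include <errno.h>
	#include <setjmp.h>
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>

	static sigjmp_buf onfault;		/* plays the role of curpcb->onfault */

	static void
	segv_handler(int sig)
	{
		(void)sig;
		siglongjmp(onfault, 1);		/* "return to *curpcb->onfault" */
	}

	static int
	copyin_demo(const void *from, void *to, size_t len)
	{
		if (sigsetjmp(onfault, 1))
			return (EFAULT);	/* fault path */
		memcpy(to, from, len);		/* may fault on a bad source */
		return (0);			/* success path */
	}

	int
	main(void)
	{
		char buf[16];

		signal(SIGSEGV, segv_handler);
		printf("good copy -> %d\n", copyin_demo("hello", buf, 6));
		printf("bad copy  -> %d\n", copyin_demo((const void *)8, buf, 6));
		return (0);
	}
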
-ENTRY(copyout) /* copyout(from_kernel, to_user, len) */
- movl _curpcb,%eax
- movl $copyout_fault,PCB_ONFAULT(%eax)
- pushl %esi
- pushl %edi
- pushl %ebx
- movl 16(%esp),%esi
- movl 20(%esp),%edi
- movl 24(%esp),%ebx
- orl %ebx,%ebx /* anything to do? */
- jz done_copyout
-
- /*
- * Check explicitly for non-user addresses. If 486 write protection
- * is being used, this check is essential because we are in kernel
- * mode so the h/w does not provide any protection against writing
- * kernel addresses.
- *
- * Otherwise, it saves having to load and restore %es to get the
- * usual segment-based protection (the destination segment for movs
- * is always %es). The other explicit checks for user-writability
- * are not quite sufficient. They fail for the user area because
- * we mapped the user area read/write to avoid having an #ifdef in
- * vm_machdep.c. They fail for user PTEs and/or PTDs! (107
- * addresses including 0xff800000 and 0xfc000000). I'm not sure if
- * this can be fixed. Marking the PTEs supervisor mode and the
- * PDE's user mode would almost work, but there may be a problem
- * with the self-referential PDE.
- */
- movl %edi,%eax
- addl %ebx,%eax
- jc copyout_fault
-#define VM_END_USER_ADDRESS 0xFDBFE000 /* XXX */
- cmpl $VM_END_USER_ADDRESS,%eax
- ja copyout_fault
-
-#ifndef USE_486_WRITE_PROTECT
- /*
- * We have to check each PTE for user write permission.
- * The checking may cause a page fault, so it is important to set
- * up everything for return via copyout_fault before here.
- */
- /* compute number of pages */
- movl %edi,%ecx
- andl $NBPG-1,%ecx
- addl %ebx,%ecx
- decl %ecx
- shrl $IDXSHIFT+2,%ecx
- incl %ecx
-
- /* compute PTE offset for start address */
- movl %edi,%edx
- shrl $IDXSHIFT,%edx
- andb $0xfc,%dl
-
-1: /* check PTE for each page */
- movb _PTmap(%edx),%al
- andb $0x07,%al /* Pages must be VALID + USERACC + WRITABLE */
- cmpb $0x07,%al
- je 2f
-
- /* simulate a trap */
- pushl %edx
- pushl %ecx
- shll $IDXSHIFT,%edx
- pushl %edx
- call _trapwrite /* trapwrite(addr) */
- popl %edx
- popl %ecx
- popl %edx
-
- orl %eax,%eax /* if not ok, return EFAULT */
- jnz copyout_fault
-
-2:
- addl $4,%edx
- decl %ecx
- jnz 1b /* check next page */
-#endif /* ndef USE_486_WRITE_PROTECT */
-
- /* bcopy(%esi, %edi, %ebx) */
- cld
- movl %ebx,%ecx
- shrl $2,%ecx
- rep
- movsl
- movb %bl,%cl
- andb $3,%cl /* XXX can we trust the rest of %ecx on clones? */
- rep
- movsb
-
-done_copyout:
- popl %ebx
- popl %edi
- popl %esi
- xorl %eax,%eax
- movl _curpcb,%edx
- movl %eax,PCB_ONFAULT(%edx)
- ret
-
- ALIGN_TEXT
-copyout_fault:
- popl %ebx
- popl %edi
- popl %esi
- movl _curpcb,%edx
- movl $0,PCB_ONFAULT(%edx)
- movl $EFAULT,%eax
- ret
-
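
The magic 0x07 tested in the page loop above is the low three PTE bits. A short C rendering of the same test (the PG_* names are the conventional i386 ones, assumed here rather than taken from this file):

	#define PG_V	0x01		/* page present/valid */
	#define PG_RW	0x02		/* writable */
	#define PG_U	0x04		/* user accessible */

	/*
	 * A user write may proceed only if all three bits are set; otherwise
	 * the code above calls _trapwrite to simulate the missing page fault.
	 */
	static int
	user_page_writable(unsigned int pte)
	{
		return ((pte & (PG_V | PG_RW | PG_U)) == (PG_V | PG_RW | PG_U));
	}
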
-ENTRY(copyin) /* copyin(from_user, to_kernel, len) */
- movl _curpcb,%eax
- movl $copyin_fault,PCB_ONFAULT(%eax)
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi /* caddr_t from */
- movl 16(%esp),%edi /* caddr_t to */
- movl 20(%esp),%ecx /* size_t len */
-
- movb %cl,%al
- shrl $2,%ecx /* copy longword-wise */
- cld
- gs
- rep
- movsl
- movb %al,%cl
- andb $3,%cl /* copy remaining bytes */
- gs
- rep
- movsb
-
- popl %edi
- popl %esi
- xorl %eax,%eax
- movl _curpcb,%edx
- movl %eax,PCB_ONFAULT(%edx)
- ret
-
- ALIGN_TEXT
-copyin_fault:
- popl %edi
- popl %esi
- movl _curpcb,%edx
- movl $0,PCB_ONFAULT(%edx)
- movl $EFAULT,%eax
- ret
-
- /*
- * fu{byte,sword,word} : fetch a byte(sword, word) from user memory
- */
-ALTENTRY(fuiword)
-ENTRY(fuword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- gs
- movl (%edx),%eax
- movl $0,PCB_ONFAULT(%ecx)
- ret
-
-ENTRY(fusword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- gs
- movzwl (%edx),%eax
- movl $0,PCB_ONFAULT(%ecx)
- ret
-
-ALTENTRY(fuibyte)
-ENTRY(fubyte)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- gs
- movzbl (%edx),%eax
- movl $0,PCB_ONFAULT(%ecx)
- ret
-
- ALIGN_TEXT
-fusufault:
- movl _curpcb,%ecx
- xorl %eax,%eax
- movl %eax,PCB_ONFAULT(%ecx)
- decl %eax
- ret
-
- /*
- * su{byte,sword,word}: write a byte(word, longword) to user memory
- */
-#ifdef USE_486_WRITE_PROTECT
- /*
- * we only have to set the right segment selector.
- */
-ALTENTRY(suiword)
-ENTRY(suword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movl 8(%esp),%eax
- gs
- movl %eax,(%edx)
- xorl %eax,%eax
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-ENTRY(susword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movw 8(%esp),%ax
- gs
- movw %ax,(%edx)
- xorl %eax,%eax
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-ALTENTRY(suibyte)
-ENTRY(subyte)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movb 8(%esp),%al
- gs
- movb %al,(%edx)
- xorl %eax,%eax
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-
-#else /* USE_486_WRITE_PROTECT */
- /*
- * here starts the trouble again: check PTE, twice if word crosses
- * a page boundary.
- */
- /* XXX - page boundary crossing is not handled yet */
-
-ALTENTRY(suibyte)
-ENTRY(subyte)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movl %edx,%eax
- shrl $IDXSHIFT,%edx
- andb $0xfc,%dl
- movb _PTmap(%edx),%dl
- andb $0x7,%dl /* must be VALID + USERACC + WRITE */
- cmpb $0x7,%dl
- je 1f
- /* simulate a trap */
- pushl %eax
- call _trapwrite
- popl %edx
- orl %eax,%eax
- jnz fusufault
-1:
- movl 4(%esp),%edx
- movl 8(%esp),%eax
- gs
- movb %al,(%edx)
- xorl %eax,%eax
- movl _curpcb,%ecx
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-ENTRY(susword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movl %edx,%eax
- shrl $IDXSHIFT,%edx
- andb $0xfc,%dl
- movb _PTmap(%edx),%dl
- andb $0x7,%dl /* must be VALID + USERACC + WRITE */
- cmpb $0x7,%dl
- je 1f
- /* simulate a trap */
- pushl %eax
- call _trapwrite
- popl %edx
- orl %eax,%eax
- jnz fusufault
-1:
- movl 4(%esp),%edx
- movl 8(%esp),%eax
- gs
- movw %ax,(%edx)
- xorl %eax,%eax
- movl _curpcb,%ecx
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-ALTENTRY(suiword)
-ENTRY(suword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movl %edx,%eax
- shrl $IDXSHIFT,%edx
- andb $0xfc,%dl
- movb _PTmap(%edx),%dl
- andb $0x7,%dl /* must be VALID + USERACC + WRITE */
- cmpb $0x7,%dl
- je 1f
- /* simulate a trap */
- pushl %eax
- call _trapwrite
- popl %edx
- orl %eax,%eax
- jnz fusufault
-1:
- movl 4(%esp),%edx
- movl 8(%esp),%eax
- gs
- movl %eax,0(%edx)
- xorl %eax,%eax
- movl _curpcb,%ecx
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-#endif /* USE_486_WRITE_PROTECT */
-
-/*
- * copyoutstr(from, to, maxlen, int *lencopied)
- * copy a string from from to to, stop when a 0 character is reached.
- * return ENAMETOOLONG if string is longer than maxlen, and
- * EFAULT on protection violations. If lencopied is non-zero,
- * return the actual length in *lencopied.
- */
-#ifdef USE_486_WRITE_PROTECT
-
-ENTRY(copyoutstr)
- pushl %esi
- pushl %edi
- movl _curpcb,%ecx
- movl $cpystrflt,PCB_ONFAULT(%ecx)
-
- movl 12(%esp),%esi /* %esi = from */
- movl 16(%esp),%edi /* %edi = to */
- movl 20(%esp),%edx /* %edx = maxlen */
- incl %edx
-
-1:
- decl %edx
- jz 4f
- /*
- * gs override doesn't work for stosb. Use the same explicit check
- * as in copyout(). It's much slower now because it is per-char.
- * XXX - however, it would be faster to rewrite this function to use
- * strlen() and copyout().
- */
- cmpl $VM_END_USER_ADDRESS,%edi
- jae cpystrflt
- lodsb
- gs
- stosb
- orb %al,%al
- jnz 1b
- /* Success -- 0 byte reached */
- decl %edx
- xorl %eax,%eax
- jmp 6f
-4:
- /* edx is zero -- return ENAMETOOLONG */
- movl $ENAMETOOLONG,%eax
- jmp 6f
-
-#else /* ndef USE_486_WRITE_PROTECT */
-
-ENTRY(copyoutstr)
- pushl %esi
- pushl %edi
- movl _curpcb,%ecx
- movl $cpystrflt,PCB_ONFAULT(%ecx)
-
- movl 12(%esp),%esi /* %esi = from */
- movl 16(%esp),%edi /* %edi = to */
- movl 20(%esp),%edx /* %edx = maxlen */
-1:
- /*
- * It suffices to check that the first byte is in user space, because
- * we look at a page at a time and the end address is on a page
- * boundary.
- */
- cmpl $VM_END_USER_ADDRESS,%edi
- jae cpystrflt
- movl %edi,%eax
- shrl $IDXSHIFT,%eax
- andb $0xfc,%al
- movb _PTmap(%eax),%al
- andb $7,%al
- cmpb $7,%al
- je 2f
-
- /* simulate trap */
- pushl %edx
- pushl %edi
- call _trapwrite
- popl %edi
- popl %edx
- orl %eax,%eax
- jnz cpystrflt
-
-2: /* copy up to end of this page */
- movl %edi,%eax
- andl $NBPG-1,%eax
- movl $NBPG,%ecx
- subl %eax,%ecx /* ecx = NBPG - (src % NBPG) */
- cmpl %ecx,%edx
- jge 3f
- movl %edx,%ecx /* ecx = min(ecx, edx) */
-3:
- orl %ecx,%ecx
- jz 4f
- decl %ecx
- decl %edx
- lodsb
- stosb
- orb %al,%al
- jnz 3b
-
- /* Success -- 0 byte reached */
- decl %edx
- xorl %eax,%eax
- jmp 6f
-
-4: /* next page */
- orl %edx,%edx
- jnz 1b
- /* edx is zero -- return ENAMETOOLONG */
- movl $ENAMETOOLONG,%eax
- jmp 6f
-
-#endif /* USE_486_WRITE_PROTECT */
-
-/*
- * copyinstr(from, to, maxlen, int *lencopied)
- * copy a string from from to to, stop when a 0 character is reached.
- * return ENAMETOOLONG if string is longer than maxlen, and
- * EFAULT on protection violations. If lencopied is non-zero,
- * return the actual length in *lencopied.
- */
-ENTRY(copyinstr)
- pushl %esi
- pushl %edi
- movl _curpcb,%ecx
- movl $cpystrflt,PCB_ONFAULT(%ecx)
-
- movl 12(%esp),%esi /* %esi = from */
- movl 16(%esp),%edi /* %edi = to */
- movl 20(%esp),%edx /* %edx = maxlen */
- incl %edx
-
-1:
- decl %edx
- jz 4f
- gs
- lodsb
- stosb
- orb %al,%al
- jnz 1b
- /* Success -- 0 byte reached */
- decl %edx
- xorl %eax,%eax
- jmp 6f
-4:
- /* edx is zero -- return ENAMETOOLONG */
- movl $ENAMETOOLONG,%eax
- jmp 6f
-
-cpystrflt:
- movl $EFAULT,%eax
-6: /* set *lencopied and return %eax */
- movl _curpcb,%ecx
- movl $0,PCB_ONFAULT(%ecx)
- movl 20(%esp),%ecx
- subl %edx,%ecx
- movl 24(%esp),%edx
- orl %edx,%edx
- jz 7f
- movl %ecx,(%edx)
-7:
- popl %edi
- popl %esi
- ret
-
-
-/*
- * copystr(from, to, maxlen, int *lencopied)
- */
-ENTRY(copystr)
- pushl %esi
- pushl %edi
-
- movl 12(%esp),%esi /* %esi = from */
- movl 16(%esp),%edi /* %edi = to */
- movl 20(%esp),%edx /* %edx = maxlen */
- incl %edx
-
-1:
- decl %edx
- jz 4f
- lodsb
- stosb
- orb %al,%al
- jnz 1b
- /* Success -- 0 byte reached */
- decl %edx
- xorl %eax,%eax
- jmp 6f
-4:
- /* edx is zero -- return ENAMETOOLONG */
- movl $ENAMETOOLONG,%eax
-
-6: /* set *lencopied and return %eax */
- movl 20(%esp),%ecx
- subl %edx,%ecx
- movl 24(%esp),%edx
- orl %edx,%edx
- jz 7f
- movl %ecx,(%edx)
-7:
- popl %edi
- popl %esi
- ret
-
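
Since copystr() needs no user-space access, its contract is easy to restate in plain C. A self-contained sketch of the same semantics (copy at most maxlen bytes including the terminating NUL, ENAMETOOLONG if the NUL never arrived, bytes actually copied reported through *lencopied); copystr_sketch is an invented name:

	#include <errno.h>
	#include <stddef.h>

	static int
	copystr_sketch(const char *from, char *to, size_t maxlen, size_t *lencopied)
	{
		size_t n;

		for (n = 0; n < maxlen; n++) {
			if ((to[n] = from[n]) == '\0') {
				if (lencopied != NULL)
					*lencopied = n + 1;	/* count the NUL */
				return (0);
			}
		}
		if (lencopied != NULL)
			*lencopied = n;			/* maxlen bytes copied */
		return (ENAMETOOLONG);
	}
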
-/*
- * Handling of special 386 registers and descriptor tables etc
- */
-ENTRY(lgdt) /* void lgdt(struct region_descriptor *rdp); */
- /* reload the descriptor table */
- movl 4(%esp),%eax
- lgdt (%eax)
- /* flush the prefetch q */
- jmp 1f
- nop
-1:
- /* reload "stale" selectors */
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
- movl %ax,%ss
-
- /* reload code selector by turning return into intersegmental return */
- movl (%esp),%eax
- pushl %eax
-# movl $KCSEL,4(%esp)
- movl $8,4(%esp)
- lret
-
- /*
- * void lidt(struct region_descriptor *rdp);
- */
-ENTRY(lidt)
- movl 4(%esp),%eax
- lidt (%eax)
- ret
-
- /*
- * void lldt(u_short sel)
- */
-ENTRY(lldt)
- lldt 4(%esp)
- ret
-
- /*
- * void ltr(u_short sel)
- */
-ENTRY(ltr)
- ltr 4(%esp)
- ret
-
-ENTRY(ssdtosd) /* ssdtosd(*ssdp,*sdp) */
- pushl %ebx
- movl 8(%esp),%ecx
- movl 8(%ecx),%ebx
- shll $16,%ebx
- movl (%ecx),%edx
- roll $16,%edx
- movb %dh,%bl
- movb %dl,%bh
- rorl $8,%ebx
- movl 4(%ecx),%eax
- movw %ax,%dx
- andl $0xf0000,%eax
- orl %eax,%ebx
- movl 12(%esp),%ecx
- movl %edx,(%ecx)
- movl %ebx,4(%ecx)
- popl %ebx
- ret
-
-
-ENTRY(tlbflush) /* tlbflush() */
- movl %cr3,%eax
- orl $I386_CR3PAT,%eax
- movl %eax,%cr3
- ret
-
-
-ENTRY(load_cr0) /* load_cr0(cr0) */
- movl 4(%esp),%eax
- movl %eax,%cr0
- ret
-
-
-ENTRY(rcr0) /* rcr0() */
- movl %cr0,%eax
- ret
-
-
-ENTRY(rcr2) /* rcr2() */
- movl %cr2,%eax
- ret
-
-
-ENTRY(rcr3) /* rcr3() */
- movl %cr3,%eax
- ret
-
-
-ENTRY(load_cr3) /* void load_cr3(caddr_t cr3) */
- movl 4(%esp),%eax
- orl $I386_CR3PAT,%eax
- movl %eax,%cr3
- ret
-
-
-/*****************************************************************************/
-/* setjump, longjump */
-/*****************************************************************************/
-
-ENTRY(setjmp)
- movl 4(%esp),%eax
- movl %ebx,(%eax) /* save ebx */
- movl %esp,4(%eax) /* save esp */
- movl %ebp,8(%eax) /* save ebp */
- movl %esi,12(%eax) /* save esi */
- movl %edi,16(%eax) /* save edi */
- movl (%esp),%edx /* get rta */
- movl %edx,20(%eax) /* save eip */
- xorl %eax,%eax /* return(0); */
- ret
-
-ENTRY(longjmp)
- movl 4(%esp),%eax
- movl (%eax),%ebx /* restore ebx */
- movl 4(%eax),%esp /* restore esp */
- movl 8(%eax),%ebp /* restore ebp */
- movl 12(%eax),%esi /* restore esi */
- movl 16(%eax),%edi /* restore edi */
- movl 20(%eax),%edx /* get rta */
- movl %edx,(%esp) /* put in return frame */
- xorl %eax,%eax /* return(1); */
- incl %eax
- ret
-
-
-/*****************************************************************************/
-/* Scheduling */
-/*****************************************************************************/
-
-/*
- * The following primitives manipulate the run queues.
- * _whichqs tells which of the 32 queues _qs
- * have processes in them. Setrq puts processes into queues, Remrq
- * removes them from queues. The running process is on no queue,
- * other processes are on a queue indexed by p->p_pri divided by 4,
- * which shrinks the 0-127 range of priorities into the 32 available
- * queues.
- */
-
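
The same bookkeeping in C, for orientation only (a sketch of the data structure, not the kernel's code; names mirror the globals below): 32 queues, one bit per queue in whichqs, and the scheduler picks the lowest-numbered non-empty queue, so the bsfl in swtch() below is essentially ffs().

	#include <stdint.h>
	#include <strings.h>		/* ffs() */

	static uint32_t whichqs;	/* bit i set => run queue i is non-empty */

	static int
	queue_index(int p_pri)		/* priority 0..127 -> queue 0..31 */
	{
		return (p_pri >> 2);
	}

	static void
	queue_mark_nonempty(int qi)	/* btsl %edx,_whichqs */
	{
		whichqs |= (uint32_t)1 << qi;
	}

	static void
	queue_mark_empty(int qi)	/* btrl %edx,_whichqs */
	{
		whichqs &= ~((uint32_t)1 << qi);
	}

	static int
	pick_queue(void)		/* bsfl _whichqs,%eax; je Idle */
	{
		return (whichqs != 0 ? ffs((int)whichqs) - 1 : -1);
	}
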
- .globl _whichqs,_qs,_cnt,_panic
- .comm _noproc,4
- .comm _runrun,4
-
-/*
- * Setrq(p)
- *
- * Call should be made at spl6(), and p->p_stat should be SRUN
- */
-ENTRY(setrq)
- movl 4(%esp),%eax
- cmpl $0,P_RLINK(%eax) /* should not be on q already */
- je set1
- pushl $set2
- call _panic
-set1:
- movzbl P_PRI(%eax),%edx
- shrl $2,%edx
- btsl %edx,_whichqs /* set q full bit */
- shll $3,%edx
- addl $_qs,%edx /* locate q hdr */
- movl %edx,P_LINK(%eax) /* link process on tail of q */
- movl P_RLINK(%edx),%ecx
- movl %ecx,P_RLINK(%eax)
- movl %eax,P_RLINK(%edx)
- movl %eax,P_LINK(%ecx)
- ret
-
-set2: .asciz "setrq"
-
-/*
- * Remrq(p)
- *
- * Call should be made at spl6().
- */
-ENTRY(remrq)
- movl 4(%esp),%eax
- movzbl P_PRI(%eax),%edx
- shrl $2,%edx
- btrl %edx,_whichqs /* clear full bit, panic if clear already */
- jb rem1
- pushl $rem3
- call _panic
-rem1:
- pushl %edx
- movl P_LINK(%eax),%ecx /* unlink process */
- movl P_RLINK(%eax),%edx
- movl %edx,P_RLINK(%ecx)
- movl P_RLINK(%eax),%ecx
- movl P_LINK(%eax),%edx
- movl %edx,P_LINK(%ecx)
- popl %edx
- movl $_qs,%ecx
- shll $3,%edx
- addl %edx,%ecx
- cmpl P_LINK(%ecx),%ecx /* q still has something? */
- je rem2
- shrl $3,%edx /* yes, set bit as still full */
- btsl %edx,_whichqs
-rem2:
- movl $0,P_RLINK(%eax) /* zap reverse link to indicate off list */
- ret
-
-rem3: .asciz "remrq"
-sw0: .asciz "swtch"
-
-/*
- * When no processes are on the runq, Swtch branches to idle
- * to wait for something to come ready.
- */
- ALIGN_TEXT
-Idle:
- sti
- SHOW_STI
-
- ALIGN_TEXT
-idle_loop:
- call _spl0
- cmpl $0,_whichqs
- jne sw1
- hlt /* wait for interrupt */
- jmp idle_loop
-
-badsw:
- pushl $sw0
- call _panic
- /*NOTREACHED*/
-
-/*
- * Swtch()
- */
- SUPERALIGN_TEXT /* so profiling doesn't lump Idle with swtch().. */
-ENTRY(swtch)
-
- incl _cnt+V_SWTCH
-
- /* switch to new process. first, save context as needed */
-
- movl _curproc,%ecx
-
- /* if no process to save, don't bother */
- testl %ecx,%ecx
- je sw1
-
- movl P_ADDR(%ecx),%ecx
-
- movl (%esp),%eax /* Hardware registers */
- movl %eax,PCB_EIP(%ecx)
- movl %ebx,PCB_EBX(%ecx)
- movl %esp,PCB_ESP(%ecx)
- movl %ebp,PCB_EBP(%ecx)
- movl %esi,PCB_ESI(%ecx)
- movl %edi,PCB_EDI(%ecx)
-
-#if NNPX > 0
- /* have we used fp, and need a save? */
- mov _curproc,%eax
- cmp %eax,_npxproc
- jne 1f
- pushl %ecx /* h/w bugs make saving complicated */
- leal PCB_SAVEFPU(%ecx),%eax
- pushl %eax
- call _npxsave /* do it in a big C function */
- popl %eax
- popl %ecx
-1:
-#endif /* NNPX > 0 */
-
- movl _CMAP2,%eax /* save temporary map PTE */
- movl %eax,PCB_CMAP2(%ecx) /* in our context */
- movl $0,_curproc /* out of process */
-
-# movw _cpl,%ax
-# movw %ax,PCB_IML(%ecx) /* save ipl */
-
- /* save is done, now choose a new process or idle */
-sw1:
- cli
- SHOW_CLI
- movl _whichqs,%edi
-2:
- /* XXX - bsf is sloow */
- bsfl %edi,%eax /* find a full q */
- je Idle /* if none, idle */
- /* XX update whichqs? */
-swfnd:
- btrl %eax,%edi /* clear q full status */
- jnb 2b /* if it was clear, look for another */
- movl %eax,%ebx /* save which one we are using */
-
- shll $3,%eax
- addl $_qs,%eax /* select q */
- movl %eax,%esi
-
-#ifdef DIAGNOSTIC
- cmpl P_LINK(%eax),%eax /* linked to self? (e.g. not on list) */
- je badsw /* not possible */
-#endif
-
- movl P_LINK(%eax),%ecx /* unlink from front of process q */
- movl P_LINK(%ecx),%edx
- movl %edx,P_LINK(%eax)
- movl P_RLINK(%ecx),%eax
- movl %eax,P_RLINK(%edx)
-
- cmpl P_LINK(%ecx),%esi /* q empty */
- je 3f
- btsl %ebx,%edi /* nope, set to indicate full */
-3:
- movl %edi,_whichqs /* update q status */
-
- movl $0,%eax
- movl %eax,_want_resched
-
-#ifdef DIAGNOSTIC
- cmpl %eax,P_WCHAN(%ecx)
- jne badsw
- cmpb $SRUN,P_STAT(%ecx)
- jne badsw
-#endif
-
- movl %eax,P_RLINK(%ecx) /* isolate process to run */
- movl P_ADDR(%ecx),%edx
- movl PCB_CR3(%edx),%ebx
-
- /* switch address space */
- movl %ebx,%cr3
-
- /* restore context */
- movl PCB_EBX(%edx),%ebx
- movl PCB_ESP(%edx),%esp
- movl PCB_EBP(%edx),%ebp
- movl PCB_ESI(%edx),%esi
- movl PCB_EDI(%edx),%edi
- movl PCB_EIP(%edx),%eax
- movl %eax,(%esp)
-
- movl PCB_CMAP2(%edx),%eax /* get temporary map */
- movl %eax,_CMAP2 /* reload temporary map PTE */
-
- movl %ecx,_curproc /* into next process */
- movl %edx,_curpcb
-
- pushl %edx /* save p to return */
-/*
- * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
- * I think restoring the cpl is unnecessary, but we must turn off the cli
- * now that spl*() don't do it as a side effect.
- */
- pushl PCB_IML(%edx)
- sti
- SHOW_STI
-#if 0
- call _splx
-#endif
- addl $4,%esp
-/*
- * XXX - 0.0 gets here via swtch_to_inactive(). I think 0.1 gets here in the
- * same way. Better return a value.
- */
- popl %eax /* return(p); */
- ret
-
-ENTRY(mvesp)
- movl %esp,%eax
- ret
-/*
- * struct proc *swtch_to_inactive(p) ; struct proc *p;
- *
- * At exit of a process, move off the address space of the
- * process and onto a "safe" one. Then, on a temporary stack
- * return and run code that disposes of the old state.
- * Since this code requires a parameter from the "old" stack,
- * pass it back as a return value.
- */
-ENTRY(swtch_to_inactive)
- popl %edx /* old pc */
- popl %eax /* arg, our return value */
- movl _IdlePTD,%ecx
- movl %ecx,%cr3 /* good bye address space */
- #write buffer?
- movl $tmpstk-4,%esp /* temporary stack, compensated for call */
- jmp %edx /* return, execute remainder of cleanup */
-
-/*
- * savectx(pcb, altreturn)
- * Update pcb, saving current processor state and arranging
- * for alternate return ala longjmp in swtch if altreturn is true.
- */
-ENTRY(savectx)
- movl 4(%esp),%ecx
- movw _cpl,%ax
- movw %ax,PCB_IML(%ecx)
- movl (%esp),%eax
- movl %eax,PCB_EIP(%ecx)
- movl %ebx,PCB_EBX(%ecx)
- movl %esp,PCB_ESP(%ecx)
- movl %ebp,PCB_EBP(%ecx)
- movl %esi,PCB_ESI(%ecx)
- movl %edi,PCB_EDI(%ecx)
-
-#if NNPX > 0
- /*
- * If npxproc == NULL, then the npx h/w state is irrelevant and the
- * state had better already be in the pcb. This is true for forks
- * but not for dumps (the old book-keeping with FP flags in the pcb
- * always lost for dumps because the dump pcb has 0 flags).
- *
- * If npxproc != NULL, then we have to save the npx h/w state to
- * npxproc's pcb and copy it to the requested pcb, or save to the
- * requested pcb and reload. Copying is easier because we would
- * have to handle h/w bugs for reloading. We used to lose the
- * parent's npx state for forks by forgetting to reload.
- */
- mov _npxproc,%eax
- testl %eax,%eax
- je 1f
-
- pushl %ecx
- movl P_ADDR(%eax),%eax
- leal PCB_SAVEFPU(%eax),%eax
- pushl %eax
- pushl %eax
- call _npxsave
- popl %eax
- popl %eax
- popl %ecx
-
- pushl %ecx
- pushl $108+8*2 /* XXX h/w state size + padding */
- leal PCB_SAVEFPU(%ecx),%ecx
- pushl %ecx
- pushl %eax
- call _bcopy
- addl $12,%esp
- popl %ecx
-1:
-#endif /* NNPX > 0 */
-
- movl _CMAP2,%edx /* save temporary map PTE */
- movl %edx,PCB_CMAP2(%ecx) /* in our context */
-
- cmpl $0,8(%esp)
- je 1f
- movl %esp,%edx /* relocate current sp relative to pcb */
- subl $_kstack,%edx /* (sp is relative to kstack): */
- addl %edx,%ecx /* pcb += sp - kstack; */
- movl %eax,(%ecx) /* write return pc at (relocated) sp@ */
- /* this mess deals with replicating register state gcc hides */
- movl 12(%esp),%eax
- movl %eax,12(%ecx)
- movl 16(%esp),%eax
- movl %eax,16(%ecx)
- movl 20(%esp),%eax
- movl %eax,20(%ecx)
- movl 24(%esp),%eax
- movl %eax,24(%ecx)
-1:
- xorl %eax,%eax /* return 0 */
- ret
-
-/*
- * addupc(int pc, struct uprof *up, int ticks):
- * update profiling information for the user process.
- */
-ENTRY(addupc)
- pushl %ebp
- movl %esp,%ebp
- movl 12(%ebp),%edx /* up */
- movl 8(%ebp),%eax /* pc */
-
- subl PR_OFF(%edx),%eax /* pc -= up->pr_off */
- jl L1 /* if (pc < 0) return */
-
- shrl $1,%eax /* praddr = pc >> 1 */
- imull PR_SCALE(%edx),%eax /* praddr *= up->pr_scale */
- shrl $15,%eax /* praddr = praddr >> 15 */
- andl $-2,%eax /* praddr &= ~1 */
-
- cmpl PR_SIZE(%edx),%eax /* if (praddr > up->pr_size) return */
- ja L1
-
-/* addl %eax,%eax /* praddr -> word offset */
- addl PR_BASE(%edx),%eax /* praddr += up-> pr_base */
- movl 16(%ebp),%ecx /* ticks */
-
- movl _curpcb,%edx
- movl $proffault,PCB_ONFAULT(%edx)
- addl %ecx,(%eax) /* storage location += ticks */
- movl $0,PCB_ONFAULT(%edx)
-L1:
- leave
- ret
-
- ALIGN_TEXT
-proffault:
- /* if we get a fault, then kill profiling altogether */
- movl $0,PCB_ONFAULT(%edx) /* squish the fault handler */
- movl 12(%ebp),%ecx
- movl $0,PR_SCALE(%ecx) /* up->pr_scale = 0 */
- leave
- ret
-
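
The address arithmetic in addupc() above is easier to follow in C. An illustrative sketch only, with an invented struct whose fields mirror the PR_* offsets used by the assembly (note the scale step is a right shift by 15, and the assembly does a 32-bit add at the computed slot even though the slot is only 2-byte aligned):

	#include <stdint.h>

	struct uprof_sketch {
		char     *pr_base;	/* profile buffer */
		uint32_t  pr_size;	/* buffer size in bytes */
		uint32_t  pr_off;	/* lowest profiled pc */
		uint32_t  pr_scale;	/* fixed-point scale factor */
	};

	static void
	addupc_sketch(uint32_t pc, struct uprof_sketch *up, int ticks)
	{
		uint32_t praddr;

		if (pc < up->pr_off)			/* below profiled range */
			return;
		praddr = (pc - up->pr_off) >> 1;	/* shrl $1 */
		praddr = (praddr * up->pr_scale) >> 15;	/* imull PR_SCALE; shrl $15 */
		praddr &= ~(uint32_t)1;			/* andl $-2: 2-byte align */
		if (praddr > up->pr_size)		/* cmpl PR_SIZE; ja */
			return;
		/* mirrors the 32-bit addl in the assembly */
		*(uint32_t *)(up->pr_base + praddr) += (uint32_t)ticks;
	}
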
-/* To be done: */
-ENTRY(astoff)
- ret
-
-
-/*****************************************************************************/
-/* Trap handling */
-/*****************************************************************************/
-/*
- * Trap and fault vector routines
- *
- * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
- * control. The sti's give the standard losing behaviour for ddb and kgdb.
- */
-#define IDTVEC(name) ALIGN_TEXT; .globl _X/**/name; _X/**/name:
-#define TRAP(a) pushl $(a) ; jmp alltraps
-#ifdef KGDB
-# define BPTTRAP(a) sti; pushl $(a) ; jmp bpttraps
-#else
-# define BPTTRAP(a) sti; TRAP(a)
-#endif
-
-IDTVEC(div)
- pushl $0; TRAP(T_DIVIDE)
-IDTVEC(dbg)
-#ifdef BDBTRAP
- BDBTRAP(dbg)
-#endif
- pushl $0; BPTTRAP(T_TRCTRAP)
-IDTVEC(nmi)
- pushl $0; TRAP(T_NMI)
-IDTVEC(bpt)
-#ifdef BDBTRAP
- BDBTRAP(bpt)
-#endif
- pushl $0; BPTTRAP(T_BPTFLT)
-IDTVEC(ofl)
- pushl $0; TRAP(T_OFLOW)
-IDTVEC(bnd)
- pushl $0; TRAP(T_BOUND)
-IDTVEC(ill)
- pushl $0; TRAP(T_PRIVINFLT)
-IDTVEC(dna)
- pushl $0; TRAP(T_DNA)
-IDTVEC(dble)
- TRAP(T_DOUBLEFLT)
- /*PANIC("Double Fault");*/
-IDTVEC(fpusegm)
- pushl $0; TRAP(T_FPOPFLT)
-IDTVEC(tss)
- TRAP(T_TSSFLT)
- /*PANIC("TSS not valid");*/
-IDTVEC(missing)
- TRAP(T_SEGNPFLT)
-IDTVEC(stk)
- TRAP(T_STKFLT)
-IDTVEC(prot)
- TRAP(T_PROTFLT)
-IDTVEC(page)
- TRAP(T_PAGEFLT)
-IDTVEC(rsvd)
- pushl $0; TRAP(T_RESERVED)
-IDTVEC(fpu)
-#if NNPX > 0
- /*
- * Handle like an interrupt so that we can call npxintr to clear the
- * error. It would be better to handle npx interrupts as traps but
- * this is difficult for nested interrupts.
- */
- pushl $0 /* dummy error code */
- pushl $T_ASTFLT
- pushal
- nop /* silly, the bug is for popal and it only
- * bites when the next instruction has a
- * complicated address mode */
- pushl %ds
- pushl %es /* now the stack frame is a trap frame */
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
- pushl _cpl
- pushl $0 /* dummy unit to finish building intr frame */
- incl _cnt+V_TRAP
- call _npxintr
- jmp doreti
-#else /* NNPX > 0 */
- pushl $0; TRAP(T_ARITHTRAP)
-#endif /* NNPX > 0 */
- /* 17 - 31 reserved for future exp */
-IDTVEC(rsvd0)
- pushl $0; TRAP(17)
-IDTVEC(rsvd1)
- pushl $0; TRAP(18)
-IDTVEC(rsvd2)
- pushl $0; TRAP(19)
-IDTVEC(rsvd3)
- pushl $0; TRAP(20)
-IDTVEC(rsvd4)
- pushl $0; TRAP(21)
-IDTVEC(rsvd5)
- pushl $0; TRAP(22)
-IDTVEC(rsvd6)
- pushl $0; TRAP(23)
-IDTVEC(rsvd7)
- pushl $0; TRAP(24)
-IDTVEC(rsvd8)
- pushl $0; TRAP(25)
-IDTVEC(rsvd9)
- pushl $0; TRAP(26)
-IDTVEC(rsvd10)
- pushl $0; TRAP(27)
-IDTVEC(rsvd11)
- pushl $0; TRAP(28)
-IDTVEC(rsvd12)
- pushl $0; TRAP(29)
-IDTVEC(rsvd13)
- pushl $0; TRAP(30)
-IDTVEC(rsvd14)
- pushl $0; TRAP(31)
-
- SUPERALIGN_TEXT
-alltraps:
- pushal
- nop
- pushl %ds
- pushl %es
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
-calltrap:
- incl _cnt+V_TRAP
- call _trap
- /*
- * Return through doreti to handle ASTs. Have to change trap frame
- * to interrupt frame.
- */
- movl $T_ASTFLT,4+4+32(%esp) /* new trap type (err code not used) */
- pushl _cpl
- pushl $0 /* dummy unit */
- jmp doreti
-
-#ifdef KGDB
-/*
- * This code checks for a kgdb trap, then falls through
- * to the regular trap code.
- */
- SUPERALIGN_TEXT
-bpttraps:
- pushal
- nop
- pushl %es
- pushl %ds
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
- testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
- /* non-kernel mode? */
- jne calltrap /* yes */
- call _kgdb_trap_glue
- jmp calltrap
-#endif
-
-/*
- * Call gate entry for syscall
- */
- SUPERALIGN_TEXT
-IDTVEC(syscall)
- pushfl /* only for stupid carry bit and more stupid wait3 cc kludge */
- /* XXX - also for direction flag (bzero, etc. clear it) */
- pushal /* only need eax,ecx,edx - trap resaves others */
- nop
- movl $KDSEL,%eax /* switch to kernel segments */
- movl %ax,%ds
- movl %ax,%es
- incl _cnt+V_SYSCALL /* kml 3/25/93 */
- call _syscall
- /*
- * Return through doreti to handle ASTs. Have to change syscall frame
- * to interrupt frame.
- *
- * XXX - we should have set up the frame earlier to avoid the
- * following popal/pushal (not much can be done to avoid shuffling
- * the flags). Consistent frames would simplify things all over.
- */
- movl 32+0(%esp),%eax /* old flags, shuffle to above cs:eip */
- movl 32+4(%esp),%ebx /* `int' frame should have been ef, eip, cs */
- movl 32+8(%esp),%ecx
- movl %ebx,32+0(%esp)
- movl %ecx,32+4(%esp)
- movl %eax,32+8(%esp)
- popal
- nop
- pushl $0 /* dummy error code */
- pushl $T_ASTFLT
- pushal
- nop
- movl __udatasel,%eax /* switch back to user segments */
- pushl %eax /* XXX - better to preserve originals? */
- pushl %eax
- pushl _cpl
- pushl $0
- jmp doreti
-
-#ifdef SHOW_A_LOT
-/*
- * 'show_bits' was too big when defined as a macro. The line length for some
- * enclosing macro was too big for gas. Perhaps the code would have blown
- * the cache anyway.
- */
- ALIGN_TEXT
-show_bits:
- pushl %eax
- SHOW_BIT(0)
- SHOW_BIT(1)
- SHOW_BIT(2)
- SHOW_BIT(3)
- SHOW_BIT(4)
- SHOW_BIT(5)
- SHOW_BIT(6)
- SHOW_BIT(7)
- SHOW_BIT(8)
- SHOW_BIT(9)
- SHOW_BIT(10)
- SHOW_BIT(11)
- SHOW_BIT(12)
- SHOW_BIT(13)
- SHOW_BIT(14)
- SHOW_BIT(15)
- popl %eax
- ret
-
- .data
-bit_colors:
- .byte GREEN,RED,0,0
- .text
-
-#endif /* SHOW_A_LOT */
-
-
-/*
- * include generated interrupt vectors and ISA intr code
- */
-#include "i386/isa/vector.s"
-#include "i386/isa/icu.s"
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 75ace1f..61948b0 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
- * $Id: machdep.c,v 1.14 1993/10/29 09:06:56 davidg Exp $
+ * $Id: machdep.c,v 1.15 1993/11/07 21:47:00 wollman Exp $
*/
#include "npx.h"
@@ -93,6 +93,10 @@ static unsigned int avail_remaining;
#define EXPECT_BASEMEM 640 /* The expected base memory*/
#define INFORM_WAIT 1 /* Set to pause before crash in weird cases */
+#ifndef PANIC_REBOOT_WAIT_TIME
+#define PANIC_REBOOT_WAIT_TIME 15 /* default to 15 seconds */
+#endif
+
/*
* Declare these as initialized data so we can patch them.
*/
@@ -625,13 +629,34 @@ boot(arghowto)
savectx(&dumppcb, 0);
dumppcb.pcb_ptd = rcr3();
dumpsys();
- /*NOTREACHED*/
+
+ if (PANIC_REBOOT_WAIT_TIME != 0) {
+ if (PANIC_REBOOT_WAIT_TIME != -1) {
+ int loop;
+ printf("Automatic reboot in %d seconds - press a key on the console to abort\n",
+ PANIC_REBOOT_WAIT_TIME);
+ for (loop = PANIC_REBOOT_WAIT_TIME; loop > 0; --loop) {
+ DELAY(1000 * 1000); /* one second */
+ if (sgetc(1)) /* Did user type a key? */
+ break;
+ }
+ if (!loop)
+ goto die;
+ }
+ } else { /* zero time specified - reboot NOW */
+ goto die;
+ }
+ printf("--> Press a key on the console to reboot <--\n");
+ cngetc();
}
}
#ifdef lint
dummy = 0; dummy = dummy;
printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
+die:
+ printf("Rebooting...\n");
+ DELAY (100000); /* wait 100ms for printf's to complete */
cpu_reset();
for(;;) ;
/*NOTREACHED*/
@@ -681,8 +706,6 @@ dumpsys()
printf("succeeded\n");
break;
}
- printf("\n\n");
- DELAY(1000);
}
#ifdef HZ
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index f35dd0f..ed8ff6a 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.6 1993/10/12 15:09:37 rgrimes Exp $
+ * $Id: pmap.c,v 1.7 1993/10/15 10:34:25 rgrimes Exp $
*/
/*
@@ -229,7 +229,7 @@ extern int IdlePTD;
avail_end -= i386_round_page(sizeof(struct msgbuf));
mem_size = physmem << PG_SHIFT;
- virtual_avail = (vm_offset_t)atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
+ virtual_avail = (vm_offset_t)KERNBASE + avail_start;
virtual_end = VM_MAX_KERNEL_ADDRESS;
i386pagesperpage = PAGE_SIZE / NBPG;
@@ -332,10 +332,10 @@ pmap_init(phys_start, phys_end)
(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
&addr, (0x100000-0xa0000), FALSE);
- addr = (vm_offset_t) KERNBASE + KPTphys/* *NBPG */;
+ addr = (vm_offset_t) KERNBASE + IdlePTD;
vm_object_reference(kernel_object);
(void) vm_map_find(kernel_map, kernel_object, addr,
- &addr, 2*NBPG, FALSE);
+ &addr, (NKPDE+4)*NBPG, FALSE);
/*
* Allocate memory for random pmap data structures. Includes the
diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
new file mode 100644
index 0000000..a2ed642
--- /dev/null
+++ b/sys/amd64/amd64/support.S
@@ -0,0 +1,1031 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "assym.s" /* system definitions */
+#include "errno.h" /* error return codes */
+#include "machine/asmacros.h" /* miscellaneous asm macros */
+
+#define KDSEL 0x10 /* kernel data selector */
+#define IDXSHIFT 10
+
+/*
+ * Support routines for GCC, general C-callable functions
+ */
+ENTRY(__udivsi3)
+ movl 4(%esp),%eax
+ xorl %edx,%edx
+ divl 8(%esp)
+ ret
+
+ENTRY(__divsi3)
+ movl 4(%esp),%eax
+ cltd
+ idivl 8(%esp)
+ ret
+
+ /*
+ * I/O bus instructions via C
+ */
+ENTRY(inb) /* val = inb(port) */
+ movl 4(%esp),%edx
+ subl %eax,%eax
+ NOP
+ inb %dx,%al
+ ret
+
+ENTRY(inw) /* val = inw(port) */
+ movl 4(%esp),%edx
+ subl %eax,%eax
+ NOP
+ inw %dx,%ax
+ ret
+
+ENTRY(insb) /* insb(port, addr, cnt) */
+ pushl %edi
+ movw 8(%esp),%dx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ insb
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ENTRY(insw) /* insw(port, addr, cnt) */
+ pushl %edi
+ movw 8(%esp),%dx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ insw
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ENTRY(rtcin) /* rtcin(val) */
+ movl 4(%esp),%eax
+ outb %al,$0x70
+ NOP
+ xorl %eax,%eax
+ inb $0x71,%al
+ ret
+
+ENTRY(outb) /* outb(port, val) */
+ movl 4(%esp),%edx
+ NOP
+ movl 8(%esp),%eax
+ outb %al,%dx
+ NOP
+ ret
+
+ENTRY(outw) /* outw(port, val) */
+ movl 4(%esp),%edx
+ NOP
+ movl 8(%esp),%eax
+ outw %ax,%dx
+ NOP
+ ret
+
+ENTRY(outsb) /* outsb(port, addr, cnt) */
+ pushl %esi
+ movw 8(%esp),%dx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ outsb
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+ENTRY(outsw) /* outsw(port, addr, cnt) */
+ pushl %esi
+ movw 8(%esp),%dx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ outsw
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+/*
+ * bcopy family
+ */
+/* void bzero(void *base, u_int cnt) */
+ENTRY(bzero)
+ pushl %edi
+ movl 8(%esp),%edi
+ movl 12(%esp),%ecx
+ xorl %eax,%eax
+ shrl $2,%ecx
+ cld
+ rep
+ stosl
+ movl 12(%esp),%ecx
+ andl $3,%ecx
+ rep
+ stosb
+ popl %edi
+ ret
+
+/* fillw(pat, base, cnt) */
+ENTRY(fillw)
+ pushl %edi
+ movl 8(%esp),%eax
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ stosw
+ popl %edi
+ ret
+
+ENTRY(bcopyb)
+bcopyb:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards. */
+ addl %ecx,%esi
+ std
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ENTRY(bcopyw)
+bcopyw:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ shrl $1,%ecx /* copy by 16-bit words */
+ rep
+ movsw
+ adc %ecx,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ std
+ andl $1,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ movl 20(%esp),%ecx /* copy remainder by 16-bit words */
+ shrl $1,%ecx
+ decl %esi
+ decl %edi
+ rep
+ movsw
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ENTRY(bcopyx)
+ movl 16(%esp),%eax
+ cmpl $2,%eax
+ je bcopyw /* not _bcopyw, to avoid multiple mcounts */
+ cmpl $4,%eax
+ je bcopy
+ jmp bcopyb
+
+/*
+ * (ov)bcopy(src, dst, cnt)
+ * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
+ */
+ALTENTRY(ovbcopy)
+ENTRY(bcopy)
+bcopy:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ shrl $2,%ecx /* copy by 32-bit words */
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ std
+ andl $3,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ movl 20(%esp),%ecx /* copy remainder by 32-bit words */
+ shrl $2,%ecx
+ subl $3,%esi
+ subl $3,%edi
+ rep
+ movsl
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ALTENTRY(ntohl)
+ENTRY(htonl)
+ movl 4(%esp),%eax
+#ifdef i486
+/* XXX */
+/* Since Gas 1.38 does not grok bswap this has been coded as the
+ * equivalent bytes. This can be changed back to bswap when we
+ * upgrade to a newer version of Gas */
+ /* bswap %eax */
+ .byte 0x0f
+ .byte 0xc8
+#else
+ xchgb %al,%ah
+ roll $16,%eax
+ xchgb %al,%ah
+#endif
+ ret
+
+ALTENTRY(ntohs)
+ENTRY(htons)
+ movzwl 4(%esp),%eax
+ xchgb %al,%ah
+ ret
+
+/*****************************************************************************/
+/* copyout and fubyte family */
+/*****************************************************************************/
+/*
+ * Access user memory from inside the kernel. These routines and possibly
+ * the math- and DOS emulators should be the only places that do this.
+ *
+ * We have to access the memory with user's permissions, so use a segment
+ * selector with RPL 3. For writes to user space we have to additionally
+ * check the PTE for write permission, because the 386 does not check
+ * write permissions when we are executing with EPL 0. The 486 does check
+ * this if the WP bit is set in CR0, so we can use a simpler version here.
+ *
+ * These routines set curpcb->onfault for the time they execute. When a
+ * protection violation occurs inside the functions, the trap handler
+ * returns to *curpcb->onfault instead of the function.
+ */
+
+
+ENTRY(copyout) /* copyout(from_kernel, to_user, len) */
+ movl _curpcb,%eax
+ movl $copyout_fault,PCB_ONFAULT(%eax)
+ pushl %esi
+ pushl %edi
+ pushl %ebx
+ movl 16(%esp),%esi
+ movl 20(%esp),%edi
+ movl 24(%esp),%ebx
+ orl %ebx,%ebx /* anything to do? */
+ jz done_copyout
+
+ /*
+ * Check explicitly for non-user addresses. If 486 write protection
+ * is being used, this check is essential because we are in kernel
+ * mode so the h/w does not provide any protection against writing
+ * kernel addresses.
+ *
+ * Otherwise, it saves having to load and restore %es to get the
+ * usual segment-based protection (the destination segment for movs
+ * is always %es). The other explicit checks for user-writability
+ * are not quite sufficient. They fail for the user area because
+ * we mapped the user area read/write to avoid having an #ifdef in
+ * vm_machdep.c. They fail for user PTEs and/or PTDs! (107
+ * addresses including 0xff800000 and 0xfc000000). I'm not sure if
+ * this can be fixed. Marking the PTEs supervisor mode and the
+ * PDE's user mode would almost work, but there may be a problem
+ * with the self-referential PDE.
+ */
+ movl %edi,%eax
+ addl %ebx,%eax
+ jc copyout_fault
+#define VM_END_USER_ADDRESS 0xFDBFE000 /* XXX */
+ cmpl $VM_END_USER_ADDRESS,%eax
+ ja copyout_fault
+
+#ifndef USE_486_WRITE_PROTECT
+/*
+ * We have to check each PTE for user write permission.
+ * The checking may cause a page fault, so it is important to set
+ * up everything for return via copyout_fault before here.
+ */
+ /* compute number of pages */
+ movl %edi,%ecx
+ andl $NBPG-1,%ecx
+ addl %ebx,%ecx
+ decl %ecx
+ shrl $IDXSHIFT+2,%ecx
+ incl %ecx
+
+ /* compute PTE offset for start address */
+ movl %edi,%edx
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+
+1: /* check PTE for each page */
+ movb _PTmap(%edx),%al
+ andb $0x07,%al /* Pages must be VALID + USERACC + WRITABLE */
+ cmpb $0x07,%al
+ je 2f
+
+ /* simulate a trap */
+ pushl %edx
+ pushl %ecx
+ shll $IDXSHIFT,%edx
+ pushl %edx
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+ popl %ecx
+ popl %edx
+
+ orl %eax,%eax /* if not ok, return EFAULT */
+ jnz copyout_fault
+
+2:
+ addl $4,%edx
+ decl %ecx
+ jnz 1b /* check next page */
+#endif /* ndef USE_486_WRITE_PROTECT */
+
+ /* bcopy(%esi, %edi, %ebx) */
+ cld
+ movl %ebx,%ecx
+ shrl $2,%ecx
+ rep
+ movsl
+ movb %bl,%cl
+ andb $3,%cl /* XXX can we trust the rest of %ecx on clones? */
+ rep
+ movsb
+
+done_copyout:
+ popl %ebx
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+copyout_fault:
+ popl %ebx
+ popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $EFAULT,%eax
+ ret
+
+/* copyin(from_user, to_kernel, len) */
+ENTRY(copyin)
+ movl _curpcb,%eax
+ movl $copyin_fault,PCB_ONFAULT(%eax)
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi /* caddr_t from */
+ movl 16(%esp),%edi /* caddr_t to */
+ movl 20(%esp),%ecx /* size_t len */
+
+ movb %cl,%al
+ shrl $2,%ecx /* copy longword-wise */
+ cld
+ gs
+ rep
+ movsl
+ movb %al,%cl
+ andb $3,%cl /* copy remaining bytes */
+ gs
+ rep
+ movsb
+
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+copyin_fault:
+ popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $EFAULT,%eax
+ ret
+
+/*
+ * fu{byte,sword,word} : fetch a byte(sword, word) from user memory
+ */
+ALTENTRY(fuiword)
+ENTRY(fuword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ gs
+ movl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ENTRY(fusword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ gs
+ movzwl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(fuibyte)
+ENTRY(fubyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ gs
+ movzbl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ ALIGN_TEXT
+fusufault:
+ movl _curpcb,%ecx
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ decl %eax
+ ret
+
+/*
+ * su{byte,sword,word}: write a byte(word, longword) to user memory
+ */
+#ifdef USE_486_WRITE_PROTECT
+/*
+ * we only have to set the right segment selector.
+ */
+ALTENTRY(suiword)
+ENTRY(suword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ gs
+ movl %eax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ENTRY(susword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movw 8(%esp),%ax
+ gs
+ movw %ax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(suibyte)
+ENTRY(subyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movb 8(%esp),%al
+ gs
+ movb %al,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+
+#else /* USE_486_WRITE_PROTECT */
+/*
+ * here starts the trouble again: check PTE, twice if word crosses
+ * a page boundary.
+ */
+/* XXX - page boundary crossing is not handled yet */
+
+ALTENTRY(suibyte)
+ENTRY(subyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ gs
+ movb %al,(%edx)
+ xorl %eax,%eax
+ movl _curpcb,%ecx
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ENTRY(susword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ gs
+ movw %ax,(%edx)
+ xorl %eax,%eax
+ movl _curpcb,%ecx
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(suiword)
+ENTRY(suword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ gs
+ movl %eax,0(%edx)
+ xorl %eax,%eax
+ movl _curpcb,%ecx
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+#endif /* USE_486_WRITE_PROTECT */
+
+/*
+ * copyoutstr(from, to, maxlen, int *lencopied)
+ * copy a string from from to to, stop when a 0 character is reached.
+ * return ENAMETOOLONG if string is longer than maxlen, and
+ * EFAULT on protection violations. If lencopied is non-zero,
+ * return the actual length in *lencopied.
+ */
+#ifdef USE_486_WRITE_PROTECT
+
+ENTRY(copyoutstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $cpystrflt,PCB_ONFAULT(%ecx)
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ incl %edx
+
+1:
+ decl %edx
+ jz 4f
+ /*
+ * gs override doesn't work for stosb. Use the same explicit check
+ * as in copyout(). It's much slower now because it is per-char.
+ * XXX - however, it would be faster to rewrite this function to use
+ * strlen() and copyout().
+ */
+ cmpl $VM_END_USER_ADDRESS,%edi
+ jae cpystrflt
+ lodsb
+ gs
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+4:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp 6f
+
+#else /* ndef USE_486_WRITE_PROTECT */
+
+ENTRY(copyoutstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $cpystrflt,PCB_ONFAULT(%ecx)
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+1:
+ /*
+ * It suffices to check that the first byte is in user space, because
+ * we look at a page at a time and the end address is on a page
+ * boundary.
+ */
+ cmpl $VM_END_USER_ADDRESS,%edi
+ jae cpystrflt
+ movl %edi,%eax
+ shrl $IDXSHIFT,%eax
+ andb $0xfc,%al
+ movb _PTmap(%eax),%al
+ andb $7,%al
+ cmpb $7,%al
+ je 2f
+
+ /* simulate trap */
+ pushl %edx
+ pushl %edi
+ call _trapwrite
+ popl %edi
+ popl %edx
+ orl %eax,%eax
+ jnz cpystrflt
+
+2: /* copy up to end of this page */
+ movl %edi,%eax
+ andl $NBPG-1,%eax
+ movl $NBPG,%ecx
+ subl %eax,%ecx /* ecx = NBPG - (src % NBPG) */
+ cmpl %ecx,%edx
+ jge 3f
+ movl %edx,%ecx /* ecx = min(ecx, edx) */
+3:
+ orl %ecx,%ecx
+ jz 4f
+ decl %ecx
+ decl %edx
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 3b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+
+4: /* next page */
+ orl %edx,%edx
+ jnz 1b
+
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp 6f
+
+#endif /* USE_486_WRITE_PROTECT */
+
+/*
+ * copyinstr(from, to, maxlen, int *lencopied)
+ * copy a string from from to to, stop when a 0 character is reached.
+ * return ENAMETOOLONG if string is longer than maxlen, and
+ * EFAULT on protection violations. If lencopied is non-zero,
+ * return the actual length in *lencopied.
+ */
+ENTRY(copyinstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $cpystrflt,PCB_ONFAULT(%ecx)
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ incl %edx
+
+1:
+ decl %edx
+ jz 4f
+ gs
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+4:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp 6f
+
+cpystrflt:
+ movl $EFAULT,%eax
+6:
+ /* set *lencopied and return %eax */
+ movl _curpcb,%ecx
+ movl $0,PCB_ONFAULT(%ecx)
+ movl 20(%esp),%ecx
+ subl %edx,%ecx
+ movl 24(%esp),%edx
+ orl %edx,%edx
+ jz 7f
+ movl %ecx,(%edx)
+7:
+ popl %edi
+ popl %esi
+ ret
+
+
+/*
+ * copystr(from, to, maxlen, int *lencopied)
+ */
+ENTRY(copystr)
+ pushl %esi
+ pushl %edi
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ incl %edx
+
+1:
+ decl %edx
+ jz 4f
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+4:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+
+6:
+ /* set *lencopied and return %eax */
+ movl 20(%esp),%ecx
+ subl %edx,%ecx
+ movl 24(%esp),%edx
+ orl %edx,%edx
+ jz 7f
+ movl %ecx,(%edx)
+7:
+ popl %edi
+ popl %esi
+ ret
+
+/*
+ * Handling of special 386 registers and descriptor tables etc
+ */
+/* void lgdt(struct region_descriptor *rdp); */
+ENTRY(lgdt)
+ /* reload the descriptor table */
+ movl 4(%esp),%eax
+ lgdt (%eax)
+
+ /* flush the prefetch q */
+ jmp 1f
+ nop
+1:
+ /* reload "stale" selectors */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ movl %ax,%ss
+
+ /* reload code selector by turning return into intersegmental return */
+ movl (%esp),%eax
+ pushl %eax
+# movl $KCSEL,4(%esp)
+ movl $8,4(%esp)
+ lret
+
+/*
+ * void lidt(struct region_descriptor *rdp);
+ */
+ENTRY(lidt)
+ movl 4(%esp),%eax
+ lidt (%eax)
+ ret
+
+/*
+ * void lldt(u_short sel)
+ */
+ENTRY(lldt)
+ lldt 4(%esp)
+ ret
+
+/*
+ * void ltr(u_short sel)
+ */
+ENTRY(ltr)
+ ltr 4(%esp)
+ ret
+
+/* ssdtosd(*ssdp,*sdp) */
+ENTRY(ssdtosd)
+ pushl %ebx
+ movl 8(%esp),%ecx
+ movl 8(%ecx),%ebx
+ shll $16,%ebx
+ movl (%ecx),%edx
+ roll $16,%edx
+ movb %dh,%bl
+ movb %dl,%bh
+ rorl $8,%ebx
+ movl 4(%ecx),%eax
+ movw %ax,%dx
+ andl $0xf0000,%eax
+ orl %eax,%ebx
+ movl 12(%esp),%ecx
+ movl %edx,(%ecx)
+ movl %ebx,4(%ecx)
+ popl %ebx
+ ret
+
+
+/* tlbflush() */
+ENTRY(tlbflush)
+ movl %cr3,%eax
+ orl $I386_CR3PAT,%eax
+ movl %eax,%cr3
+ ret
+
+
+/* load_cr0(cr0) */
+ENTRY(load_cr0)
+ movl 4(%esp),%eax
+ movl %eax,%cr0
+ ret
+
+
+/* rcr0() */
+ENTRY(rcr0)
+ movl %cr0,%eax
+ ret
+
+
+/* rcr2() */
+ENTRY(rcr2)
+ movl %cr2,%eax
+ ret
+
+
+/* rcr3() */
+ENTRY(rcr3)
+ movl %cr3,%eax
+ ret
+
+
+/* void load_cr3(caddr_t cr3) */
+ENTRY(load_cr3)
+ movl 4(%esp),%eax
+ orl $I386_CR3PAT,%eax
+ movl %eax,%cr3
+ ret
+
+
+/*****************************************************************************/
+/* setjump, longjump */
+/*****************************************************************************/
+
+ENTRY(setjmp)
+ movl 4(%esp),%eax
+ movl %ebx,(%eax) /* save ebx */
+ movl %esp,4(%eax) /* save esp */
+ movl %ebp,8(%eax) /* save ebp */
+ movl %esi,12(%eax) /* save esi */
+ movl %edi,16(%eax) /* save edi */
+ movl (%esp),%edx /* get rta */
+ movl %edx,20(%eax) /* save eip */
+ xorl %eax,%eax /* return(0); */
+ ret
+
+ENTRY(longjmp)
+ movl 4(%esp),%eax
+ movl (%eax),%ebx /* restore ebx */
+ movl 4(%eax),%esp /* restore esp */
+ movl 8(%eax),%ebp /* restore ebp */
+ movl 12(%eax),%esi /* restore esi */
+ movl 16(%eax),%edi /* restore edi */
+ movl 20(%eax),%edx /* get rta */
+ movl %edx,(%esp) /* put in return frame */
+ xorl %eax,%eax /* return(1); */
+ incl %eax
+ ret
+
diff --git a/sys/amd64/amd64/support.s b/sys/amd64/amd64/support.s
new file mode 100644
index 0000000..a2ed642
--- /dev/null
+++ b/sys/amd64/amd64/support.s
@@ -0,0 +1,1031 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "assym.s" /* system definitions */
+#include "errno.h" /* error return codes */
+#include "machine/asmacros.h" /* miscellaneous asm macros */
+
+#define KDSEL 0x10 /* kernel data selector */
+#define IDXSHIFT 10
+
+/*
+ * Support routines for GCC, general C-callable functions
+ */
+ENTRY(__udivsi3)
+ movl 4(%esp),%eax
+ xorl %edx,%edx
+ divl 8(%esp)
+ ret
+
+ENTRY(__divsi3)
+ movl 4(%esp),%eax
+ cltd
+ idivl 8(%esp)
+ ret
+
+ /*
+ * I/O bus instructions via C
+ */
+ENTRY(inb) /* val = inb(port) */
+ movl 4(%esp),%edx
+ subl %eax,%eax
+ NOP
+ inb %dx,%al
+ ret
+
+ENTRY(inw) /* val = inw(port) */
+ movl 4(%esp),%edx
+ subl %eax,%eax
+ NOP
+ inw %dx,%ax
+ ret
+
+ENTRY(insb) /* insb(port, addr, cnt) */
+ pushl %edi
+ movw 8(%esp),%dx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ insb
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ENTRY(insw) /* insw(port, addr, cnt) */
+ pushl %edi
+ movw 8(%esp),%dx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ insw
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ENTRY(rtcin) /* rtcin(val) */
+ movl 4(%esp),%eax
+ outb %al,$0x70
+ NOP
+ xorl %eax,%eax
+ inb $0x71,%al
+ ret
+
+ENTRY(outb) /* outb(port, val) */
+ movl 4(%esp),%edx
+ NOP
+ movl 8(%esp),%eax
+ outb %al,%dx
+ NOP
+ ret
+
+ENTRY(outw) /* outw(port, val) */
+ movl 4(%esp),%edx
+ NOP
+ movl 8(%esp),%eax
+ outw %ax,%dx
+ NOP
+ ret
+
+ENTRY(outsb) /* outsb(port, addr, cnt) */
+ pushl %esi
+ movw 8(%esp),%dx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ outsb
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+ENTRY(outsw) /* outsw(port, addr, cnt) */
+ pushl %esi
+ movw 8(%esp),%dx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ outsw
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+/*
+ * bcopy family
+ */
+/* void bzero(void *base, u_int cnt) */
+ENTRY(bzero)
+ pushl %edi
+ movl 8(%esp),%edi
+ movl 12(%esp),%ecx
+ xorl %eax,%eax
+ shrl $2,%ecx
+ cld
+ rep
+ stosl
+ movl 12(%esp),%ecx
+ andl $3,%ecx
+ rep
+ stosb
+ popl %edi
+ ret
+
+/* fillw(pat, base, cnt) */
+ENTRY(fillw)
+ pushl %edi
+ movl 8(%esp),%eax
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ stosw
+ popl %edi
+ ret
+
+ENTRY(bcopyb)
+bcopyb:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards. */
+ addl %ecx,%esi
+ std
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ENTRY(bcopyw)
+bcopyw:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ shrl $1,%ecx /* copy by 16-bit words */
+ rep
+ movsw
+ adc %ecx,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ std
+ andl $1,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ movl 20(%esp),%ecx /* copy remainder by 16-bit words */
+ shrl $1,%ecx
+ decl %esi
+ decl %edi
+ rep
+ movsw
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ENTRY(bcopyx)
+ movl 16(%esp),%eax
+ cmpl $2,%eax
+ je bcopyw /* not _bcopyw, to avoid multiple mcounts */
+ cmpl $4,%eax
+ je bcopy
+ jmp bcopyb
+
+/*
+ * (ov)bcopy(src, dst, cnt)
+ * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
+ */
+ALTENTRY(ovbcopy)
+ENTRY(bcopy)
+bcopy:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ shrl $2,%ecx /* copy by 32-bit words */
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ std
+ andl $3,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ movl 20(%esp),%ecx /* copy remainder by 32-bit words */
+ shrl $2,%ecx
+ subl $3,%esi
+ subl $3,%edi
+ rep
+ movsl
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ALTENTRY(ntohl)
+ENTRY(htonl)
+ movl 4(%esp),%eax
+#ifdef i486
+/* XXX */
+/* Since Gas 1.38 does not grok bswap this has been coded as the
+ * equivalent bytes. This can be changed back to bswap when we
+ * upgrade to a newer version of Gas */
+ /* bswap %eax */
+ .byte 0x0f
+ .byte 0xc8
+#else
+ xchgb %al,%ah
+ roll $16,%eax
+ xchgb %al,%ah
+#endif
+ ret
+
+ALTENTRY(ntohs)
+ENTRY(htons)
+ movzwl 4(%esp),%eax
+ xchgb %al,%ah
+ ret
+
+/*****************************************************************************/
+/* copyout and fubyte family */
+/*****************************************************************************/
+/*
+ * Access user memory from inside the kernel. These routines and possibly
+ * the math- and DOS emulators should be the only places that do this.
+ *
+ * We have to access the memory with user's permissions, so use a segment
+ * selector with RPL 3. For writes to user space we have to additionally
+ * check the PTE for write permission, because the 386 does not check
+ * write permissions when we are executing with EPL 0. The 486 does check
+ * this if the WP bit is set in CR0, so we can use a simpler version here.
+ *
+ * These routines set curpcb->onfault for the time they execute. When a
+ * protection violation occurs inside the functions, the trap handler
+ * returns to *curpcb->onfault instead of the function.
+ */
+
+
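+/*
+ * A minimal caller sketch (names assumed for illustration): the return
+ * value is 0 on success and EFAULT when the fault handler below is taken,
+ * so a routine copying a structure out to a user pointer would do
+ *
+ *	struct foo kf;
+ *	...
+ *	if (copyout((caddr_t)&kf, uaddr, sizeof(kf)) != 0)
+ *		return (EFAULT);
+ */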
+ENTRY(copyout) /* copyout(from_kernel, to_user, len) */
+ movl _curpcb,%eax
+ movl $copyout_fault,PCB_ONFAULT(%eax)
+ pushl %esi
+ pushl %edi
+ pushl %ebx
+ movl 16(%esp),%esi
+ movl 20(%esp),%edi
+ movl 24(%esp),%ebx
+ orl %ebx,%ebx /* anything to do? */
+ jz done_copyout
+
+ /*
+ * Check explicitly for non-user addresses. If 486 write protection
+ * is being used, this check is essential because we are in kernel
+ * mode so the h/w does not provide any protection against writing
+ * kernel addresses.
+ *
+ * Otherwise, it saves having to load and restore %es to get the
+ * usual segment-based protection (the destination segment for movs
+ * is always %es). The other explicit checks for user-writability
+ * are not quite sufficient. They fail for the user area because
+ * we mapped the user area read/write to avoid having an #ifdef in
+ * vm_machdep.c. They fail for user PTEs and/or PTDs! (107
+ * addresses including 0xff800000 and 0xfc000000). I'm not sure if
+ * this can be fixed. Marking the PTEs supervisor mode and the
+ * PDE's user mode would almost work, but there may be a problem
+ * with the self-referential PDE.
+ */
+ movl %edi,%eax
+ addl %ebx,%eax
+ jc copyout_fault
+#define VM_END_USER_ADDRESS 0xFDBFE000 /* XXX */
+ cmpl $VM_END_USER_ADDRESS,%eax
+ ja copyout_fault
+
+#ifndef USE_486_WRITE_PROTECT
+/*
+ * We have to check each PTE for user write permission.
+ * The checking may cause a page fault, so it is important to set
+ * up everything for return via copyout_fault before here.
+ */
+ /* compute number of pages */
+ movl %edi,%ecx
+ andl $NBPG-1,%ecx
+ addl %ebx,%ecx
+ decl %ecx
+ shrl $IDXSHIFT+2,%ecx
+ incl %ecx
+
+ /* compute PTE offset for start address */
+ movl %edi,%edx
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+
+1: /* check PTE for each page */
+ movb _PTmap(%edx),%al
+ andb $0x07,%al /* Pages must be VALID + USERACC + WRITABLE */
+ cmpb $0x07,%al
+ je 2f
+
+ /* simulate a trap */
+ pushl %edx
+ pushl %ecx
+ shll $IDXSHIFT,%edx
+ pushl %edx
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+ popl %ecx
+ popl %edx
+
+ orl %eax,%eax /* if not ok, return EFAULT */
+ jnz copyout_fault
+
+2:
+ addl $4,%edx
+ decl %ecx
+ jnz 1b /* check next page */
+#endif /* ndef USE_486_WRITE_PROTECT */
+
+ /* bcopy(%esi, %edi, %ebx) */
+ cld
+ movl %ebx,%ecx
+ shrl $2,%ecx
+ rep
+ movsl
+ movb %bl,%cl
+ andb $3,%cl /* XXX can we trust the rest of %ecx on clones? */
+ rep
+ movsb
+
+done_copyout:
+ popl %ebx
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+copyout_fault:
+ popl %ebx
+ popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $EFAULT,%eax
+ ret
+
+/* copyin(from_user, to_kernel, len) */
+ENTRY(copyin)
+ movl _curpcb,%eax
+ movl $copyin_fault,PCB_ONFAULT(%eax)
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi /* caddr_t from */
+ movl 16(%esp),%edi /* caddr_t to */
+ movl 20(%esp),%ecx /* size_t len */
+
+ movb %cl,%al
+ shrl $2,%ecx /* copy longword-wise */
+ cld
+ gs
+ rep
+ movsl
+ movb %al,%cl
+ andb $3,%cl /* copy remaining bytes */
+ gs
+ rep
+ movsb
+
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+copyin_fault:
+ popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $EFAULT,%eax
+ ret
+
+/*
+ * fu{byte,sword,word} : fetch a byte(sword, word) from user memory
+ */
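+/*
+ * These return -1 if the access faults (see fusufault below), so a stored
+ * value of -1 is indistinguishable from a fault; caller sketch:
+ *
+ *	int v = fuword((caddr_t)uaddr);
+ *	if (v == -1)
+ *		...fault (or the word really was -1)...
+ */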
+ALTENTRY(fuiword)
+ENTRY(fuword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ gs
+ movl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ENTRY(fusword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ gs
+ movzwl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(fuibyte)
+ENTRY(fubyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ gs
+ movzbl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ ALIGN_TEXT
+fusufault:
+ movl _curpcb,%ecx
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ decl %eax
+ ret
+
+/*
+ * su{byte,sword,word}: write a byte(word, longword) to user memory
+ */
+#ifdef USE_486_WRITE_PROTECT
+/*
+ * we only have to set the right segment selector.
+ */
+ALTENTRY(suiword)
+ENTRY(suword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ gs
+ movl %eax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ENTRY(susword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movw 8(%esp),%ax
+ gs
+ movw %ax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(suibyte)
+ENTRY(subyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movb 8(%esp),%al
+ gs
+ movb %al,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+
+#else /* USE_486_WRITE_PROTECT */
+/*
+ * here starts the trouble again: check PTE, twice if word crosses
+ * a page boundary.
+ */
+/* XXX - page boundary crossing is not handled yet */
+
+ALTENTRY(suibyte)
+ENTRY(subyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ gs
+ movb %al,(%edx)
+ xorl %eax,%eax
+ movl _curpcb,%ecx
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ENTRY(susword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ gs
+ movw %ax,(%edx)
+ xorl %eax,%eax
+ movl _curpcb,%ecx
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(suiword)
+ENTRY(suword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ gs
+ movl %eax,0(%edx)
+ xorl %eax,%eax
+ movl _curpcb,%ecx
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+#endif /* USE_486_WRITE_PROTECT */
+
+/*
+ * copyoutstr(from, to, maxlen, int *lencopied)
+ * copy a string from 'from' to 'to', stopping when a 0 character is reached.
+ * return ENAMETOOLONG if string is longer than maxlen, and
+ * EFAULT on protection violations. If lencopied is non-zero,
+ * return the actual length in *lencopied.
+ */
+#ifdef USE_486_WRITE_PROTECT
+
+ENTRY(copyoutstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $cpystrflt,PCB_ONFAULT(%ecx)
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ incl %edx
+
+1:
+ decl %edx
+ jz 4f
+ /*
+ * gs override doesn't work for stosb. Use the same explicit check
+ * as in copyout(). It's much slower now because it is per-char.
+ * XXX - however, it would be faster to rewrite this function to use
+ * strlen() and copyout().
+ */
+ cmpl $VM_END_USER_ADDRESS,%edi
+ jae cpystrflt
+ lodsb
+ gs
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+4:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp 6f
+
+#else /* ndef USE_486_WRITE_PROTECT */
+
+ENTRY(copyoutstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $cpystrflt,PCB_ONFAULT(%ecx)
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+1:
+ /*
+ * It suffices to check that the first byte is in user space, because
+ * we look at a page at a time and the end address is on a page
+ * boundary.
+ */
+ cmpl $VM_END_USER_ADDRESS,%edi
+ jae cpystrflt
+ movl %edi,%eax
+ shrl $IDXSHIFT,%eax
+ andb $0xfc,%al
+ movb _PTmap(%eax),%al
+ andb $7,%al
+ cmpb $7,%al
+ je 2f
+
+ /* simulate trap */
+ pushl %edx
+ pushl %edi
+ call _trapwrite
+ popl %edi
+ popl %edx
+ orl %eax,%eax
+ jnz cpystrflt
+
+2: /* copy up to end of this page */
+ movl %edi,%eax
+ andl $NBPG-1,%eax
+ movl $NBPG,%ecx
+	subl	%eax,%ecx		/* ecx = NBPG - (dst % NBPG) */
+ cmpl %ecx,%edx
+ jge 3f
+ movl %edx,%ecx /* ecx = min(ecx, edx) */
+3:
+ orl %ecx,%ecx
+ jz 4f
+ decl %ecx
+ decl %edx
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 3b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+
+4: /* next page */
+ orl %edx,%edx
+ jnz 1b
+
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp 6f
+
+#endif /* USE_486_WRITE_PROTECT */
+
+/*
+ * copyinstr(from, to, maxlen, int *lencopied)
+ * copy a string from 'from' to 'to', stopping when a 0 character is reached.
+ * return ENAMETOOLONG if string is longer than maxlen, and
+ * EFAULT on protection violations. If lencopied is non-zero,
+ * return the actual length in *lencopied.
+ */
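+/*
+ * Caller sketch for this family (names assumed for illustration), here
+ * copying a user-supplied path into a kernel buffer; the return value is
+ * 0, ENAMETOOLONG or EFAULT as described above, and *lencopied receives
+ * the length actually copied:
+ *
+ *	char kpath[MAXPATHLEN];
+ *	int error, len;
+ *
+ *	if ((error = copyinstr(upath, kpath, MAXPATHLEN, &len)) != 0)
+ *		return (error);
+ */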
+ENTRY(copyinstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $cpystrflt,PCB_ONFAULT(%ecx)
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ incl %edx
+
+1:
+ decl %edx
+ jz 4f
+ gs
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+4:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp 6f
+
+cpystrflt:
+ movl $EFAULT,%eax
+6:
+ /* set *lencopied and return %eax */
+ movl _curpcb,%ecx
+ movl $0,PCB_ONFAULT(%ecx)
+ movl 20(%esp),%ecx
+ subl %edx,%ecx
+ movl 24(%esp),%edx
+ orl %edx,%edx
+ jz 7f
+ movl %ecx,(%edx)
+7:
+ popl %edi
+ popl %esi
+ ret
+
+
+/*
+ * copystr(from, to, maxlen, int *lencopied)
+ */
+ENTRY(copystr)
+ pushl %esi
+ pushl %edi
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ incl %edx
+
+1:
+ decl %edx
+ jz 4f
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+4:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+
+6:
+ /* set *lencopied and return %eax */
+ movl 20(%esp),%ecx
+ subl %edx,%ecx
+ movl 24(%esp),%edx
+ orl %edx,%edx
+ jz 7f
+ movl %ecx,(%edx)
+7:
+ popl %edi
+ popl %esi
+ ret
+
+/*
+ * Handling of special 386 registers and descriptor tables etc
+ */
+/* void lgdt(struct region_descriptor *rdp); */
+ENTRY(lgdt)
+ /* reload the descriptor table */
+ movl 4(%esp),%eax
+ lgdt (%eax)
+
+ /* flush the prefetch q */
+ jmp 1f
+ nop
+1:
+ /* reload "stale" selectors */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ movl %ax,%ss
+
+ /* reload code selector by turning return into intersegmental return */
+ movl (%esp),%eax
+ pushl %eax
+# movl $KCSEL,4(%esp)
+ movl $8,4(%esp)
+ lret
+
+/*
+ * void lidt(struct region_descriptor *rdp);
+ */
+ENTRY(lidt)
+ movl 4(%esp),%eax
+ lidt (%eax)
+ ret
+
+/*
+ * void lldt(u_short sel)
+ */
+ENTRY(lldt)
+ lldt 4(%esp)
+ ret
+
+/*
+ * void ltr(u_short sel)
+ */
+ENTRY(ltr)
+ ltr 4(%esp)
+ ret
+
+/* ssdtosd(*ssdp,*sdp) */
+ENTRY(ssdtosd)
+ pushl %ebx
+ movl 8(%esp),%ecx
+ movl 8(%ecx),%ebx
+ shll $16,%ebx
+ movl (%ecx),%edx
+ roll $16,%edx
+ movb %dh,%bl
+ movb %dl,%bh
+ rorl $8,%ebx
+ movl 4(%ecx),%eax
+ movw %ax,%dx
+ andl $0xf0000,%eax
+ orl %eax,%ebx
+ movl 12(%esp),%ecx
+ movl %edx,(%ecx)
+ movl %ebx,4(%ecx)
+ popl %ebx
+ ret
+
+
+/* tlbflush() */
+ENTRY(tlbflush)
+ movl %cr3,%eax
+ orl $I386_CR3PAT,%eax
+ movl %eax,%cr3
+ ret
+
+
+/* load_cr0(cr0) */
+ENTRY(load_cr0)
+ movl 4(%esp),%eax
+ movl %eax,%cr0
+ ret
+
+
+/* rcr0() */
+ENTRY(rcr0)
+ movl %cr0,%eax
+ ret
+
+
+/* rcr2() */
+ENTRY(rcr2)
+ movl %cr2,%eax
+ ret
+
+
+/* rcr3() */
+ENTRY(rcr3)
+ movl %cr3,%eax
+ ret
+
+
+/* void load_cr3(caddr_t cr3) */
+ENTRY(load_cr3)
+ movl 4(%esp),%eax
+ orl $I386_CR3PAT,%eax
+ movl %eax,%cr3
+ ret
+
+
+/*****************************************************************************/
+/* setjump, longjump */
+/*****************************************************************************/
+
+ENTRY(setjmp)
+ movl 4(%esp),%eax
+ movl %ebx,(%eax) /* save ebx */
+ movl %esp,4(%eax) /* save esp */
+ movl %ebp,8(%eax) /* save ebp */
+ movl %esi,12(%eax) /* save esi */
+ movl %edi,16(%eax) /* save edi */
+ movl (%esp),%edx /* get rta */
+ movl %edx,20(%eax) /* save eip */
+ xorl %eax,%eax /* return(0); */
+ ret
+
+ENTRY(longjmp)
+ movl 4(%esp),%eax
+ movl (%eax),%ebx /* restore ebx */
+ movl 4(%eax),%esp /* restore esp */
+ movl 8(%eax),%ebp /* restore ebp */
+ movl 12(%eax),%esi /* restore esi */
+ movl 16(%eax),%edi /* restore edi */
+ movl 20(%eax),%edx /* get rta */
+ movl %edx,(%esp) /* put in return frame */
+ xorl %eax,%eax /* return(1); */
+ incl %eax
+ ret
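+
+/*
+ * The usual convention applies: setjmp() returns 0 on the direct call and
+ * 1 when control returns through longjmp().  Caller sketch, with `jbuf'
+ * standing for the six-longword save area filled in above:
+ *
+ *	if (setjmp(&jbuf) == 0) {
+ *		...normal path, may later call longjmp(&jbuf)...
+ *	} else {
+ *		...resumed here after the longjmp...
+ *	}
+ */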
+
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
new file mode 100644
index 0000000..ec6e8bc
--- /dev/null
+++ b/sys/amd64/amd64/swtch.s
@@ -0,0 +1,435 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "npx.h" /* for NNPX */
+#include "assym.s" /* for preprocessor defines */
+#include "errno.h" /* for error codes */
+
+#include "i386/isa/debug.h" /* for SHOW macros */
+#include "machine/asmacros.h" /* for miscellaneous assembly macros */
+
+/*****************************************************************************/
+/* Scheduling */
+/*****************************************************************************/
+
+/*
+ * The following primitives manipulate the run queues.
+ * _whichqs tells which of the 32 queues _qs
+ * have processes in them. Setrq puts processes into queues, Remrq
+ * removes them from queues. The running process is on no queue,
+ * other processes are on a queue related to p->p_pri, divided by 4
+ * actually to shrink the 0-127 range of priorities into the 32 available
+ * queues.
+ */
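+/*
+ * In C terms, the queue selection coded in setrq/remrq/swtch below is
+ * roughly (sketch only):
+ *
+ *	qindex = p->p_pri >> 2;			0..31
+ *	whichqs |= 1 << qindex;			mark queue non-empty
+ *	insert p at the tail of qs[qindex];
+ */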
+ .data
+ .globl _curpcb, _whichqs
+_curpcb: .long 0 /* pointer to curproc's PCB area */
+_whichqs: .long 0 /* which run queues have data */
+
+ .globl _qs,_cnt,_panic
+ .comm _noproc,4
+ .comm _runrun,4
+
+ .globl _want_resched
+_want_resched: .long 0 /* we need to re-run the scheduler */
+
+ .text
+/*
+ * Setrq(p)
+ *
+ * Call should be made at spl6(), and p->p_stat should be SRUN
+ */
+ENTRY(setrq)
+ movl 4(%esp),%eax
+ cmpl $0,P_RLINK(%eax) /* should not be on q already */
+ je set1
+ pushl $set2
+ call _panic
+set1:
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btsl %edx,_whichqs /* set q full bit */
+ shll $3,%edx
+ addl $_qs,%edx /* locate q hdr */
+ movl %edx,P_LINK(%eax) /* link process on tail of q */
+ movl P_RLINK(%edx),%ecx
+ movl %ecx,P_RLINK(%eax)
+ movl %eax,P_RLINK(%edx)
+ movl %eax,P_LINK(%ecx)
+ ret
+
+set2: .asciz "setrq"
+
+/*
+ * Remrq(p)
+ *
+ * Call should be made at spl6().
+ */
+ENTRY(remrq)
+ movl 4(%esp),%eax
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btrl %edx,_whichqs /* clear full bit, panic if clear already */
+ jb rem1
+ pushl $rem3
+ call _panic
+rem1:
+ pushl %edx
+ movl P_LINK(%eax),%ecx /* unlink process */
+ movl P_RLINK(%eax),%edx
+ movl %edx,P_RLINK(%ecx)
+ movl P_RLINK(%eax),%ecx
+ movl P_LINK(%eax),%edx
+ movl %edx,P_LINK(%ecx)
+ popl %edx
+ movl $_qs,%ecx
+ shll $3,%edx
+ addl %edx,%ecx
+ cmpl P_LINK(%ecx),%ecx /* q still has something? */
+ je rem2
+ shrl $3,%edx /* yes, set bit as still full */
+ btsl %edx,_whichqs
+rem2:
+ movl $0,P_RLINK(%eax) /* zap reverse link to indicate off list */
+ ret
+
+rem3: .asciz "remrq"
+sw0: .asciz "swtch"
+
+/*
+ * When no processes are on the runq, Swtch branches to idle
+ * to wait for something to come ready.
+ */
+ ALIGN_TEXT
+Idle:
+ sti
+ SHOW_STI
+
+ ALIGN_TEXT
+idle_loop:
+ call _spl0
+ cmpl $0,_whichqs
+ jne sw1
+ hlt /* wait for interrupt */
+ jmp idle_loop
+
+badsw:
+ pushl $sw0
+ call _panic
+ /*NOTREACHED*/
+
+/*
+ * Swtch()
+ */
+ SUPERALIGN_TEXT /* so profiling doesn't lump Idle with swtch().. */
+ENTRY(swtch)
+
+ incl _cnt+V_SWTCH
+
+ /* switch to new process. first, save context as needed */
+
+ movl _curproc,%ecx
+
+ /* if no process to save, don't bother */
+ testl %ecx,%ecx
+ je sw1
+
+ movl P_ADDR(%ecx),%ecx
+
+ movl (%esp),%eax /* Hardware registers */
+ movl %eax,PCB_EIP(%ecx)
+ movl %ebx,PCB_EBX(%ecx)
+ movl %esp,PCB_ESP(%ecx)
+ movl %ebp,PCB_EBP(%ecx)
+ movl %esi,PCB_ESI(%ecx)
+ movl %edi,PCB_EDI(%ecx)
+
+#if NNPX > 0
+ /* have we used fp, and need a save? */
+ mov _curproc,%eax
+ cmp %eax,_npxproc
+ jne 1f
+ pushl %ecx /* h/w bugs make saving complicated */
+ leal PCB_SAVEFPU(%ecx),%eax
+ pushl %eax
+ call _npxsave /* do it in a big C function */
+ popl %eax
+ popl %ecx
+1:
+#endif /* NNPX > 0 */
+
+ movl _CMAP2,%eax /* save temporary map PTE */
+ movl %eax,PCB_CMAP2(%ecx) /* in our context */
+ movl $0,_curproc /* out of process */
+
+# movw _cpl,%ax
+# movw %ax,PCB_IML(%ecx) /* save ipl */
+
+ /* save is done, now choose a new process or idle */
+sw1:
+ cli
+ SHOW_CLI
+ movl _whichqs,%edi
+2:
+ /* XXX - bsf is sloow */
+ bsfl %edi,%eax /* find a full q */
+ je Idle /* if none, idle */
+ /* XX update whichqs? */
+swfnd:
+ btrl %eax,%edi /* clear q full status */
+ jnb 2b /* if it was clear, look for another */
+ movl %eax,%ebx /* save which one we are using */
+
+ shll $3,%eax
+ addl $_qs,%eax /* select q */
+ movl %eax,%esi
+
+#ifdef DIAGNOSTIC
+	cmpl	P_LINK(%eax),%eax	/* linked to self? (i.e. not on list) */
+ je badsw /* not possible */
+#endif
+
+ movl P_LINK(%eax),%ecx /* unlink from front of process q */
+ movl P_LINK(%ecx),%edx
+ movl %edx,P_LINK(%eax)
+ movl P_RLINK(%ecx),%eax
+ movl %eax,P_RLINK(%edx)
+
+ cmpl P_LINK(%ecx),%esi /* q empty */
+ je 3f
+ btsl %ebx,%edi /* nope, set to indicate full */
+3:
+ movl %edi,_whichqs /* update q status */
+
+ movl $0,%eax
+ movl %eax,_want_resched
+
+#ifdef DIAGNOSTIC
+ cmpl %eax,P_WCHAN(%ecx)
+ jne badsw
+ cmpb $SRUN,P_STAT(%ecx)
+ jne badsw
+#endif
+
+ movl %eax,P_RLINK(%ecx) /* isolate process to run */
+ movl P_ADDR(%ecx),%edx
+ movl PCB_CR3(%edx),%ebx
+
+ /* switch address space */
+ movl %ebx,%cr3
+
+ /* restore context */
+ movl PCB_EBX(%edx),%ebx
+ movl PCB_ESP(%edx),%esp
+ movl PCB_EBP(%edx),%ebp
+ movl PCB_ESI(%edx),%esi
+ movl PCB_EDI(%edx),%edi
+ movl PCB_EIP(%edx),%eax
+ movl %eax,(%esp)
+
+ movl PCB_CMAP2(%edx),%eax /* get temporary map */
+ movl %eax,_CMAP2 /* reload temporary map PTE */
+
+ movl %ecx,_curproc /* into next process */
+ movl %edx,_curpcb
+
+ pushl %edx /* save p to return */
+/*
+ * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
+ * I think restoring the cpl is unnecessary, but we must turn off the cli
+ * now that spl*() don't do it as a side effect.
+ */
+ pushl PCB_IML(%edx)
+ sti
+ SHOW_STI
+#if 0
+ call _splx
+#endif
+ addl $4,%esp
+/*
+ * XXX - 0.0 gets here via swtch_to_inactive(). I think 0.1 gets here in the
+ * same way. Better return a value.
+ */
+ popl %eax /* return(p); */
+ ret
+
+ENTRY(mvesp)
+ movl %esp,%eax
+ ret
+/*
+ * struct proc *swtch_to_inactive(p) ; struct proc *p;
+ *
+ * At exit of a process, move off the address space of the
+ * process and onto a "safe" one. Then, on a temporary stack
+ * return and run code that disposes of the old state.
+ * Since this code requires a parameter from the "old" stack,
+ * pass it back as a return value.
+ */
+ENTRY(swtch_to_inactive)
+ popl %edx /* old pc */
+ popl %eax /* arg, our return value */
+ movl _IdlePTD,%ecx
+ movl %ecx,%cr3 /* good bye address space */
+ #write buffer?
+ movl $tmpstk-4,%esp /* temporary stack, compensated for call */
+ jmp %edx /* return, execute remainder of cleanup */
+
+/*
+ * savectx(pcb, altreturn)
+ * Update pcb, saving current processor state and arranging
+ * for alternate return ala longjmp in swtch if altreturn is true.
+ */
+ENTRY(savectx)
+ movl 4(%esp),%ecx
+ movw _cpl,%ax
+ movw %ax,PCB_IML(%ecx)
+ movl (%esp),%eax
+ movl %eax,PCB_EIP(%ecx)
+ movl %ebx,PCB_EBX(%ecx)
+ movl %esp,PCB_ESP(%ecx)
+ movl %ebp,PCB_EBP(%ecx)
+ movl %esi,PCB_ESI(%ecx)
+ movl %edi,PCB_EDI(%ecx)
+
+#if NNPX > 0
+ /*
+ * If npxproc == NULL, then the npx h/w state is irrelevant and the
+ * state had better already be in the pcb. This is true for forks
+ * but not for dumps (the old book-keeping with FP flags in the pcb
+ * always lost for dumps because the dump pcb has 0 flags).
+ *
+ * If npxproc != NULL, then we have to save the npx h/w state to
+ * npxproc's pcb and copy it to the requested pcb, or save to the
+ * requested pcb and reload. Copying is easier because we would
+ * have to handle h/w bugs for reloading. We used to lose the
+ * parent's npx state for forks by forgetting to reload.
+ */
+ mov _npxproc,%eax
+ testl %eax,%eax
+ je 1f
+
+ pushl %ecx
+ movl P_ADDR(%eax),%eax
+ leal PCB_SAVEFPU(%eax),%eax
+ pushl %eax
+ pushl %eax
+ call _npxsave
+ popl %eax
+ popl %eax
+ popl %ecx
+
+ pushl %ecx
+ pushl $108+8*2 /* XXX h/w state size + padding */
+ leal PCB_SAVEFPU(%ecx),%ecx
+ pushl %ecx
+ pushl %eax
+ call _bcopy
+ addl $12,%esp
+ popl %ecx
+1:
+#endif /* NNPX > 0 */
+
+ movl _CMAP2,%edx /* save temporary map PTE */
+ movl %edx,PCB_CMAP2(%ecx) /* in our context */
+
+ cmpl $0,8(%esp)
+ je 1f
+ movl %esp,%edx /* relocate current sp relative to pcb */
+ subl $_kstack,%edx /* (sp is relative to kstack): */
+ addl %edx,%ecx /* pcb += sp - kstack; */
+ movl %eax,(%ecx) /* write return pc at (relocated) sp@ */
+
+/* this mess deals with replicating register state gcc hides */
+ movl 12(%esp),%eax
+ movl %eax,12(%ecx)
+ movl 16(%esp),%eax
+ movl %eax,16(%ecx)
+ movl 20(%esp),%eax
+ movl %eax,20(%ecx)
+ movl 24(%esp),%eax
+ movl %eax,24(%ecx)
+1:
+ xorl %eax,%eax /* return 0 */
+ ret
+
+/*
+ * addupc(int pc, struct uprof *up, int ticks):
+ * update profiling information for the user process.
+ */
+ENTRY(addupc)
+ pushl %ebp
+ movl %esp,%ebp
+ movl 12(%ebp),%edx /* up */
+ movl 8(%ebp),%eax /* pc */
+
+ subl PR_OFF(%edx),%eax /* pc -= up->pr_off */
+ jl L1 /* if (pc < 0) return */
+
+ shrl $1,%eax /* praddr = pc >> 1 */
+ imull PR_SCALE(%edx),%eax /* praddr *= up->pr_scale */
+	shrl	$15,%eax		/* praddr = praddr >> 15 */
+ andl $-2,%eax /* praddr &= ~1 */
+
+ cmpl PR_SIZE(%edx),%eax /* if (praddr > up->pr_size) return */
+ ja L1
+
+/* addl %eax,%eax /* praddr -> word offset */
+ addl PR_BASE(%edx),%eax /* praddr += up-> pr_base */
+ movl 16(%ebp),%ecx /* ticks */
+
+ movl _curpcb,%edx
+ movl $proffault,PCB_ONFAULT(%edx)
+ addl %ecx,(%eax) /* storage location += ticks */
+ movl $0,PCB_ONFAULT(%edx)
+L1:
+ leave
+ ret
+
+ ALIGN_TEXT
+proffault:
+	/* if we get a fault, then kill profiling altogether */
+ movl $0,PCB_ONFAULT(%edx) /* squish the fault handler */
+ movl 12(%ebp),%ecx
+ movl $0,PR_SCALE(%ecx) /* up->pr_scale = 0 */
+ leave
+ ret
+
+/* To be done: */
+ENTRY(astoff)
+ ret
+
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index c224396..92247dd 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
- * $Id: trap.c,v 1.5 1993/11/01 11:51:29 chmr Exp $
+ * $Id: trap.c,v 1.6 1993/11/04 15:05:41 davidg Exp $
*/
/*
@@ -85,6 +85,38 @@ int dostacklimits;
unsigned rcr2();
extern short cpl;
+#define MAX_TRAP_MSG 27
+char *trap_msg[] = {
+ "reserved addressing fault", /* 0 T_RESADFLT */
+ "privileged instruction fault", /* 1 T_PRIVINFLT */
+ "reserved operand fault", /* 2 T_RESOPFLT */
+ "breakpoint instruction fault", /* 3 T_BPTFLT */
+ "", /* 4 unused */
+ "system call trap", /* 5 T_SYSCALL */
+ "arithmetic trap", /* 6 T_ARITHTRAP */
+ "system forced exception", /* 7 T_ASTFLT */
+ "segmentation (limit) fault", /* 8 T_SEGFLT */
+ "protection fault", /* 9 T_PROTFLT */
+ "trace trap", /* 10 T_TRCTRAP */
+ "", /* 11 unused */
+ "page fault", /* 12 T_PAGEFLT */
+ "page table fault", /* 13 T_TABLEFLT */
+ "alignment fault", /* 14 T_ALIGNFLT */
+ "kernel stack pointer not valid", /* 15 T_KSPNOTVAL */
+ "bus error", /* 16 T_BUSERR */
+ "kernel debugger fault", /* 17 T_KDBTRAP */
+ "integer divide fault", /* 18 T_DIVIDE */
+ "non-maskable interrupt trap", /* 19 T_NMI */
+ "overflow trap", /* 20 T_OFLOW */
+ "FPU bounds check fault", /* 21 T_BOUND */
+ "FPU device not available", /* 22 T_DNA */
+ "double fault", /* 23 T_DOUBLEFLT */
+ "FPU operand fetch fault", /* 24 T_FPOPFLT */
+ "invalid TSS fault", /* 25 T_TSSFLT */
+ "segment not present fault", /* 26 T_SEGNPFLT */
+ "stack fault", /* 27 T_STKFLT */
+};
+
/*
* trap(frame):
@@ -165,13 +197,23 @@ copyfault:
return;
#endif
- printf("trap type %d code = %x eip = %x cs = %x eflags = %x ",
+ if ((type & ~T_USER) <= MAX_TRAP_MSG)
+ printf("\n\nFatal trap %d: %s while in %s mode\n",
+ type & ~T_USER, trap_msg[type & ~T_USER],
+ (type & T_USER) ? "user" : "kernel");
+
+ printf("trap type = %d, code = %x\n eip = %x, cs = %x, eflags = %x, ",
frame.tf_trapno, frame.tf_err, frame.tf_eip,
frame.tf_cs, frame.tf_eflags);
- eva = rcr2();
- printf("cr2 %x cpl %x\n", eva, cpl);
- /* type &= ~T_USER; */ /* XXX what the hell is this */
- panic("trap");
+ eva = rcr2();
+ printf("cr2 = %x, current priority = %x\n", eva, cpl);
+
+ type &= ~T_USER;
+ if (type <= MAX_TRAP_MSG)
+ panic(trap_msg[type]);
+ else
+ panic("unknown/reserved trap");
+
/*NOTREACHED*/
case T_SEGNPFLT|T_USER:
diff --git a/sys/amd64/include/asmacros.h b/sys/amd64/include/asmacros.h
new file mode 100644
index 0000000..f0f2c01
--- /dev/null
+++ b/sys/amd64/include/asmacros.h
@@ -0,0 +1,43 @@
+#define ALIGN_DATA .align 2 /* 4 byte alignment, zero filled */
+#define ALIGN_TEXT .align 2,0x90 /* 4-byte alignment, nop filled */
+#define SUPERALIGN_TEXT .align 4,0x90 /* 16-byte alignment (better for 486), nop filled */
+
+#define GEN_ENTRY(name) ALIGN_TEXT; .globl name; name:
+#define NON_GPROF_ENTRY(name) GEN_ENTRY(_/**/name)
+
+#ifdef GPROF
+/*
+ * ALTENTRY() must be before a corresponding ENTRY() so that it can jump
+ * over the mcounting.
+ */
+#define ALTENTRY(name) GEN_ENTRY(_/**/name); MCOUNT; jmp 2f
+#define ENTRY(name) GEN_ENTRY(_/**/name); MCOUNT; 2:
+/*
+ * The call to mcount supports the usual (bad) conventions. We allocate
+ * some data and pass a pointer to it although FreeBSD doesn't use
+ * the data. We set up a frame before calling mcount because that is
+ * the standard convention although it makes work for both mcount and
+ * callers.
+ */
+#define MCOUNT .data; ALIGN_DATA; 1:; .long 0; .text; \
+ pushl %ebp; movl %esp,%ebp; \
+ movl $1b,%eax; call mcount; popl %ebp
+#else
+/*
+ * ALTENTRY() has to align because it is before a corresponding ENTRY().
+ * ENTRY() has to align too because there may be no ALTENTRY() before it.
+ * If there is a previous ALTENTRY() then the alignment code is empty.
+ */
+#define ALTENTRY(name) GEN_ENTRY(_/**/name)
+#define ENTRY(name) GEN_ENTRY(_/**/name)
+
+#endif
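+
+/*
+ * For example, in the non-GPROF case
+ *	ENTRY(bcopy)
+ * expands (roughly) to
+ *	.align 2,0x90; .globl _bcopy; _bcopy:
+ */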
+
+#ifdef DUMMY_NOPS /* this will break some older machines */
+#define FASTER_NOP
+#define NOP
+#else
+#define FASTER_NOP pushl %eax ; inb $0x84,%al ; popl %eax
+#define NOP pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
+#endif
+
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index f9baf22..9feb23c 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -42,7 +42,7 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
- * $Id: pmap.h,v 1.4 1993/10/15 10:07:44 rgrimes Exp $
+ * $Id: pmap.h,v 1.5 1993/11/07 17:43:02 wollman Exp $
*/
#ifndef _PMAP_MACHINE_
@@ -118,11 +118,11 @@ typedef struct pde pd_entry_t; /* page directory entry */
typedef struct pte pt_entry_t; /* Mach page table entry */
/*
- * NKPDE controls the virtual space of the kernel, what ever is left is
- * given to the user (NUPDE)
+ * NKPDE controls the virtual space of the kernel, whatever is left, minus
+ * the alternate page table area is given to the user (NUPDE)
*/
#define NKPDE 7 /* number of kernel pde's */
-#define NUPDE (NPTEPG-NKPDE) /* number of user pde's */
+#define NUPDE (NPTEPG-NKPDE-1)/* number of user pde's */
/*
* The *PTDI values control the layout of virtual memory
*
@@ -132,7 +132,11 @@ typedef struct pte pt_entry_t; /* Mach page table entry */
#define APTDPTDI (NPTEPG-1) /* alt ptd entry that points to APTD */
#define KPTDI (APTDPTDI-NKPDE)/* start of kernel virtual pde's */
#define PTDPTDI (KPTDI-1) /* ptd entry that points to ptd! */
-#define UPTDI (PTDPTDI-1) /* ptd entry for u./kernel&user stack */
+#define KSTKPTDI (PTDPTDI-1) /* ptd entry for u./kernel&user stack */
+#define KSTKPTEOFF (NBPG/sizeof(struct pde)-UPAGES) /* pte entry for kernel stack */
+
+#define PDESIZE sizeof(struct pde) /* for assembly files */
+#define PTESIZE sizeof(struct pte) /* for assembly files */
/*
* Address of current and alternate address space page table maps
diff --git a/sys/conf/Makefile.i386 b/sys/conf/Makefile.i386
index 25bf3e7..b414b93 100644
--- a/sys/conf/Makefile.i386
+++ b/sys/conf/Makefile.i386
@@ -1,6 +1,6 @@
# Copyright 1990 W. Jolitz
# from: @(#)Makefile.i386 7.1 5/10/91
-# $Id: Makefile.i386,v 1.11 1993/11/07 04:41:11 wollman Exp $
+# $Id: Makefile.i386,v 1.12 1993/11/07 16:46:33 wollman Exp $
#
# Makefile for FreeBSD
#
@@ -43,7 +43,8 @@ NORMAL_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<
NORMAL_S= ${CPP} -I. -DLOCORE ${COPTS} $< | ${AS} ${ASFLAGS} -o $*.o
DRIVER_C= ${CC} -c ${CFLAGS} ${PROF} $<
DRIVER_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<
-SYSTEM_OBJS=locore.o ${OBJS} param.o ioconf.o conf.o machdep.o
+SYSTEM_OBJS=locore.o exception.o swtch.o support.o ${OBJS} param.o \
+ ioconf.o conf.o machdep.o
SYSTEM_DEP=Makefile symbols.sort ${SYSTEM_OBJS}
SYSTEM_LD_HEAD= @echo loading $@; rm -f $@
SYSTEM_LD= @${LD} -z -T ${LOAD_ADDRESS} -o $@ -X vers.o ${SYSTEM_OBJS}
@@ -82,10 +83,28 @@ symbols.sort: ${I386}/i386/symbols.raw
locore.o: assym.s ${I386}/i386/locore.s machine/trap.h machine/psl.h \
machine/pte.h ${I386}/isa/vector.s ${I386}/isa/icu.s \
$S/sys/errno.h machine/specialreg.h ${I386}/isa/debug.h \
- ${I386}/isa/icu.h ${I386}/isa/isa.h vector.h $S/net/netisr.h
+ ${I386}/isa/icu.h ${I386}/isa/isa.h vector.h $S/net/netisr.h \
+ machine/asmacros.h
${CPP} -I. -DLOCORE ${COPTS} ${I386}/i386/locore.s | \
${AS} ${ASFLAGS} -o locore.o
+exception.o: assym.s ${I386}/i386/exception.s machine/trap.h \
+ ${I386}/isa/vector.s ${I386}/isa/icu.s \
+ $S/sys/errno.h ${I386}/isa/icu.h ${I386}/isa/isa.h vector.h \
+ $S/net/netisr.h machine/asmacros.h
+ ${CPP} -I. -DLOCORE ${COPTS} ${I386}/i386/exception.s | \
+ ${AS} ${ASFLAGS} -o exception.o
+
+swtch.o: assym.s ${I386}/i386/swtch.s \
+ $S/sys/errno.h ${I386}/isa/debug.h machine/asmacros.h
+ ${CPP} -I. ${COPTS} ${I386}/i386/swtch.s | \
+ ${AS} ${ASFLAGS} -o swtch.o
+
+support.o: assym.s ${I386}/i386/support.s \
+ $S/sys/errno.h machine/asmacros.h
+ ${CPP} -I. ${COPTS} ${I386}/i386/support.s | \
+ ${AS} ${ASFLAGS} -o support.o
+
machdep.o: ${I386}/i386/machdep.c Makefile
${CC} -c ${CFLAGS} -DLOAD_ADDRESS=0x${LOAD_ADDRESS} ${PROF} $<
diff --git a/sys/conf/Makefile.powerpc b/sys/conf/Makefile.powerpc
index 25bf3e7..b414b93 100644
--- a/sys/conf/Makefile.powerpc
+++ b/sys/conf/Makefile.powerpc
@@ -1,6 +1,6 @@
# Copyright 1990 W. Jolitz
# from: @(#)Makefile.i386 7.1 5/10/91
-# $Id: Makefile.i386,v 1.11 1993/11/07 04:41:11 wollman Exp $
+# $Id: Makefile.i386,v 1.12 1993/11/07 16:46:33 wollman Exp $
#
# Makefile for FreeBSD
#
@@ -43,7 +43,8 @@ NORMAL_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<
NORMAL_S= ${CPP} -I. -DLOCORE ${COPTS} $< | ${AS} ${ASFLAGS} -o $*.o
DRIVER_C= ${CC} -c ${CFLAGS} ${PROF} $<
DRIVER_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<
-SYSTEM_OBJS=locore.o ${OBJS} param.o ioconf.o conf.o machdep.o
+SYSTEM_OBJS=locore.o exception.o swtch.o support.o ${OBJS} param.o \
+ ioconf.o conf.o machdep.o
SYSTEM_DEP=Makefile symbols.sort ${SYSTEM_OBJS}
SYSTEM_LD_HEAD= @echo loading $@; rm -f $@
SYSTEM_LD= @${LD} -z -T ${LOAD_ADDRESS} -o $@ -X vers.o ${SYSTEM_OBJS}
@@ -82,10 +83,28 @@ symbols.sort: ${I386}/i386/symbols.raw
locore.o: assym.s ${I386}/i386/locore.s machine/trap.h machine/psl.h \
machine/pte.h ${I386}/isa/vector.s ${I386}/isa/icu.s \
$S/sys/errno.h machine/specialreg.h ${I386}/isa/debug.h \
- ${I386}/isa/icu.h ${I386}/isa/isa.h vector.h $S/net/netisr.h
+ ${I386}/isa/icu.h ${I386}/isa/isa.h vector.h $S/net/netisr.h \
+ machine/asmacros.h
${CPP} -I. -DLOCORE ${COPTS} ${I386}/i386/locore.s | \
${AS} ${ASFLAGS} -o locore.o
+exception.o: assym.s ${I386}/i386/exception.s machine/trap.h \
+ ${I386}/isa/vector.s ${I386}/isa/icu.s \
+ $S/sys/errno.h ${I386}/isa/icu.h ${I386}/isa/isa.h vector.h \
+ $S/net/netisr.h machine/asmacros.h
+ ${CPP} -I. -DLOCORE ${COPTS} ${I386}/i386/exception.s | \
+ ${AS} ${ASFLAGS} -o exception.o
+
+swtch.o: assym.s ${I386}/i386/swtch.s \
+ $S/sys/errno.h ${I386}/isa/debug.h machine/asmacros.h
+ ${CPP} -I. ${COPTS} ${I386}/i386/swtch.s | \
+ ${AS} ${ASFLAGS} -o swtch.o
+
+support.o: assym.s ${I386}/i386/support.s \
+ $S/sys/errno.h machine/asmacros.h
+ ${CPP} -I. ${COPTS} ${I386}/i386/support.s | \
+ ${AS} ${ASFLAGS} -o support.o
+
machdep.o: ${I386}/i386/machdep.c Makefile
${CC} -c ${CFLAGS} -DLOAD_ADDRESS=0x${LOAD_ADDRESS} ${PROF} $<
diff --git a/sys/i386/conf/Makefile.i386 b/sys/i386/conf/Makefile.i386
index 25bf3e7..b414b93 100644
--- a/sys/i386/conf/Makefile.i386
+++ b/sys/i386/conf/Makefile.i386
@@ -1,6 +1,6 @@
# Copyright 1990 W. Jolitz
# from: @(#)Makefile.i386 7.1 5/10/91
-# $Id: Makefile.i386,v 1.11 1993/11/07 04:41:11 wollman Exp $
+# $Id: Makefile.i386,v 1.12 1993/11/07 16:46:33 wollman Exp $
#
# Makefile for FreeBSD
#
@@ -43,7 +43,8 @@ NORMAL_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<
NORMAL_S= ${CPP} -I. -DLOCORE ${COPTS} $< | ${AS} ${ASFLAGS} -o $*.o
DRIVER_C= ${CC} -c ${CFLAGS} ${PROF} $<
DRIVER_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<
-SYSTEM_OBJS=locore.o ${OBJS} param.o ioconf.o conf.o machdep.o
+SYSTEM_OBJS=locore.o exception.o swtch.o support.o ${OBJS} param.o \
+ ioconf.o conf.o machdep.o
SYSTEM_DEP=Makefile symbols.sort ${SYSTEM_OBJS}
SYSTEM_LD_HEAD= @echo loading $@; rm -f $@
SYSTEM_LD= @${LD} -z -T ${LOAD_ADDRESS} -o $@ -X vers.o ${SYSTEM_OBJS}
@@ -82,10 +83,28 @@ symbols.sort: ${I386}/i386/symbols.raw
locore.o: assym.s ${I386}/i386/locore.s machine/trap.h machine/psl.h \
machine/pte.h ${I386}/isa/vector.s ${I386}/isa/icu.s \
$S/sys/errno.h machine/specialreg.h ${I386}/isa/debug.h \
- ${I386}/isa/icu.h ${I386}/isa/isa.h vector.h $S/net/netisr.h
+ ${I386}/isa/icu.h ${I386}/isa/isa.h vector.h $S/net/netisr.h \
+ machine/asmacros.h
${CPP} -I. -DLOCORE ${COPTS} ${I386}/i386/locore.s | \
${AS} ${ASFLAGS} -o locore.o
+exception.o: assym.s ${I386}/i386/exception.s machine/trap.h \
+ ${I386}/isa/vector.s ${I386}/isa/icu.s \
+ $S/sys/errno.h ${I386}/isa/icu.h ${I386}/isa/isa.h vector.h \
+ $S/net/netisr.h machine/asmacros.h
+ ${CPP} -I. -DLOCORE ${COPTS} ${I386}/i386/exception.s | \
+ ${AS} ${ASFLAGS} -o exception.o
+
+swtch.o: assym.s ${I386}/i386/swtch.s \
+ $S/sys/errno.h ${I386}/isa/debug.h machine/asmacros.h
+ ${CPP} -I. ${COPTS} ${I386}/i386/swtch.s | \
+ ${AS} ${ASFLAGS} -o swtch.o
+
+support.o: assym.s ${I386}/i386/support.s \
+ $S/sys/errno.h machine/asmacros.h
+ ${CPP} -I. ${COPTS} ${I386}/i386/support.s | \
+ ${AS} ${ASFLAGS} -o support.o
+
machdep.o: ${I386}/i386/machdep.c Makefile
${CC} -c ${CFLAGS} -DLOAD_ADDRESS=0x${LOAD_ADDRESS} ${PROF} $<
diff --git a/sys/i386/i386/exception.s b/sys/i386/i386/exception.s
new file mode 100644
index 0000000..d6de874
--- /dev/null
+++ b/sys/i386/i386/exception.s
@@ -0,0 +1,289 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "npx.h" /* NNPX */
+
+#include "assym.s" /* system defines */
+
+#include "errno.h" /* error return codes */
+
+#include "i386/isa/debug.h" /* BDE debugging macros */
+
+#include "machine/trap.h" /* trap codes */
+#include "syscall.h" /* syscall numbers */
+
+#include "machine/asmacros.h" /* miscellaneous macros */
+
+#define KDSEL 0x10 /* kernel data selector */
+#define SEL_RPL_MASK 0x0003
+#define TRAPF_CS_OFF (13 * 4)
+
+ .text
+
+/*****************************************************************************/
+/* Trap handling */
+/*****************************************************************************/
+/*
+ * Trap and fault vector routines
+ *
+ * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
+ * control. The sti's give the standard losing behaviour for ddb and kgdb.
+ */
+#define IDTVEC(name) ALIGN_TEXT; .globl _X/**/name; _X/**/name:
+#define TRAP(a) pushl $(a) ; jmp alltraps
+#ifdef KGDB
+# define BPTTRAP(a) sti; pushl $(a) ; jmp bpttraps
+#else
+# define BPTTRAP(a) sti; TRAP(a)
+#endif
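+
+/*
+ * For example,
+ *	IDTVEC(div)
+ *		pushl $0; TRAP(T_DIVIDE)
+ * expands (roughly) to
+ *	.align 2,0x90; .globl _Xdiv; _Xdiv:
+ *		pushl $0; pushl $(T_DIVIDE); jmp alltraps
+ */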
+
+IDTVEC(div)
+ pushl $0; TRAP(T_DIVIDE)
+IDTVEC(dbg)
+#if defined(BDE_DEBUGGER) && defined(BDBTRAP)
+ BDBTRAP(dbg)
+#endif
+ pushl $0; BPTTRAP(T_TRCTRAP)
+IDTVEC(nmi)
+ pushl $0; TRAP(T_NMI)
+IDTVEC(bpt)
+#if defined(BDE_DEBUGGER) && defined(BDBTRAP)
+ BDBTRAP(bpt)
+#endif
+ pushl $0; BPTTRAP(T_BPTFLT)
+IDTVEC(ofl)
+ pushl $0; TRAP(T_OFLOW)
+IDTVEC(bnd)
+ pushl $0; TRAP(T_BOUND)
+IDTVEC(ill)
+ pushl $0; TRAP(T_PRIVINFLT)
+IDTVEC(dna)
+ pushl $0; TRAP(T_DNA)
+IDTVEC(dble)
+ TRAP(T_DOUBLEFLT)
+IDTVEC(fpusegm)
+ pushl $0; TRAP(T_FPOPFLT)
+IDTVEC(tss)
+ TRAP(T_TSSFLT)
+IDTVEC(missing)
+ TRAP(T_SEGNPFLT)
+IDTVEC(stk)
+ TRAP(T_STKFLT)
+IDTVEC(prot)
+ TRAP(T_PROTFLT)
+IDTVEC(page)
+ TRAP(T_PAGEFLT)
+IDTVEC(rsvd)
+ pushl $0; TRAP(T_RESERVED)
+IDTVEC(fpu)
+#if NNPX > 0
+ /*
+ * Handle like an interrupt so that we can call npxintr to clear the
+ * error. It would be better to handle npx interrupts as traps but
+ * this is difficult for nested interrupts.
+ */
+ pushl $0 /* dummy error code */
+ pushl $T_ASTFLT
+ pushal
+ nop /* silly, the bug is for popal and it only
+ * bites when the next instruction has a
+ * complicated address mode */
+ pushl %ds
+ pushl %es /* now the stack frame is a trap frame */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ pushl _cpl
+ pushl $0 /* dummy unit to finish building intr frame */
+ incl _cnt+V_TRAP
+ call _npxintr
+ jmp doreti
+#else /* NNPX > 0 */
+ pushl $0; TRAP(T_ARITHTRAP)
+#endif /* NNPX > 0 */
+ /* 17 - 31 reserved for future exp */
+IDTVEC(rsvd0)
+ pushl $0; TRAP(17)
+IDTVEC(rsvd1)
+ pushl $0; TRAP(18)
+IDTVEC(rsvd2)
+ pushl $0; TRAP(19)
+IDTVEC(rsvd3)
+ pushl $0; TRAP(20)
+IDTVEC(rsvd4)
+ pushl $0; TRAP(21)
+IDTVEC(rsvd5)
+ pushl $0; TRAP(22)
+IDTVEC(rsvd6)
+ pushl $0; TRAP(23)
+IDTVEC(rsvd7)
+ pushl $0; TRAP(24)
+IDTVEC(rsvd8)
+ pushl $0; TRAP(25)
+IDTVEC(rsvd9)
+ pushl $0; TRAP(26)
+IDTVEC(rsvd10)
+ pushl $0; TRAP(27)
+IDTVEC(rsvd11)
+ pushl $0; TRAP(28)
+IDTVEC(rsvd12)
+ pushl $0; TRAP(29)
+IDTVEC(rsvd13)
+ pushl $0; TRAP(30)
+IDTVEC(rsvd14)
+ pushl $0; TRAP(31)
+
+ SUPERALIGN_TEXT
+alltraps:
+ pushal
+ nop
+ pushl %ds
+ pushl %es
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+calltrap:
+ incl _cnt+V_TRAP
+ call _trap
+ /*
+ * Return through doreti to handle ASTs. Have to change trap frame
+ * to interrupt frame.
+ */
+ movl $T_ASTFLT,4+4+32(%esp) /* new trap type (err code not used) */
+ pushl _cpl
+ pushl $0 /* dummy unit */
+ jmp doreti
+
+#ifdef KGDB
+/*
+ * This code checks for a kgdb trap, then falls through
+ * to the regular trap code.
+ */
+ SUPERALIGN_TEXT
+bpttraps:
+ pushal
+ nop
+ pushl %es
+ pushl %ds
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp) /* non-kernel mode? */
+ jne calltrap /* yes */
+ call _kgdb_trap_glue
+ jmp calltrap
+#endif
+
+/*
+ * Call gate entry for syscall
+ */
+ SUPERALIGN_TEXT
+IDTVEC(syscall)
+ pushfl /* only for stupid carry bit and more stupid wait3 cc kludge */
+ /* XXX - also for direction flag (bzero, etc. clear it) */
+ pushal /* only need eax,ecx,edx - trap resaves others */
+ nop
+ movl $KDSEL,%eax /* switch to kernel segments */
+ movl %ax,%ds
+ movl %ax,%es
+ incl _cnt+V_SYSCALL
+ call _syscall
+ /*
+ * Return through doreti to handle ASTs. Have to change syscall frame
+ * to interrupt frame.
+ *
+ * XXX - we should have set up the frame earlier to avoid the
+ * following popal/pushal (not much can be done to avoid shuffling
+ * the flags). Consistent frames would simplify things all over.
+ */
+ movl 32+0(%esp),%eax /* old flags, shuffle to above cs:eip */
+ movl 32+4(%esp),%ebx /* `int' frame should have been ef, eip, cs */
+ movl 32+8(%esp),%ecx
+ movl %ebx,32+0(%esp)
+ movl %ecx,32+4(%esp)
+ movl %eax,32+8(%esp)
+ popal
+ nop
+ pushl $0 /* dummy error code */
+ pushl $T_ASTFLT
+ pushal
+ nop
+ movl __udatasel,%eax /* switch back to user segments */
+ pushl %eax /* XXX - better to preserve originals? */
+ pushl %eax
+ pushl _cpl
+ pushl $0
+ jmp doreti
+
+#ifdef SHOW_A_LOT
+/*
+ * 'show_bits' was too big when defined as a macro. The line length for some
+ * enclosing macro was too big for gas. Perhaps the code would have blown
+ * the cache anyway.
+ */
+ ALIGN_TEXT
+show_bits:
+ pushl %eax
+ SHOW_BIT(0)
+ SHOW_BIT(1)
+ SHOW_BIT(2)
+ SHOW_BIT(3)
+ SHOW_BIT(4)
+ SHOW_BIT(5)
+ SHOW_BIT(6)
+ SHOW_BIT(7)
+ SHOW_BIT(8)
+ SHOW_BIT(9)
+ SHOW_BIT(10)
+ SHOW_BIT(11)
+ SHOW_BIT(12)
+ SHOW_BIT(13)
+ SHOW_BIT(14)
+ SHOW_BIT(15)
+ popl %eax
+ ret
+
+ .data
+bit_colors:
+ .byte GREEN,RED,0,0
+ .text
+
+#endif /* SHOW_A_LOT */
+
+/*
+ * include generated interrupt vectors and ISA intr code
+ */
+#include "i386/isa/vector.s"
+#include "i386/isa/icu.s"
diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c
index 381f3df..84c212a 100644
--- a/sys/i386/i386/genassym.c
+++ b/sys/i386/i386/genassym.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
- * $Id: genassym.c,v 1.4 1993/10/12 15:33:18 rgrimes Exp $
+ * $Id: genassym.c,v 1.5 1993/10/15 10:34:17 rgrimes Exp $
*/
#include "sys/param.h"
@@ -96,10 +96,14 @@ main()
printf("#define\tCLSIZE %d\n", CLSIZE);
printf("#define\tNBPG %d\n", NBPG);
printf("#define\tNPTEPG %d\n", NPTEPG);
+ printf("#define\tPDESIZE %d\n", PDESIZE);
+ printf("#define\tPTESIZE %d\n", PTESIZE);
printf("#define\tNKPDE %d\n", NKPDE);
- printf("#define\tKPTDI %d\n", KPTDI);
- printf("#define\tPTDPTDI %d\n", PTDPTDI);
- printf("#define\tAPTDPTDI %d\n", APTDPTDI);
+ printf("#define\tKPTDI 0x%x\n", KPTDI);
+ printf("#define\tKSTKPTDI 0x%x\n", KSTKPTDI);
+ printf("#define\tKSTKPTEOFF 0x%x\n", KSTKPTEOFF);
+ printf("#define\tPTDPTDI 0x%x\n", PTDPTDI);
+ printf("#define\tAPTDPTDI 0x%x\n", APTDPTDI);
printf("#define\tPGSHIFT %d\n", PGSHIFT);
printf("#define\tPDRSHIFT %d\n", PDRSHIFT);
printf("#define\tSYSPTSIZE %d\n", SYSPTSIZE);
@@ -108,9 +112,8 @@ main()
#ifdef SYSVSHM
printf("#define\tSHMMAXPGS %d\n", SHMMAXPGS);
#endif
- printf("#define\tUSRSTACK %d\n", USRSTACK);
- printf("#define\tKERNBASE %d\n", KERNBASE);
- printf("#define\tKERNSIZE %d\n", KERNSIZE);
+ printf("#define\tUSRSTACK 0x%x\n", USRSTACK);
+ printf("#define\tKERNBASE 0x%x\n", KERNBASE);
printf("#define\tMSGBUFPTECNT %d\n", btoc(sizeof (struct msgbuf)));
printf("#define\tNMBCLUSTERS %d\n", NMBCLUSTERS);
printf("#define\tMCLBYTES %d\n", MCLBYTES);
diff --git a/sys/i386/i386/locore.s b/sys/i386/i386/locore.s
index d808571..4b4e36f 100644
--- a/sys/i386/i386/locore.s
+++ b/sys/i386/i386/locore.s
@@ -34,86 +34,41 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
- * $Id: locore.s,v 1.8 1993/10/15 10:34:19 rgrimes Exp $
+ * $Id$
*/
-
/*
- * locore.s: 4BSD machine support for the Intel 386
- * Preliminary version
- * Written by William F. Jolitz, 386BSD Project
+ * locore.s: FreeBSD machine support for the Intel 386
+ * originally from: locore.s, by William F. Jolitz
+ *
+ * Substantially rewritten by David Greenman, Rod Grimes,
+ * Bruce Evans, Wolfgang Solfrank, and many others.
*/
-#include "npx.h"
+#include "npx.h" /* for NNPX */
-#include "assym.s"
-#include "machine/psl.h"
-#include "machine/pte.h"
+#include "assym.s" /* system definitions */
+#include "machine/psl.h" /* processor status longword defs */
+#include "machine/pte.h" /* page table entry definitions */
-#include "errno.h"
+#include "errno.h" /* error return codes */
-#include "machine/trap.h"
+#include "machine/specialreg.h" /* x86 special registers */
+#include "i386/isa/debug.h" /* BDE debugging macros */
+#include "machine/cputypes.h" /* x86 cpu type definitions */
-#include "machine/specialreg.h"
-#include "i386/isa/debug.h"
-#include "machine/cputypes.h"
+#include "syscall.h" /* system call numbers */
-#define KDSEL 0x10
-#define SEL_RPL_MASK 0x0003
-#define TRAPF_CS_OFF (13 * 4)
+#include "machine/asmacros.h" /* miscellaneous asm macros */
/*
+ * XXX
+ *
* Note: This version greatly munged to avoid various assembler errors
* that may be fixed in newer versions of gas. Perhaps newer versions
* will have more pleasant appearance.
*/
- .set IDXSHIFT,10
-
-#define ALIGN_DATA .align 2
-#define ALIGN_TEXT .align 2,0x90 /* 4-byte boundaries, NOP-filled */
-#define SUPERALIGN_TEXT .align 4,0x90 /* 16-byte boundaries better for 486 */
-
-#define GEN_ENTRY(name) ALIGN_TEXT; .globl name; name:
-#define NON_GPROF_ENTRY(name) GEN_ENTRY(_/**/name)
-
-#ifdef GPROF
-/*
- * ALTENTRY() must be before a corresponding ENTRY() so that it can jump
- * over the mcounting.
- */
-#define ALTENTRY(name) GEN_ENTRY(_/**/name); MCOUNT; jmp 2f
-#define ENTRY(name) GEN_ENTRY(_/**/name); MCOUNT; 2:
-/*
- * The call to mcount supports the usual (bad) conventions. We allocate
- * some data and pass a pointer to it although the 386BSD doesn't use
- * the data. We set up a frame before calling mcount because that is
- * the standard convention although it makes work for both mcount and
- * callers.
- */
-#define MCOUNT .data; ALIGN_DATA; 1:; .long 0; .text; \
- pushl %ebp; movl %esp,%ebp; \
- movl $1b,%eax; call mcount; popl %ebp
-#else
-/*
- * ALTENTRY() has to align because it is before a corresponding ENTRY().
- * ENTRY() has to align to because there may be no ALTENTRY() before it.
- * If there is a previous ALTENTRY() then the alignment code is empty.
- */
-#define ALTENTRY(name) GEN_ENTRY(_/**/name)
-#define ENTRY(name) GEN_ENTRY(_/**/name)
-#endif
-
-/* NB: NOP now preserves registers so NOPs can be inserted anywhere */
-/* XXX: NOP and FASTER_NOP are misleadingly named */
-#ifdef DUMMY_NOPS /* this will break some older machines */
-#define FASTER_NOP
-#define NOP
-#else
-#define FASTER_NOP pushl %eax ; inb $0x84,%al ; popl %eax
-#define NOP pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
-#endif
-
/*
* PTmap is recursive pagemap at top of virtual address space.
* Within PTmap, the page directory can be found (third indirection).
@@ -121,8 +76,9 @@
.globl _PTmap,_PTD,_PTDpde,_Sysmap
.set _PTmap,PTDPTDI << PDRSHIFT
.set _PTD,_PTmap + (PTDPTDI * NBPG)
- .set _PTDpde,_PTD + (PTDPTDI * 4) /* XXX 4=sizeof pde */
+ .set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
+/* Sysmap is the base address of the kernel page tables */
.set _Sysmap,_PTmap + (KPTDI * NBPG)
/*
@@ -132,7 +88,7 @@
.globl _APTmap,_APTD,_APTDpde
.set _APTmap,APTDPTDI << PDRSHIFT
.set _APTD,_APTmap + (APTDPTDI * NBPG)
- .set _APTDpde,_PTD + (APTDPTDI * 4) /* XXX 4=sizeof pde */
+ .set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
/*
* Access to each processes kernel stack is via a region of
@@ -141,44 +97,41 @@
*/
.set _kstack,USRSTACK
.globl _kstack
- .set PPDROFF,0x3F6
- .set PPTEOFF,0x400-UPAGES /* 0x3FE */
-
/*
* Globals
*/
.data
.globl _esym
-_esym: .long 0 /* ptr to end of syms */
+_esym: .long 0 /* ptr to end of syms */
.globl _boothowto,_bootdev,_curpcb
.globl _cpu,_cold,_atdevbase
-_cpu: .long 0 /* are we 386, 386sx, or 486 */
-_cold: .long 1 /* cold till we are not */
-_atdevbase: .long 0 /* location of start of iomem in virtual */
-_atdevphys: .long 0 /* location of device mapping ptes (phys) */
+_cpu: .long 0 /* are we 386, 386sx, or 486 */
+_cold: .long 1 /* cold till we are not */
+_atdevbase: .long 0 /* location of start of iomem in virtual */
+_atdevphys: .long 0 /* location of device mapping ptes (phys) */
+
+ .globl _KERNend
+_KERNend: .long 0 /* phys addr end of kernel (just after bss) */
.globl _IdlePTD,_KPTphys
-_IdlePTD: .long 0
-_KPTphys: .long 0
+_IdlePTD: .long 0 /* phys addr of kernel PTD */
+_KPTphys: .long 0 /* phys addr of kernel page tables */
- .globl _curpcb, _whichqs
-_curpcb: .long 0 /* pointer to curproc's PCB area */
-_whichqs: .long 0 /* which run queues have data */
+ .globl _cyloffset
+_cyloffset: .long 0 /* cylinder offset from boot blocks */
- .globl _cyloffset,_proc0paddr
-_cyloffset: .long 0
-_proc0paddr: .long 0
+ .globl _proc0paddr
+_proc0paddr: .long 0 /* address of proc 0 address space */
- /* Stuff for network ASTs */
- .globl _softem,_netisr,_astpending,_want_resched
-_softem: .long 0 /* WFJ only knows... */
-_netisr: .long 0 /* set with bits for which queue to service */
-_astpending: .long 0 /* tells us an AST needs to be taken */
-_want_resched: .long 0 /* we need to re-schedule */
+#ifdef BDE_DEBUGGER
+ .globl _bdb_exists /* flag to indicate BDE debugger is available */
+ .long 0
+#endif
+ .globl tmpstk
.space 512
tmpstk:
@@ -193,9 +146,9 @@ tmpstk:
* Also the entry point (jumped to directly from the boot blocks).
*/
ENTRY(btext)
- movw $0x1234,0x472 /* warm boot */
+ movw $0x1234,0x472 /* warm boot */
jmp 1f
- .space 0x500 /* skip over warm boot shit */
+ .space 0x500 /* skip over warm boot shit */
/*
* pass parameters on stack (howto, bootdev, unit, cyloffset, esym)
@@ -249,53 +202,60 @@ ENTRY(btext)
* Oops, the gdt is in the carcass of the boot program so clearing
* the rest of memory is still not possible.
*/
- movl $tmpstk-KERNBASE,%esp /* bootstrap stack end location */
+ movl $tmpstk-KERNBASE,%esp /* bootstrap stack end location */
/*
* Virtual address space of kernel:
*
* text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
- * 0 1 2 3 4
+ * pages: 1 UPAGES (2) 1 NKPDE (7)
*/
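In C terms, the first free address eventually handed to init386() works out as follows; a sketch assuming the per-area page counts in the comment above (NBPG 4096, UPAGES 2, NKPDE 7):

    #define NBPG    4096
    #define UPAGES  2
    #define NKPDE   7

    static unsigned int
    sketch_first(unsigned int kernend)          /* page-aligned end of kernel */
    {
            /* page dir + UPAGES + proc0 stack page table + kernel page tables */
            return kernend + (1 + UPAGES + 1 + NKPDE) * NBPG;
    }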
/* find end of kernel image */
movl $_end-KERNBASE,%ecx
- addl $NBPG-1,%ecx /* page align up */
+ addl $NBPG-1,%ecx /* page align up */
andl $~(NBPG-1),%ecx
- movl %ecx,%esi /* esi=start of tables */
+ movl %ecx,%esi /* esi=start of tables */
+ movl %ecx,_KERNend-KERNBASE /* save end of kernel */
-/* clear bss and memory for bootstrap pagetables. */
+/* clear bss */
movl $_edata-KERNBASE,%edi
- subl %edi,%ecx
- addl $(UPAGES+5)*NBPG,%ecx /* size of tables */
-
- xorl %eax,%eax /* pattern */
+ subl %edi,%ecx /* get amount to clear */
+ xorl %eax,%eax /* specify zero fill */
cld
rep
stosb
/*
- * If we are loaded at 0x0 check to see if we have space for the
- * page tables pages after the kernel and before the 640K ISA memory
- * hole. If we do not have space relocate the page table pages and
- * the kernel stack to start at 1MB. The value that ends up in esi
- * is used by the rest of locore to build the tables. Locore adjusts
- * esi each time it allocates a structure and then passes the final
- * value to init386(first) as the value first. esi should ALWAYS
- * be page aligned!!
- */
- movl %esi,%ecx /* Get current first availiable address */
- cmpl $0x100000,%ecx /* Lets see if we are already above 1MB */
- jge 1f /* yep, don't need to check for room */
- addl $(NKPDE + 4) * NBPG,%ecx /* XXX the 4 is for kstack */
- /* space for kstack, PTD and PTE's */
- cmpl $(640*1024),%ecx
- /* see if it fits in low memory */
- jle 1f /* yep, don't need to relocate it */
- movl $0x100000,%esi /* won't fit, so start it at 1MB */
+ * If we are loaded at 0x0 check to see if we have space for the
+ * page dir/tables and stack area after the kernel and before the 640K
+ * ISA memory hole. If we do not have space relocate the page directory,
+ * UPAGES, proc 0 stack, and page table pages to start at 1MB. The value
+ * that ends up in esi, which points to the kernel page directory, is
+ * used by the rest of locore to build the tables.
+ * esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPDE(number of kernel
+ * page table pages) is then passed on the stack to init386(first) as
+ * the value first. esi should ALWAYS be page aligned!!
+ */
+ movl %esi,%ecx /* Get current first available address */
+ cmpl $0x100000,%ecx /* Let's see if we are already above 1MB */
+ jge 1f /* yep, don't need to check for room */
+ addl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* space for page dir, UPAGES, */
+ /* p0stack and kernel page tables */
+ cmpl $(640*1024),%ecx /* see if it fits in low memory */
+ jle 1f /* yep, don't need to relocate it */
+ movl $0x100000,%esi /* won't fit, so start it at 1MB */
1:
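The check just made amounts to the following, as a hedged C sketch with the same assumed constants (NBPG 4096, UPAGES 2, NKPDE 7):

    #define NBPG    4096
    #define UPAGES  2
    #define NKPDE   7

    static unsigned int
    sketch_table_base(unsigned int kernend)     /* page-aligned end of kernel */
    {
            unsigned int need = (1 + UPAGES + 1 + NKPDE) * NBPG;

            if (kernend >= 0x100000)            /* already above 1MB */
                    return kernend;
            if (kernend + need <= 640 * 1024)   /* fits below the ISA hole */
                    return kernend;
            return 0x100000;                    /* relocate to 1MB */
    }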
-/* physical address of Idle Address space */
+/* clear pagetables, page directory, stack, etc... */
+ movl %esi,%edi /* base (page directory) */
+ movl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* amount to clear */
+ xorl %eax,%eax /* specify zero fill */
+ cld
+ rep
+ stosb
+
+/* physical address of Idle proc/kernel page directory */
movl %esi,_IdlePTD-KERNBASE
/*
@@ -312,69 +272,100 @@ ENTRY(btext)
/*
* Map Kernel
- * N.B. don't bother with making kernel text RO, as 386
- * ignores R/W AND U/S bits on kernel access (only v works) !
*
* First step - build page tables
*/
- movl %esi,%ecx /* this much memory, */
- shrl $PGSHIFT,%ecx /* for this many pte s */
- addl $UPAGES+4,%ecx /* including our early context */
- cmpl $0xa0,%ecx /* XXX - cover debugger pages */
+#if defined (KGDB) || defined (BDE_DEBUGGER)
+ movl _KERNend-KERNBASE,%ecx /* this much memory, */
+ shrl $PGSHIFT,%ecx /* for this many PTEs */
+#ifdef BDE_DEBUGGER
+ cmpl $0xa0,%ecx /* XXX - cover debugger pages */
jae 1f
movl $0xa0,%ecx
1:
- movl $PG_V|PG_KW,%eax /* having these bits set, */
- lea (4*NBPG)(%esi),%ebx /* physical address of KPT in proc 0, */
- movl %ebx,_KPTphys-KERNBASE /* in the kernel page table, */
+#endif /* BDE_DEBUGGER */
+ movl $PG_V|PG_KW,%eax /* having these bits set, */
+ lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
+ movl %ebx,_KPTphys-KERNBASE /* save in global */
fillkpt
+#else /* !KGDB && !BDE_DEBUGGER */
+ /* write protect kernel text (doesn't do a thing for 386's - only 486's) */
+ movl $_etext-KERNBASE,%ecx /* get size of text */
+ shrl $PGSHIFT,%ecx /* for this many PTEs */
+ movl $PG_V|PG_KR,%eax /* specify read only */
+ lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
+ movl %ebx,_KPTphys-KERNBASE /* save in global */
+ fillkpt
+
+ /* data and bss are r/w */
+ andl $PG_FRAME,%eax /* strip to just addr of bss */
+ movl _KERNend-KERNBASE,%ecx /* calculate size */
+ subl %eax,%ecx
+ shrl $PGSHIFT,%ecx
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ fillkpt
+#endif
+
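In the non-debugger case above, the text pages are entered with PG_V|PG_KR and everything from the end of text through _KERNend with PG_V|PG_KW. A rough C rendering of one fillkpt pass; the macro is defined earlier in locore.s, so its body and the PG_* bit values are assumptions here:

    #define NBPG    4096
    #define PG_V    0x01                        /* valid (assumed value) */
    #define PG_KR   0x00                        /* kernel read-only (assumed) */
    #define PG_KW   0x02                        /* kernel read/write (assumed) */
    typedef unsigned int pt_entry_t;

    static void
    sketch_fillkpt(pt_entry_t *pte, unsigned int pa, unsigned int prot, int n)
    {
            while (n-- > 0) {
                    *pte++ = pa | prot;         /* frame plus protection bits */
                    pa += NBPG;
            }
    }

The 386 ignores the R/W bit in ring 0, so the read-only text mapping only bites on a 486 once CR0_WP is set below.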
+/* now initialize the page dir, upages, p0stack PT, and page tables */
+
+ movl $(1+UPAGES+1+NKPDE),%ecx /* number of PTEs */
+ movl %esi,%eax /* phys address of PTD */
+ andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ movl %esi,%ebx /* calculate pte offset to ptd */
+ shrl $PGSHIFT-2,%ebx
+ addl %esi,%ebx /* address of page directory */
+ addl $((1+UPAGES+1)*NBPG),%ebx /* offset to kernel page tables */
+ fillkpt
+
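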
/* map I/O memory map */
- movl $0x100-0xa0,%ecx /* for this many pte s, */
- movl $(0xa0000|PG_V|PG_UW),%eax /* having these bits set,(perhaps URW?) XXX 06 Aug 92 */
- movl %ebx,_atdevphys-KERNBASE /* remember phys addr of ptes */
+ movl _KPTphys-KERNBASE,%ebx /* base of kernel page tables */
+ lea (0xa0 * PTESIZE)(%ebx),%ebx /* hardwire ISA hole at KERNBASE + 0xa0000 */
+ movl $0x100-0xa0,%ecx /* for this many PTEs, */
+ movl $(0xa0000|PG_V|PG_KW),%eax /* valid, kernel read/write */
+ movl %ebx,_atdevphys-KERNBASE /* save phys addr of ptes */
fillkpt
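Hardwiring the hole at PTE slot 0xa0 of the first kernel page table means virtual KERNBASE + 0xa0000 always maps physical 0xa0000, independent of where the kernel image ends. A sketch of where the hole's PTEs land, assuming PTESIZE is 4 and PGSHIFT is 12 (both really come from genassym):

    #define PGSHIFT 12
    #define PTESIZE 4

    static unsigned int
    sketch_isa_pte_addr(unsigned int kptphys, unsigned int pa)
    {
            /* pa in [0xa0000, 0x100000) uses PTE slots 0xa0 through 0xff */
            return kptphys + (pa >> PGSHIFT) * PTESIZE;
    }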
/* map proc 0's kernel stack into user page table page */
- movl $UPAGES,%ecx /* for this many pte s, */
- lea (1*NBPG)(%esi),%eax /* physical address in proc 0 */
- lea (KERNBASE)(%eax),%edx
- movl %edx,_proc0paddr-KERNBASE
- /* remember VA for 0th process init */
- orl $PG_V|PG_KW,%eax /* having these bits set, */
- lea (3*NBPG)(%esi),%ebx /* physical address of stack pt in proc 0 */
- addl $(PPTEOFF*4),%ebx
+ movl $UPAGES,%ecx /* for this many PTEs, */
+ lea (1*NBPG)(%esi),%eax /* physical address in proc 0 */
+ lea (KERNBASE)(%eax),%edx /* change into virtual addr */
+ movl %edx,_proc0paddr-KERNBASE /* save VA for proc 0 init */
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ lea ((1+UPAGES)*NBPG)(%esi),%ebx /* addr of stack page table in proc 0 */
+ addl $(KSTKPTEOFF * PTESIZE),%ebx /* offset to kernel stack PTE */
fillkpt
/*
- * Construct a page table directory
- * (of page directory elements - pde's)
+ * Initialize kernel page table directory
*/
/* install a pde for temporary double map of bottom of VA */
- lea (4*NBPG)(%esi),%eax /* physical address of kernel page table */
- orl $PG_V|PG_UW,%eax /* pde entry is valid XXX 06 Aug 92 */
- movl %eax,(%esi) /* which is where temp maps! */
+ movl _KPTphys-KERNBASE,%eax
+ orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ movl %eax,(%esi) /* which is where temp maps! */
- /* kernel pde's */
- movl $(NKPDE),%ecx /* for this many pde s, */
- lea (KPTDI*4)(%esi),%ebx /* offset of pde for kernel */
+ /* initialize kernel pde's */
+ movl $(NKPDE),%ecx /* for this many PDEs */
+ lea (KPTDI*PDESIZE)(%esi),%ebx /* offset of pde for kernel */
fillkpt
/* install a pde recursively mapping page directory as a page table! */
- movl %esi,%eax /* phys address of ptd in proc 0 */
- orl $PG_V|PG_UW,%eax /* pde entry is valid XXX 06 Aug 92 */
- movl %eax,PTDPTDI*4(%esi) /* which is where PTmap maps! */
+ movl %esi,%eax /* phys address of ptd in proc 0 */
+ orl $PG_V|PG_KW,%eax /* pde entry is valid */
+ movl %eax,PTDPTDI*PDESIZE(%esi) /* which is where PTmap maps! */
/* install a pde to map kernel stack for proc 0 */
- lea (3*NBPG)(%esi),%eax /* physical address of pt in proc 0 */
- orl $PG_V|PG_KW,%eax /* pde entry is valid */
- movl %eax,PPDROFF*4(%esi) /* which is where kernel stack maps! */
+ lea ((1+UPAGES)*NBPG)(%esi),%eax /* physical address of pt in proc 0 */
+ orl $PG_V|PG_KW,%eax /* pde entry is valid */
+ movl %eax,KSTKPTDI*PDESIZE(%esi) /* which is where kernel stack maps! */
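Taken together, the stores above populate four pieces of the page directory. A rough C rendering; the directory indices (PTDPTDI, KPTDI, KSTKPTDI) are genassym constants, so they are left as arguments in this sketch rather than given values:

    #define NBPG    4096
    #define PG_V    0x01                        /* assumed bit values */
    #define PG_KW   0x02
    typedef unsigned int pd_entry_t;

    static void
    sketch_init_ptd(pd_entry_t *ptd, unsigned int ptd_pa, unsigned int kpt_pa,
        unsigned int p0stkpt_pa, int nkpde, int kptdi, int ptdptdi, int kstkptdi)
    {
            int i;

            ptd[0] = kpt_pa | PG_V | PG_KW;     /* temp double map of low VA */
            for (i = 0; i < nkpde; i++)         /* kernel page tables */
                    ptd[kptdi + i] = (kpt_pa + i * NBPG) | PG_V | PG_KW;
            ptd[ptdptdi] = ptd_pa | PG_V | PG_KW;       /* PTD maps itself */
            ptd[kstkptdi] = p0stkpt_pa | PG_V | PG_KW;  /* proc 0 kstack PT */
    }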
+#ifdef BDE_DEBUGGER
/* copy and convert stuff from old gdt and idt for debugger */
- cmpl $0x0375c339,0x96104 /* XXX - debugger signature */
+ cmpl $0x0375c339,0x96104 /* XXX - debugger signature */
jne 1f
movb $1,_bdb_exists-KERNBASE
1:
@@ -382,23 +373,23 @@ ENTRY(btext)
subl $2*6,%esp
sgdt (%esp)
- movl 2(%esp),%esi /* base address of current gdt */
+ movl 2(%esp),%esi /* base address of current gdt */
movl $_gdt-KERNBASE,%edi
movl %edi,2(%esp)
movl $8*18/4,%ecx
- rep /* copy gdt */
+ rep /* copy gdt */
movsl
movl $_gdt-KERNBASE,-8+2(%edi) /* adjust gdt self-ptr */
movb $0x92,-8+5(%edi)
sidt 6(%esp)
- movl 6+2(%esp),%esi /* base address of current idt */
- movl 8+4(%esi),%eax /* convert dbg descriptor to ... */
+ movl 6+2(%esp),%esi /* base address of current idt */
+ movl 8+4(%esi),%eax /* convert dbg descriptor to ... */
movw 8(%esi),%ax
movl %eax,bdb_dbg_ljmp+1-KERNBASE /* ... immediate offset ... */
movl 8+2(%esi),%eax
movw %ax,bdb_dbg_ljmp+5-KERNBASE /* ... and selector for ljmp */
- movl 24+4(%esi),%eax /* same for bpt descriptor */
+ movl 24+4(%esi),%eax /* same for bpt descriptor */
movw 24(%esi),%ax
movl %eax,bdb_bpt_ljmp+1-KERNBASE
movl 24+2(%esi),%eax
@@ -407,7 +398,7 @@ ENTRY(btext)
movl $_idt-KERNBASE,%edi
movl %edi,6+2(%esp)
movl $8*4/4,%ecx
- rep /* copy idt */
+ rep /* copy idt */
movsl
lgdt (%esp)
@@ -415,12 +406,13 @@ ENTRY(btext)
addl $2*6,%esp
popal
+#endif
/* load base of page directory and enable mapping */
- movl %esi,%eax /* phys address of ptd in proc 0 */
+ movl %esi,%eax /* phys address of ptd in proc 0 */
orl $I386_CR3PAT,%eax
- movl %eax,%cr3 /* load ptd addr into mmu */
- movl %cr0,%eax /* get control word */
+ movl %eax,%cr3 /* load ptd addr into mmu */
+ movl %cr0,%eax /* get control word */
/*
* XXX it is now safe to always (attempt to) set CR0_WP and to set up
* the page tables assuming it works, so USE_486_WRITE_PROTECT will go
@@ -430,58 +422,59 @@ ENTRY(btext)
#ifdef USE_486_WRITE_PROTECT
orl $CR0_PE|CR0_PG|CR0_WP,%eax /* enable paging */
#else
- orl $CR0_PE|CR0_PG,%eax /* enable paging */
+ orl $CR0_PE|CR0_PG,%eax /* enable paging */
#endif
- movl %eax,%cr0 /* and let's page NOW! */
+ movl %eax,%cr0 /* and let's page NOW! */
- pushl $begin /* jump to high mem */
+ pushl $begin /* jump to high mem */
ret
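The pushl $begin / ret pair is what moves execution from the low identity mapping up to the KERNBASE-relative addresses the kernel is linked at; the control-register write just before it turns translation on. The bits involved, as a sketch (architectural i386/i486 values; the macro names are assumed to come from the machine headers):

    #define CR0_PE  0x00000001                  /* protection enable */
    #define CR0_WP  0x00010000                  /* 486 only: write protect in ring 0 */
    #define CR0_PG  0x80000000                  /* paging enable */

    static unsigned int
    sketch_new_cr0(unsigned int cr0, int use_486_wp)
    {
            cr0 |= CR0_PE | CR0_PG;
            if (use_486_wp)
                    cr0 |= CR0_WP;              /* makes PG_KR stick in supervisor mode */
            return cr0;
    }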
begin: /* now running relocated at KERNBASE where the system is linked to run */
- .globl _Crtat /* XXX - locore should not know about */
- movl _Crtat,%eax /* variables of device drivers (pccons)! */
+ .globl _Crtat /* XXX - locore should not know about */
+ movl _Crtat,%eax /* variables of device drivers (pccons)! */
subl $(KERNBASE+0xA0000),%eax
- movl _atdevphys,%edx /* get pte PA */
- subl _KPTphys,%edx /* remove base of ptes, now have phys offset */
- shll $PGSHIFT-2,%edx /* corresponding to virt offset */
- addl $KERNBASE,%edx /* add virtual base */
+ movl _atdevphys,%edx /* get pte PA */
+ subl _KPTphys,%edx /* remove base of ptes, now have phys offset */
+ shll $PGSHIFT-2,%edx /* corresponding to virt offset */
+ addl $KERNBASE,%edx /* add virtual base */
movl %edx,_atdevbase
addl %eax,%edx
movl %edx,_Crtat
- /* set up bootstrap stack */
+ /* set up bootstrap stack - 48 bytes */
movl $_kstack+UPAGES*NBPG-4*12,%esp /* bootstrap stack end location */
- xorl %eax,%eax /* mark end of frames */
+ xorl %eax,%eax /* mark end of frames */
movl %eax,%ebp
movl _proc0paddr,%eax
movl %esi,PCB_CR3(%eax)
+#ifdef BDE_DEBUGGER
/* relocate debugger gdt entries */
- movl $_gdt+8*9,%eax /* adjust slots 9-17 */
+ movl $_gdt+8*9,%eax /* adjust slots 9-17 */
movl $9,%ecx
reloc_gdt:
- movb $0xfe,7(%eax) /* top byte of base addresses, was 0, */
- addl $8,%eax /* now KERNBASE>>24 */
+ movb $0xfe,7(%eax) /* top byte of base addresses, was 0, */
+ addl $8,%eax /* now KERNBASE>>24 */
loop reloc_gdt
cmpl $0,_bdb_exists
je 1f
int $3
1:
+#endif
/*
* Skip over the page tables and the kernel stack
- * XXX 4 is kstack size
*/
- lea (NKPDE + 4) * NBPG(%esi),%esi
+ lea ((1+UPAGES+1+NKPDE)*NBPG)(%esi),%esi
- pushl %esi /* value of first for init386(first) */
- call _init386 /* wire 386 chip for unix operation */
+ pushl %esi /* value of first for init386(first) */
+ call _init386 /* wire 386 chip for unix operation */
movl $0,_PTD
- call _main /* autoconfiguration, mountroot etc */
+ call _main /* autoconfiguration, mountroot etc */
popl %esi
/*
@@ -495,25 +488,22 @@ reloc_gdt:
movl __ucodesel,%eax
movl __udatasel,%ecx
/* build outer stack frame */
- pushl %ecx /* user ss */
- pushl $USRSTACK /* user esp */
- pushl %eax /* user cs */
- pushl $0 /* user ip */
+ pushl %ecx /* user ss */
+ pushl $USRSTACK /* user esp */
+ pushl %eax /* user cs */
+ pushl $0 /* user ip */
movl %cx,%ds
movl %cx,%es
- movl %ax,%fs /* double map cs to fs */
- movl %cx,%gs /* and ds to gs */
- lret /* goto user! */
+ movl %ax,%fs /* double map cs to fs */
+ movl %cx,%gs /* and ds to gs */
+ lret /* goto user! */
- pushl $lretmsg1 /* "should never get here!" */
+ pushl $lretmsg1 /* "should never get here!" */
call _panic
lretmsg1:
.asciz "lret: toinit\n"
- .set exec,59
- .set exit,1
-
#define LCALL(x,y) .byte 0x9a ; .long y; .word x
/*
* Icode is copied out to process 1 and executed in user mode:
@@ -521,36 +511,36 @@ lretmsg1:
* If the execve fails, process 1 exits and the system panics.
*/
NON_GPROF_ENTRY(icode)
- pushl $0 /* envp for execve() */
+ pushl $0 /* envp for execve() */
-# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
+# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
movl $argv,%eax
subl $_icode,%eax
- pushl %eax /* argp for execve() */
+ pushl %eax /* argp for execve() */
# pushl $init-_icode
movl $init,%eax
subl $_icode,%eax
- pushl %eax /* fname for execve() */
+ pushl %eax /* fname for execve() */
- pushl %eax /* dummy return address */
+ pushl %eax /* dummy return address */
- movl $exec,%eax
+ movl $SYS_execve,%eax
LCALL(0x7,0x0)
/* exit if something botches up in the above execve() */
- pushl %eax /* execve failed, the errno will do for an */
- /* exit code because errnos are < 128 */
- pushl %eax /* dummy return address */
- movl $exit,%eax
+ pushl %eax /* execve failed, the errno will do for an */
+ /* exit code because errnos are < 128 */
+ pushl %eax /* dummy return address */
+ movl $SYS_exit,%eax
LCALL(0x7,0x0)
init:
.asciz "/sbin/init"
ALIGN_DATA
argv:
- .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
- .long eicode-_icode /* argv[1] follows icode after copyout */
+ .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
+ .long eicode-_icode /* argv[1] follows icode after copyout */
.long 0
eicode:
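What the icode above amounts to in C, as a sketch only (the real thing must be position independent, which is why it is hand-written and copied out to process 1 by the kernel):

    #include <errno.h>
    #include <unistd.h>

    static void
    sketch_icode(void)
    {
            char *argv[] = { "init", 0 };       /* argv[0] is "/sbin/init" + 6 */

            execve("/sbin/init", argv, 0);      /* normally never returns */
            _exit(errno);                       /* errno < 128, reused as exit code */
    }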
@@ -560,1604 +550,14 @@ _szicode:
NON_GPROF_ENTRY(sigcode)
call SIGF_HANDLER(%esp)
- lea SIGF_SC(%esp),%eax /* scp (the call may have clobbered the */
- /* copy at 8(%esp)) */
+ lea SIGF_SC(%esp),%eax /* scp (the call may have clobbered the */
+ /* copy at 8(%esp)) */
pushl %eax
- pushl %eax /* junk to fake return address */
- movl $103,%eax /* XXX sigreturn() */
- LCALL(0x7,0) /* enter kernel with args on stack */
- hlt /* never gets here */
+ pushl %eax /* junk to fake return address */
+ movl $103,%eax /* XXX sigreturn() */
+ LCALL(0x7,0) /* enter kernel with args on stack */
+ hlt /* never gets here */
.globl _szsigcode
_szsigcode:
.long _szsigcode-_sigcode
-
-/*
- * Support routines for GCC, general C-callable functions
- */
-ENTRY(__udivsi3)
- movl 4(%esp),%eax
- xorl %edx,%edx
- divl 8(%esp)
- ret
-
-ENTRY(__divsi3)
- movl 4(%esp),%eax
- cltd
- idivl 8(%esp)
- ret
-
- /*
- * I/O bus instructions via C
- */
-ENTRY(inb) /* val = inb(port) */
- movl 4(%esp),%edx
- subl %eax,%eax
- NOP
- inb %dx,%al
- ret
-
-ENTRY(inw) /* val = inw(port) */
- movl 4(%esp),%edx
- subl %eax,%eax
- NOP
- inw %dx,%ax
- ret
-
-ENTRY(insb) /* insb(port, addr, cnt) */
- pushl %edi
- movw 8(%esp),%dx
- movl 12(%esp),%edi
- movl 16(%esp),%ecx
- cld
- NOP
- rep
- insb
- NOP
- movl %edi,%eax
- popl %edi
- ret
-
-ENTRY(insw) /* insw(port, addr, cnt) */
- pushl %edi
- movw 8(%esp),%dx
- movl 12(%esp),%edi
- movl 16(%esp),%ecx
- cld
- NOP
- rep
- insw
- NOP
- movl %edi,%eax
- popl %edi
- ret
-
-ENTRY(rtcin) /* rtcin(val) */
- movl 4(%esp),%eax
- outb %al,$0x70
- subl %eax,%eax
- inb $0x71,%al
- ret
-
-ENTRY(outb) /* outb(port, val) */
- movl 4(%esp),%edx
- NOP
- movl 8(%esp),%eax
- outb %al,%dx
- NOP
- ret
-
-ENTRY(outw) /* outw(port, val) */
- movl 4(%esp),%edx
- NOP
- movl 8(%esp),%eax
- outw %ax,%dx
- NOP
- ret
-
-ENTRY(outsb) /* outsb(port, addr, cnt) */
- pushl %esi
- movw 8(%esp),%dx
- movl 12(%esp),%esi
- movl 16(%esp),%ecx
- cld
- NOP
- rep
- outsb
- NOP
- movl %esi,%eax
- popl %esi
- ret
-
-ENTRY(outsw) /* outsw(port, addr, cnt) */
- pushl %esi
- movw 8(%esp),%dx
- movl 12(%esp),%esi
- movl 16(%esp),%ecx
- cld
- NOP
- rep
- outsw
- NOP
- movl %esi,%eax
- popl %esi
- ret
-
- /*
- * bcopy family
- */
-ENTRY(bzero) /* void bzero(void *base, u_int cnt) */
- pushl %edi
- movl 8(%esp),%edi
- movl 12(%esp),%ecx
- xorl %eax,%eax
- shrl $2,%ecx
- cld
- rep
- stosl
- movl 12(%esp),%ecx
- andl $3,%ecx
- rep
- stosb
- popl %edi
- ret
-
-ENTRY(fillw) /* fillw(pat, base, cnt) */
- pushl %edi
- movl 8(%esp),%eax
- movl 12(%esp),%edi
- movl 16(%esp),%ecx
- cld
- rep
- stosw
- popl %edi
- ret
-
-ENTRY(bcopyb)
-bcopyb:
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi
- movl 16(%esp),%edi
- movl 20(%esp),%ecx
- cmpl %esi,%edi /* potentially overlapping? */
- jnb 1f
- cld /* nope, copy forwards */
- rep
- movsb
- popl %edi
- popl %esi
- ret
-
- ALIGN_TEXT
-1:
- addl %ecx,%edi /* copy backwards. */
- addl %ecx,%esi
- std
- decl %edi
- decl %esi
- rep
- movsb
- popl %edi
- popl %esi
- cld
- ret
-
-ENTRY(bcopyw)
-bcopyw:
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi
- movl 16(%esp),%edi
- movl 20(%esp),%ecx
- cmpl %esi,%edi /* potentially overlapping? */
- jnb 1f
- cld /* nope, copy forwards */
- shrl $1,%ecx /* copy by 16-bit words */
- rep
- movsw
- adc %ecx,%ecx /* any bytes left? */
- rep
- movsb
- popl %edi
- popl %esi
- ret
-
- ALIGN_TEXT
-1:
- addl %ecx,%edi /* copy backwards */
- addl %ecx,%esi
- std
- andl $1,%ecx /* any fractional bytes? */
- decl %edi
- decl %esi
- rep
- movsb
- movl 20(%esp),%ecx /* copy remainder by 16-bit words */
- shrl $1,%ecx
- decl %esi
- decl %edi
- rep
- movsw
- popl %edi
- popl %esi
- cld
- ret
-
-ENTRY(bcopyx)
- movl 16(%esp),%eax
- cmpl $2,%eax
- je bcopyw /* not _bcopyw, to avoid multiple mcounts */
- cmpl $4,%eax
- je bcopy
- jmp bcopyb
-
- /*
- * (ov)bcopy(src, dst, cnt)
- * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
- */
-ALTENTRY(ovbcopy)
-ENTRY(bcopy)
-bcopy:
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi
- movl 16(%esp),%edi
- movl 20(%esp),%ecx
- cmpl %esi,%edi /* potentially overlapping? */
- jnb 1f
- cld /* nope, copy forwards */
- shrl $2,%ecx /* copy by 32-bit words */
- rep
- movsl
- movl 20(%esp),%ecx
- andl $3,%ecx /* any bytes left? */
- rep
- movsb
- popl %edi
- popl %esi
- ret
-
- ALIGN_TEXT
-1:
- addl %ecx,%edi /* copy backwards */
- addl %ecx,%esi
- std
- andl $3,%ecx /* any fractional bytes? */
- decl %edi
- decl %esi
- rep
- movsb
- movl 20(%esp),%ecx /* copy remainder by 32-bit words */
- shrl $2,%ecx
- subl $3,%esi
- subl $3,%edi
- rep
- movsl
- popl %edi
- popl %esi
- cld
- ret
-
-ALTENTRY(ntohl)
-ENTRY(htonl)
- movl 4(%esp),%eax
-#ifdef i486
- /* XXX */
- /* Since Gas 1.38 does not grok bswap this has been coded as the
- * equivalent bytes. This can be changed back to bswap when we
- * upgrade to a newer version of Gas */
- /* bswap %eax */
- .byte 0x0f
- .byte 0xc8
-#else
- xchgb %al,%ah
- roll $16,%eax
- xchgb %al,%ah
-#endif
- ret
-
-ALTENTRY(ntohs)
-ENTRY(htons)
- movzwl 4(%esp),%eax
- xchgb %al,%ah
- ret
-
-/*****************************************************************************/
-/* copyout and fubyte family */
-/*****************************************************************************/
-/*
- * Access user memory from inside the kernel. These routines and possibly
- * the math- and DOS emulators should be the only places that do this.
- *
- * We have to access the memory with user's permissions, so use a segment
- * selector with RPL 3. For writes to user space we have to additionally
- * check the PTE for write permission, because the 386 does not check
- * write permissions when we are executing with EPL 0. The 486 does check
- * this if the WP bit is set in CR0, so we can use a simpler version here.
- *
- * These routines set curpcb->onfault for the time they execute. When a
- * protection violation occurs inside the functions, the trap handler
- * returns to *curpcb->onfault instead of the function.
- */
-
-
-ENTRY(copyout) /* copyout(from_kernel, to_user, len) */
- movl _curpcb,%eax
- movl $copyout_fault,PCB_ONFAULT(%eax)
- pushl %esi
- pushl %edi
- pushl %ebx
- movl 16(%esp),%esi
- movl 20(%esp),%edi
- movl 24(%esp),%ebx
- orl %ebx,%ebx /* anything to do? */
- jz done_copyout
-
- /*
- * Check explicitly for non-user addresses. If 486 write protection
- * is being used, this check is essential because we are in kernel
- * mode so the h/w does not provide any protection against writing
- * kernel addresses.
- *
- * Otherwise, it saves having to load and restore %es to get the
- * usual segment-based protection (the destination segment for movs
- * is always %es). The other explicit checks for user-writablility
- * are not quite sufficient. They fail for the user area because
- * we mapped the user area read/write to avoid having an #ifdef in
- * vm_machdep.c. They fail for user PTEs and/or PTDs! (107
- * addresses including 0xff800000 and 0xfc000000). I'm not sure if
- * this can be fixed. Marking the PTEs supervisor mode and the
- * PDE's user mode would almost work, but there may be a problem
- * with the self-referential PDE.
- */
- movl %edi,%eax
- addl %ebx,%eax
- jc copyout_fault
-#define VM_END_USER_ADDRESS 0xFDBFE000 /* XXX */
- cmpl $VM_END_USER_ADDRESS,%eax
- ja copyout_fault
-
-#ifndef USE_486_WRITE_PROTECT
- /*
- * We have to check each PTE for user write permission.
- * The checking may cause a page fault, so it is important to set
- * up everything for return via copyout_fault before here.
- */
- /* compute number of pages */
- movl %edi,%ecx
- andl $NBPG-1,%ecx
- addl %ebx,%ecx
- decl %ecx
- shrl $IDXSHIFT+2,%ecx
- incl %ecx
-
- /* compute PTE offset for start address */
- movl %edi,%edx
- shrl $IDXSHIFT,%edx
- andb $0xfc,%dl
-
-1: /* check PTE for each page */
- movb _PTmap(%edx),%al
- andb $0x07,%al /* Pages must be VALID + USERACC + WRITABLE */
- cmpb $0x07,%al
- je 2f
-
- /* simulate a trap */
- pushl %edx
- pushl %ecx
- shll $IDXSHIFT,%edx
- pushl %edx
- call _trapwrite /* trapwrite(addr) */
- popl %edx
- popl %ecx
- popl %edx
-
- orl %eax,%eax /* if not ok, return EFAULT */
- jnz copyout_fault
-
-2:
- addl $4,%edx
- decl %ecx
- jnz 1b /* check next page */
-#endif /* ndef USE_486_WRITE_PROTECT */
-
- /* bcopy(%esi, %edi, %ebx) */
- cld
- movl %ebx,%ecx
- shrl $2,%ecx
- rep
- movsl
- movb %bl,%cl
- andb $3,%cl /* XXX can we trust the rest of %ecx on clones? */
- rep
- movsb
-
-done_copyout:
- popl %ebx
- popl %edi
- popl %esi
- xorl %eax,%eax
- movl _curpcb,%edx
- movl %eax,PCB_ONFAULT(%edx)
- ret
-
- ALIGN_TEXT
-copyout_fault:
- popl %ebx
- popl %edi
- popl %esi
- movl _curpcb,%edx
- movl $0,PCB_ONFAULT(%edx)
- movl $EFAULT,%eax
- ret
-
-ENTRY(copyin) /* copyin(from_user, to_kernel, len) */
- movl _curpcb,%eax
- movl $copyin_fault,PCB_ONFAULT(%eax)
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi /* caddr_t from */
- movl 16(%esp),%edi /* caddr_t to */
- movl 20(%esp),%ecx /* size_t len */
-
- movb %cl,%al
- shrl $2,%ecx /* copy longword-wise */
- cld
- gs
- rep
- movsl
- movb %al,%cl
- andb $3,%cl /* copy remaining bytes */
- gs
- rep
- movsb
-
- popl %edi
- popl %esi
- xorl %eax,%eax
- movl _curpcb,%edx
- movl %eax,PCB_ONFAULT(%edx)
- ret
-
- ALIGN_TEXT
-copyin_fault:
- popl %edi
- popl %esi
- movl _curpcb,%edx
- movl $0,PCB_ONFAULT(%edx)
- movl $EFAULT,%eax
- ret
-
- /*
- * fu{byte,sword,word} : fetch a byte(sword, word) from user memory
- */
-ALTENTRY(fuiword)
-ENTRY(fuword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- gs
- movl (%edx),%eax
- movl $0,PCB_ONFAULT(%ecx)
- ret
-
-ENTRY(fusword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- gs
- movzwl (%edx),%eax
- movl $0,PCB_ONFAULT(%ecx)
- ret
-
-ALTENTRY(fuibyte)
-ENTRY(fubyte)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- gs
- movzbl (%edx),%eax
- movl $0,PCB_ONFAULT(%ecx)
- ret
-
- ALIGN_TEXT
-fusufault:
- movl _curpcb,%ecx
- xorl %eax,%eax
- movl %eax,PCB_ONFAULT(%ecx)
- decl %eax
- ret
-
- /*
- * su{byte,sword,word}: write a byte(word, longword) to user memory
- */
-#ifdef USE_486_WRITE_PROTECT
- /*
- * we only have to set the right segment selector.
- */
-ALTENTRY(suiword)
-ENTRY(suword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movl 8(%esp),%eax
- gs
- movl %eax,(%edx)
- xorl %eax,%eax
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-ENTRY(susword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movw 8(%esp),%ax
- gs
- movw %ax,(%edx)
- xorl %eax,%eax
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-ALTENTRY(suibyte)
-ENTRY(subyte)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movb 8(%esp),%al
- gs
- movb %al,(%edx)
- xorl %eax,%eax
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-
-#else /* USE_486_WRITE_PROTECT */
- /*
- * here starts the trouble again: check PTE, twice if word crosses
- * a page boundary.
- */
- /* XXX - page boundary crossing is not handled yet */
-
-ALTENTRY(suibyte)
-ENTRY(subyte)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movl %edx,%eax
- shrl $IDXSHIFT,%edx
- andb $0xfc,%dl
- movb _PTmap(%edx),%dl
- andb $0x7,%dl /* must be VALID + USERACC + WRITE */
- cmpb $0x7,%dl
- je 1f
- /* simulate a trap */
- pushl %eax
- call _trapwrite
- popl %edx
- orl %eax,%eax
- jnz fusufault
-1:
- movl 4(%esp),%edx
- movl 8(%esp),%eax
- gs
- movb %al,(%edx)
- xorl %eax,%eax
- movl _curpcb,%ecx
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-ENTRY(susword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movl %edx,%eax
- shrl $IDXSHIFT,%edx
- andb $0xfc,%dl
- movb _PTmap(%edx),%dl
- andb $0x7,%dl /* must be VALID + USERACC + WRITE */
- cmpb $0x7,%dl
- je 1f
- /* simulate a trap */
- pushl %eax
- call _trapwrite
- popl %edx
- orl %eax,%eax
- jnz fusufault
-1:
- movl 4(%esp),%edx
- movl 8(%esp),%eax
- gs
- movw %ax,(%edx)
- xorl %eax,%eax
- movl _curpcb,%ecx
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-ALTENTRY(suiword)
-ENTRY(suword)
- movl _curpcb,%ecx
- movl $fusufault,PCB_ONFAULT(%ecx)
- movl 4(%esp),%edx
- movl %edx,%eax
- shrl $IDXSHIFT,%edx
- andb $0xfc,%dl
- movb _PTmap(%edx),%dl
- andb $0x7,%dl /* must be VALID + USERACC + WRITE */
- cmpb $0x7,%dl
- je 1f
- /* simulate a trap */
- pushl %eax
- call _trapwrite
- popl %edx
- orl %eax,%eax
- jnz fusufault
-1:
- movl 4(%esp),%edx
- movl 8(%esp),%eax
- gs
- movl %eax,0(%edx)
- xorl %eax,%eax
- movl _curpcb,%ecx
- movl %eax,PCB_ONFAULT(%ecx)
- ret
-
-#endif /* USE_486_WRITE_PROTECT */
-
-/*
- * copyoutstr(from, to, maxlen, int *lencopied)
- * copy a string from from to to, stop when a 0 character is reached.
- * return ENAMETOOLONG if string is longer than maxlen, and
- * EFAULT on protection violations. If lencopied is non-zero,
- * return the actual length in *lencopied.
- */
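An illustrative caller, as a sketch (prototype per the comment above; error names from errno.h):

    #include <errno.h>

    int copyoutstr(char *from, char *to, int maxlen, int *lencopied);

    static int
    sketch_copyout_name(char *kname, char *uaddr, int max)
    {
            int done, error;

            error = copyoutstr(kname, uaddr, max, &done);
            if (error == ENAMETOOLONG)
                    return error;               /* string plus NUL did not fit */
            if (error == EFAULT)
                    return error;               /* bad user address */
            return 0;                           /* done was filled in by the routine */
    }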
-#ifdef USE_486_WRITE_PROTECT
-
-ENTRY(copyoutstr)
- pushl %esi
- pushl %edi
- movl _curpcb,%ecx
- movl $cpystrflt,PCB_ONFAULT(%ecx)
-
- movl 12(%esp),%esi /* %esi = from */
- movl 16(%esp),%edi /* %edi = to */
- movl 20(%esp),%edx /* %edx = maxlen */
- incl %edx
-
-1:
- decl %edx
- jz 4f
- /*
- * gs override doesn't work for stosb. Use the same explicit check
- * as in copyout(). It's much slower now because it is per-char.
- * XXX - however, it would be faster to rewrite this function to use
- * strlen() and copyout().
- */
- cmpl $VM_END_USER_ADDRESS,%edi
- jae cpystrflt
- lodsb
- gs
- stosb
- orb %al,%al
- jnz 1b
- /* Success -- 0 byte reached */
- decl %edx
- xorl %eax,%eax
- jmp 6f
-4:
- /* edx is zero -- return ENAMETOOLONG */
- movl $ENAMETOOLONG,%eax
- jmp 6f
-
-#else /* ndef USE_486_WRITE_PROTECT */
-
-ENTRY(copyoutstr)
- pushl %esi
- pushl %edi
- movl _curpcb,%ecx
- movl $cpystrflt,PCB_ONFAULT(%ecx)
-
- movl 12(%esp),%esi /* %esi = from */
- movl 16(%esp),%edi /* %edi = to */
- movl 20(%esp),%edx /* %edx = maxlen */
-1:
- /*
- * It suffices to check that the first byte is in user space, because
- * we look at a page at a time and the end address is on a page
- * boundary.
- */
- cmpl $VM_END_USER_ADDRESS,%edi
- jae cpystrflt
- movl %edi,%eax
- shrl $IDXSHIFT,%eax
- andb $0xfc,%al
- movb _PTmap(%eax),%al
- andb $7,%al
- cmpb $7,%al
- je 2f
-
- /* simulate trap */
- pushl %edx
- pushl %edi
- call _trapwrite
- popl %edi
- popl %edx
- orl %eax,%eax
- jnz cpystrflt
-
-2: /* copy up to end of this page */
- movl %edi,%eax
- andl $NBPG-1,%eax
- movl $NBPG,%ecx
- subl %eax,%ecx /* ecx = NBPG - (src % NBPG) */
- cmpl %ecx,%edx
- jge 3f
- movl %edx,%ecx /* ecx = min(ecx, edx) */
-3:
- orl %ecx,%ecx
- jz 4f
- decl %ecx
- decl %edx
- lodsb
- stosb
- orb %al,%al
- jnz 3b
-
- /* Success -- 0 byte reached */
- decl %edx
- xorl %eax,%eax
- jmp 6f
-
-4: /* next page */
- orl %edx,%edx
- jnz 1b
- /* edx is zero -- return ENAMETOOLONG */
- movl $ENAMETOOLONG,%eax
- jmp 6f
-
-#endif /* USE_486_WRITE_PROTECT */
-
-/*
- * copyinstr(from, to, maxlen, int *lencopied)
- * copy a string from from to to, stop when a 0 character is reached.
- * return ENAMETOOLONG if string is longer than maxlen, and
- * EFAULT on protection violations. If lencopied is non-zero,
- * return the actual length in *lencopied.
- */
-ENTRY(copyinstr)
- pushl %esi
- pushl %edi
- movl _curpcb,%ecx
- movl $cpystrflt,PCB_ONFAULT(%ecx)
-
- movl 12(%esp),%esi /* %esi = from */
- movl 16(%esp),%edi /* %edi = to */
- movl 20(%esp),%edx /* %edx = maxlen */
- incl %edx
-
-1:
- decl %edx
- jz 4f
- gs
- lodsb
- stosb
- orb %al,%al
- jnz 1b
- /* Success -- 0 byte reached */
- decl %edx
- xorl %eax,%eax
- jmp 6f
-4:
- /* edx is zero -- return ENAMETOOLONG */
- movl $ENAMETOOLONG,%eax
- jmp 6f
-
-cpystrflt:
- movl $EFAULT,%eax
-6: /* set *lencopied and return %eax */
- movl _curpcb,%ecx
- movl $0,PCB_ONFAULT(%ecx)
- movl 20(%esp),%ecx
- subl %edx,%ecx
- movl 24(%esp),%edx
- orl %edx,%edx
- jz 7f
- movl %ecx,(%edx)
-7:
- popl %edi
- popl %esi
- ret
-
-
-/*
- * copystr(from, to, maxlen, int *lencopied)
- */
-ENTRY(copystr)
- pushl %esi
- pushl %edi
-
- movl 12(%esp),%esi /* %esi = from */
- movl 16(%esp),%edi /* %edi = to */
- movl 20(%esp),%edx /* %edx = maxlen */
- incl %edx
-
-1:
- decl %edx
- jz 4f
- lodsb
- stosb
- orb %al,%al
- jnz 1b
- /* Success -- 0 byte reached */
- decl %edx
- xorl %eax,%eax
- jmp 6f
-4:
- /* edx is zero -- return ENAMETOOLONG */
- movl $ENAMETOOLONG,%eax
-
-6: /* set *lencopied and return %eax */
- movl 20(%esp),%ecx
- subl %edx,%ecx
- movl 24(%esp),%edx
- orl %edx,%edx
- jz 7f
- movl %ecx,(%edx)
-7:
- popl %edi
- popl %esi
- ret
-
-/*
- * Handling of special 386 registers and descriptor tables etc
- */
-ENTRY(lgdt) /* void lgdt(struct region_descriptor *rdp); */
- /* reload the descriptor table */
- movl 4(%esp),%eax
- lgdt (%eax)
- /* flush the prefetch q */
- jmp 1f
- nop
-1:
- /* reload "stale" selectors */
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
- movl %ax,%ss
-
- /* reload code selector by turning return into intersegmental return */
- movl (%esp),%eax
- pushl %eax
-# movl $KCSEL,4(%esp)
- movl $8,4(%esp)
- lret
-
- /*
- * void lidt(struct region_descriptor *rdp);
- */
-ENTRY(lidt)
- movl 4(%esp),%eax
- lidt (%eax)
- ret
-
- /*
- * void lldt(u_short sel)
- */
-ENTRY(lldt)
- lldt 4(%esp)
- ret
-
- /*
- * void ltr(u_short sel)
- */
-ENTRY(ltr)
- ltr 4(%esp)
- ret
-
-ENTRY(ssdtosd) /* ssdtosd(*ssdp,*sdp) */
- pushl %ebx
- movl 8(%esp),%ecx
- movl 8(%ecx),%ebx
- shll $16,%ebx
- movl (%ecx),%edx
- roll $16,%edx
- movb %dh,%bl
- movb %dl,%bh
- rorl $8,%ebx
- movl 4(%ecx),%eax
- movw %ax,%dx
- andl $0xf0000,%eax
- orl %eax,%ebx
- movl 12(%esp),%ecx
- movl %edx,(%ecx)
- movl %ebx,4(%ecx)
- popl %ebx
- ret
-
-
-ENTRY(tlbflush) /* tlbflush() */
- movl %cr3,%eax
- orl $I386_CR3PAT,%eax
- movl %eax,%cr3
- ret
-
-
-ENTRY(load_cr0) /* load_cr0(cr0) */
- movl 4(%esp),%eax
- movl %eax,%cr0
- ret
-
-
-ENTRY(rcr0) /* rcr0() */
- movl %cr0,%eax
- ret
-
-
-ENTRY(rcr2) /* rcr2() */
- movl %cr2,%eax
- ret
-
-
-ENTRY(rcr3) /* rcr3() */
- movl %cr3,%eax
- ret
-
-
-ENTRY(load_cr3) /* void load_cr3(caddr_t cr3) */
- movl 4(%esp),%eax
- orl $I386_CR3PAT,%eax
- movl %eax,%cr3
- ret
-
-
-/*****************************************************************************/
-/* setjump, longjump */
-/*****************************************************************************/
-
-ENTRY(setjmp)
- movl 4(%esp),%eax
- movl %ebx,(%eax) /* save ebx */
- movl %esp,4(%eax) /* save esp */
- movl %ebp,8(%eax) /* save ebp */
- movl %esi,12(%eax) /* save esi */
- movl %edi,16(%eax) /* save edi */
- movl (%esp),%edx /* get rta */
- movl %edx,20(%eax) /* save eip */
- xorl %eax,%eax /* return(0); */
- ret
-
-ENTRY(longjmp)
- movl 4(%esp),%eax
- movl (%eax),%ebx /* restore ebx */
- movl 4(%eax),%esp /* restore esp */
- movl 8(%eax),%ebp /* restore ebp */
- movl 12(%eax),%esi /* restore esi */
- movl 16(%eax),%edi /* restore edi */
- movl 20(%eax),%edx /* get rta */
- movl %edx,(%esp) /* put in return frame */
- xorl %eax,%eax /* return(1); */
- incl %eax
- ret
-
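These save and restore only %ebx, %esp, %ebp, %esi, %edi and the return %eip, so nothing else can be relied upon across the jump. A hedged usage sketch (the buffer layout shown is illustrative, six longs to match the registers saved):

    struct sketch_jmpbuf { int r[6]; };         /* ebx esp ebp esi edi eip */
    extern int setjmp(struct sketch_jmpbuf *);
    extern void longjmp(struct sketch_jmpbuf *);

    static struct sketch_jmpbuf resume;

    static void
    sketch_driver(void)
    {
            if (setjmp(&resume) == 0) {
                    /* first return (0): run the code that may need unwinding */
                    return;
            }
            /* control arrives here with value 1 after longjmp(&resume) */
    }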
-
-/*****************************************************************************/
-/* Scheduling */
-/*****************************************************************************/
-
-/*
- * The following primitives manipulate the run queues.
- * _whichqs tells which of the 32 queues _qs
- * have processes in them. Setrq puts processes into queues, Remrq
- * removes them from queues. The running process is on no queue,
- * other processes are on a queue related to p->p_pri, divided by 4
- * actually to shrink the 0-127 range of priorities into the 32 available
- * queues.
- */
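A rough C picture of that bookkeeping, as a sketch (structure and field names are illustrative; the real ones live in the proc headers):

    struct sketch_proc {
            struct sketch_proc *p_link, *p_rlink;       /* queue linkage */
            unsigned char p_pri;                        /* priority, 0..127 */
    };
    struct sketch_qhdr { struct sketch_proc *ph_link, *ph_rlink; };

    extern int whichqs;                         /* bit i set => qs[i] non-empty */
    extern struct sketch_qhdr qs[32];           /* circular queue headers */

    static int
    sketch_queue_index(struct sketch_proc *p)
    {
            return p->p_pri >> 2;               /* 128 priorities into 32 queues */
    }

Setrq links the process onto the tail of qs[index] and sets the corresponding whichqs bit; Remrq unlinks it and clears the bit again if the queue went empty.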
-
- .globl _whichqs,_qs,_cnt,_panic
- .comm _noproc,4
- .comm _runrun,4
-
-/*
- * Setrq(p)
- *
- * Call should be made at spl6(), and p->p_stat should be SRUN
- */
-ENTRY(setrq)
- movl 4(%esp),%eax
- cmpl $0,P_RLINK(%eax) /* should not be on q already */
- je set1
- pushl $set2
- call _panic
-set1:
- movzbl P_PRI(%eax),%edx
- shrl $2,%edx
- btsl %edx,_whichqs /* set q full bit */
- shll $3,%edx
- addl $_qs,%edx /* locate q hdr */
- movl %edx,P_LINK(%eax) /* link process on tail of q */
- movl P_RLINK(%edx),%ecx
- movl %ecx,P_RLINK(%eax)
- movl %eax,P_RLINK(%edx)
- movl %eax,P_LINK(%ecx)
- ret
-
-set2: .asciz "setrq"
-
-/*
- * Remrq(p)
- *
- * Call should be made at spl6().
- */
-ENTRY(remrq)
- movl 4(%esp),%eax
- movzbl P_PRI(%eax),%edx
- shrl $2,%edx
- btrl %edx,_whichqs /* clear full bit, panic if clear already */
- jb rem1
- pushl $rem3
- call _panic
-rem1:
- pushl %edx
- movl P_LINK(%eax),%ecx /* unlink process */
- movl P_RLINK(%eax),%edx
- movl %edx,P_RLINK(%ecx)
- movl P_RLINK(%eax),%ecx
- movl P_LINK(%eax),%edx
- movl %edx,P_LINK(%ecx)
- popl %edx
- movl $_qs,%ecx
- shll $3,%edx
- addl %edx,%ecx
- cmpl P_LINK(%ecx),%ecx /* q still has something? */
- je rem2
- shrl $3,%edx /* yes, set bit as still full */
- btsl %edx,_whichqs
-rem2:
- movl $0,P_RLINK(%eax) /* zap reverse link to indicate off list */
- ret
-
-rem3: .asciz "remrq"
-sw0: .asciz "swtch"
-
-/*
- * When no processes are on the runq, Swtch branches to idle
- * to wait for something to come ready.
- */
- ALIGN_TEXT
-Idle:
- sti
- SHOW_STI
-
- ALIGN_TEXT
-idle_loop:
- call _spl0
- cmpl $0,_whichqs
- jne sw1
- hlt /* wait for interrupt */
- jmp idle_loop
-
-badsw:
- pushl $sw0
- call _panic
- /*NOTREACHED*/
-
-/*
- * Swtch()
- */
- SUPERALIGN_TEXT /* so profiling doesn't lump Idle with swtch().. */
-ENTRY(swtch)
-
- incl _cnt+V_SWTCH
-
- /* switch to new process. first, save context as needed */
-
- movl _curproc,%ecx
-
- /* if no process to save, don't bother */
- testl %ecx,%ecx
- je sw1
-
- movl P_ADDR(%ecx),%ecx
-
- movl (%esp),%eax /* Hardware registers */
- movl %eax,PCB_EIP(%ecx)
- movl %ebx,PCB_EBX(%ecx)
- movl %esp,PCB_ESP(%ecx)
- movl %ebp,PCB_EBP(%ecx)
- movl %esi,PCB_ESI(%ecx)
- movl %edi,PCB_EDI(%ecx)
-
-#if NNPX > 0
- /* have we used fp, and need a save? */
- mov _curproc,%eax
- cmp %eax,_npxproc
- jne 1f
- pushl %ecx /* h/w bugs make saving complicated */
- leal PCB_SAVEFPU(%ecx),%eax
- pushl %eax
- call _npxsave /* do it in a big C function */
- popl %eax
- popl %ecx
-1:
-#endif /* NNPX > 0 */
-
- movl _CMAP2,%eax /* save temporary map PTE */
- movl %eax,PCB_CMAP2(%ecx) /* in our context */
- movl $0,_curproc /* out of process */
-
-# movw _cpl,%ax
-# movw %ax,PCB_IML(%ecx) /* save ipl */
-
- /* save is done, now choose a new process or idle */
-sw1:
- cli
- SHOW_CLI
- movl _whichqs,%edi
-2:
- /* XXX - bsf is sloow */
- bsfl %edi,%eax /* find a full q */
- je Idle /* if none, idle */
- /* XX update whichqs? */
-swfnd:
- btrl %eax,%edi /* clear q full status */
- jnb 2b /* if it was clear, look for another */
- movl %eax,%ebx /* save which one we are using */
-
- shll $3,%eax
- addl $_qs,%eax /* select q */
- movl %eax,%esi
-
-#ifdef DIAGNOSTIC
- cmpl P_LINK(%eax),%eax /* linked to self? (e.g. not on list) */
- je badsw /* not possible */
-#endif
-
- movl P_LINK(%eax),%ecx /* unlink from front of process q */
- movl P_LINK(%ecx),%edx
- movl %edx,P_LINK(%eax)
- movl P_RLINK(%ecx),%eax
- movl %eax,P_RLINK(%edx)
-
- cmpl P_LINK(%ecx),%esi /* q empty */
- je 3f
- btsl %ebx,%edi /* nope, set to indicate full */
-3:
- movl %edi,_whichqs /* update q status */
-
- movl $0,%eax
- movl %eax,_want_resched
-
-#ifdef DIAGNOSTIC
- cmpl %eax,P_WCHAN(%ecx)
- jne badsw
- cmpb $SRUN,P_STAT(%ecx)
- jne badsw
-#endif
-
- movl %eax,P_RLINK(%ecx) /* isolate process to run */
- movl P_ADDR(%ecx),%edx
- movl PCB_CR3(%edx),%ebx
-
- /* switch address space */
- movl %ebx,%cr3
-
- /* restore context */
- movl PCB_EBX(%edx),%ebx
- movl PCB_ESP(%edx),%esp
- movl PCB_EBP(%edx),%ebp
- movl PCB_ESI(%edx),%esi
- movl PCB_EDI(%edx),%edi
- movl PCB_EIP(%edx),%eax
- movl %eax,(%esp)
-
- movl PCB_CMAP2(%edx),%eax /* get temporary map */
- movl %eax,_CMAP2 /* reload temporary map PTE */
-
- movl %ecx,_curproc /* into next process */
- movl %edx,_curpcb
-
- pushl %edx /* save p to return */
-/*
- * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
- * I think restoring the cpl is unnecessary, but we must turn off the cli
- * now that spl*() don't do it as a side affect.
- */
- pushl PCB_IML(%edx)
- sti
- SHOW_STI
-#if 0
- call _splx
-#endif
- addl $4,%esp
-/*
- * XXX - 0.0 gets here via swtch_to_inactive(). I think 0.1 gets here in the
- * same way. Better return a value.
- */
- popl %eax /* return(p); */
- ret
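The selection at sw1 above boils down to a find-first-set over whichqs; a C sketch with ffs() standing in for the bsfl instruction:

    extern int ffs(int);

    static int
    sketch_pick_queue(int whichqs_copy)
    {
            int q = ffs(whichqs_copy);          /* lowest set bit, 1-based */

            return q ? q - 1 : -1;              /* -1 means nothing runnable: idle */
    }

The head of the chosen queue is then unlinked, and the queue's whichqs bit is set again only if processes remain on it.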
-
-ENTRY(mvesp)
- movl %esp,%eax
- ret
-/*
- * struct proc *swtch_to_inactive(p) ; struct proc *p;
- *
- * At exit of a process, move off the address space of the
- * process and onto a "safe" one. Then, on a temporary stack
- * return and run code that disposes of the old state.
- * Since this code requires a parameter from the "old" stack,
- * pass it back as a return value.
- */
-ENTRY(swtch_to_inactive)
- popl %edx /* old pc */
- popl %eax /* arg, our return value */
- movl _IdlePTD,%ecx
- movl %ecx,%cr3 /* good bye address space */
- #write buffer?
- movl $tmpstk-4,%esp /* temporary stack, compensated for call */
- jmp %edx /* return, execute remainder of cleanup */
-
-/*
- * savectx(pcb, altreturn)
- * Update pcb, saving current processor state and arranging
- * for alternate return ala longjmp in swtch if altreturn is true.
- */
-ENTRY(savectx)
- movl 4(%esp),%ecx
- movw _cpl,%ax
- movw %ax,PCB_IML(%ecx)
- movl (%esp),%eax
- movl %eax,PCB_EIP(%ecx)
- movl %ebx,PCB_EBX(%ecx)
- movl %esp,PCB_ESP(%ecx)
- movl %ebp,PCB_EBP(%ecx)
- movl %esi,PCB_ESI(%ecx)
- movl %edi,PCB_EDI(%ecx)
-
-#if NNPX > 0
- /*
- * If npxproc == NULL, then the npx h/w state is irrelevant and the
- * state had better already be in the pcb. This is true for forks
- * but not for dumps (the old book-keeping with FP flags in the pcb
- * always lost for dumps because the dump pcb has 0 flags).
- *
- * If npxproc != NULL, then we have to save the npx h/w state to
- * npxproc's pcb and copy it to the requested pcb, or save to the
- * requested pcb and reload. Copying is easier because we would
- * have to handle h/w bugs for reloading. We used to lose the
- * parent's npx state for forks by forgetting to reload.
- */
- mov _npxproc,%eax
- testl %eax,%eax
- je 1f
-
- pushl %ecx
- movl P_ADDR(%eax),%eax
- leal PCB_SAVEFPU(%eax),%eax
- pushl %eax
- pushl %eax
- call _npxsave
- popl %eax
- popl %eax
- popl %ecx
-
- pushl %ecx
- pushl $108+8*2 /* XXX h/w state size + padding */
- leal PCB_SAVEFPU(%ecx),%ecx
- pushl %ecx
- pushl %eax
- call _bcopy
- addl $12,%esp
- popl %ecx
-1:
-#endif /* NNPX > 0 */
-
- movl _CMAP2,%edx /* save temporary map PTE */
- movl %edx,PCB_CMAP2(%ecx) /* in our context */
-
- cmpl $0,8(%esp)
- je 1f
- movl %esp,%edx /* relocate current sp relative to pcb */
- subl $_kstack,%edx /* (sp is relative to kstack): */
- addl %edx,%ecx /* pcb += sp - kstack; */
- movl %eax,(%ecx) /* write return pc at (relocated) sp@ */
- /* this mess deals with replicating register state gcc hides */
- movl 12(%esp),%eax
- movl %eax,12(%ecx)
- movl 16(%esp),%eax
- movl %eax,16(%ecx)
- movl 20(%esp),%eax
- movl %eax,20(%ecx)
- movl 24(%esp),%eax
- movl %eax,24(%ecx)
-1:
- xorl %eax,%eax /* return 0 */
- ret
-
-/*
- * addupc(int pc, struct uprof *up, int ticks):
- * update profiling information for the user process.
- */
-ENTRY(addupc)
- pushl %ebp
- movl %esp,%ebp
- movl 12(%ebp),%edx /* up */
- movl 8(%ebp),%eax /* pc */
-
- subl PR_OFF(%edx),%eax /* pc -= up->pr_off */
- jl L1 /* if (pc < 0) return */
-
- shrl $1,%eax /* praddr = pc >> 1 */
- imull PR_SCALE(%edx),%eax /* praddr *= up->pr_scale */
- shrl $15,%eax /* praddr = praddr << 15 */
- andl $-2,%eax /* praddr &= ~1 */
-
- cmpl PR_SIZE(%edx),%eax /* if (praddr > up->pr_size) return */
- ja L1
-
-/* addl %eax,%eax /* praddr -> word offset */
- addl PR_BASE(%edx),%eax /* praddr += up-> pr_base */
- movl 16(%ebp),%ecx /* ticks */
-
- movl _curpcb,%edx
- movl $proffault,PCB_ONFAULT(%edx)
- addl %ecx,(%eax) /* storage location += ticks */
- movl $0,PCB_ONFAULT(%edx)
-L1:
- leave
- ret
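The bucket arithmetic in addupc() above, rendered in C as a sketch (the struct uprof fields are passed in directly to keep it self-contained; the final add may fault and is covered by the onfault hook, as below):

    static void
    sketch_addupc(unsigned int pc, unsigned int pr_off, unsigned int pr_scale,
        unsigned int pr_size, char *pr_base, int ticks)
    {
            int off = pc - pr_off;                      /* the jl above is a signed test */

            if (off < 0)
                    return;
            off = (((unsigned int)off >> 1) * pr_scale) >> 15;  /* 16-bit fixed-point scale */
            off &= ~1;                                  /* halfword align */
            if ((unsigned int)off > pr_size)
                    return;
            *(unsigned int *)(pr_base + off) += ticks;  /* 32-bit add, as the addl does */
    }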
-
- ALIGN_TEXT
-proffault:
- /* if we get a fault, then kill profiling all together */
- movl $0,PCB_ONFAULT(%edx) /* squish the fault handler */
- movl 12(%ebp),%ecx
- movl $0,PR_SCALE(%ecx) /* up->pr_scale = 0 */
- leave
- ret
-
-/* To be done: */
-ENTRY(astoff)
- ret
-
-
-/*****************************************************************************/
-/* Trap handling */
-/*****************************************************************************/
-/*
- * Trap and fault vector routines
- *
- * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
- * control. The sti's give the standard losing behaviour for ddb and kgdb.
- */
-#define IDTVEC(name) ALIGN_TEXT; .globl _X/**/name; _X/**/name:
-#define TRAP(a) pushl $(a) ; jmp alltraps
-#ifdef KGDB
-# define BPTTRAP(a) sti; pushl $(a) ; jmp bpttraps
-#else
-# define BPTTRAP(a) sti; TRAP(a)
-#endif
-
-IDTVEC(div)
- pushl $0; TRAP(T_DIVIDE)
-IDTVEC(dbg)
-#ifdef BDBTRAP
- BDBTRAP(dbg)
-#endif
- pushl $0; BPTTRAP(T_TRCTRAP)
-IDTVEC(nmi)
- pushl $0; TRAP(T_NMI)
-IDTVEC(bpt)
-#ifdef BDBTRAP
- BDBTRAP(bpt)
-#endif
- pushl $0; BPTTRAP(T_BPTFLT)
-IDTVEC(ofl)
- pushl $0; TRAP(T_OFLOW)
-IDTVEC(bnd)
- pushl $0; TRAP(T_BOUND)
-IDTVEC(ill)
- pushl $0; TRAP(T_PRIVINFLT)
-IDTVEC(dna)
- pushl $0; TRAP(T_DNA)
-IDTVEC(dble)
- TRAP(T_DOUBLEFLT)
- /*PANIC("Double Fault");*/
-IDTVEC(fpusegm)
- pushl $0; TRAP(T_FPOPFLT)
-IDTVEC(tss)
- TRAP(T_TSSFLT)
- /*PANIC("TSS not valid");*/
-IDTVEC(missing)
- TRAP(T_SEGNPFLT)
-IDTVEC(stk)
- TRAP(T_STKFLT)
-IDTVEC(prot)
- TRAP(T_PROTFLT)
-IDTVEC(page)
- TRAP(T_PAGEFLT)
-IDTVEC(rsvd)
- pushl $0; TRAP(T_RESERVED)
-IDTVEC(fpu)
-#if NNPX > 0
- /*
- * Handle like an interrupt so that we can call npxintr to clear the
- * error. It would be better to handle npx interrupts as traps but
- * this is difficult for nested interrupts.
- */
- pushl $0 /* dummy error code */
- pushl $T_ASTFLT
- pushal
- nop /* silly, the bug is for popal and it only
- * bites when the next instruction has a
- * complicated address mode */
- pushl %ds
- pushl %es /* now the stack frame is a trap frame */
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
- pushl _cpl
- pushl $0 /* dummy unit to finish building intr frame */
- incl _cnt+V_TRAP
- call _npxintr
- jmp doreti
-#else /* NNPX > 0 */
- pushl $0; TRAP(T_ARITHTRAP)
-#endif /* NNPX > 0 */
- /* 17 - 31 reserved for future exp */
-IDTVEC(rsvd0)
- pushl $0; TRAP(17)
-IDTVEC(rsvd1)
- pushl $0; TRAP(18)
-IDTVEC(rsvd2)
- pushl $0; TRAP(19)
-IDTVEC(rsvd3)
- pushl $0; TRAP(20)
-IDTVEC(rsvd4)
- pushl $0; TRAP(21)
-IDTVEC(rsvd5)
- pushl $0; TRAP(22)
-IDTVEC(rsvd6)
- pushl $0; TRAP(23)
-IDTVEC(rsvd7)
- pushl $0; TRAP(24)
-IDTVEC(rsvd8)
- pushl $0; TRAP(25)
-IDTVEC(rsvd9)
- pushl $0; TRAP(26)
-IDTVEC(rsvd10)
- pushl $0; TRAP(27)
-IDTVEC(rsvd11)
- pushl $0; TRAP(28)
-IDTVEC(rsvd12)
- pushl $0; TRAP(29)
-IDTVEC(rsvd13)
- pushl $0; TRAP(30)
-IDTVEC(rsvd14)
- pushl $0; TRAP(31)
-
- SUPERALIGN_TEXT
-alltraps:
- pushal
- nop
- pushl %ds
- pushl %es
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
-calltrap:
- incl _cnt+V_TRAP
- call _trap
- /*
- * Return through doreti to handle ASTs. Have to change trap frame
- * to interrupt frame.
- */
- movl $T_ASTFLT,4+4+32(%esp) /* new trap type (err code not used) */
- pushl _cpl
- pushl $0 /* dummy unit */
- jmp doreti
-
-#ifdef KGDB
-/*
- * This code checks for a kgdb trap, then falls through
- * to the regular trap code.
- */
- SUPERALIGN_TEXT
-bpttraps:
- pushal
- nop
- pushl %es
- pushl %ds
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
- testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
- /* non-kernel mode? */
- jne calltrap /* yes */
- call _kgdb_trap_glue
- jmp calltrap
-#endif
-
-/*
- * Call gate entry for syscall
- */
- SUPERALIGN_TEXT
-IDTVEC(syscall)
- pushfl /* only for stupid carry bit and more stupid wait3 cc kludge */
- /* XXX - also for direction flag (bzero, etc. clear it) */
- pushal /* only need eax,ecx,edx - trap resaves others */
- nop
- movl $KDSEL,%eax /* switch to kernel segments */
- movl %ax,%ds
- movl %ax,%es
- incl _cnt+V_SYSCALL /* kml 3/25/93 */
- call _syscall
- /*
- * Return through doreti to handle ASTs. Have to change syscall frame
- * to interrupt frame.
- *
- * XXX - we should have set up the frame earlier to avoid the
- * following popal/pushal (not much can be done to avoid shuffling
- * the flags). Consistent frames would simplify things all over.
- */
- movl 32+0(%esp),%eax /* old flags, shuffle to above cs:eip */
- movl 32+4(%esp),%ebx /* `int' frame should have been ef, eip, cs */
- movl 32+8(%esp),%ecx
- movl %ebx,32+0(%esp)
- movl %ecx,32+4(%esp)
- movl %eax,32+8(%esp)
- popal
- nop
- pushl $0 /* dummy error code */
- pushl $T_ASTFLT
- pushal
- nop
- movl __udatasel,%eax /* switch back to user segments */
- pushl %eax /* XXX - better to preserve originals? */
- pushl %eax
- pushl _cpl
- pushl $0
- jmp doreti
-
-#ifdef SHOW_A_LOT
-/*
- * 'show_bits' was too big when defined as a macro. The line length for some
- * enclosing macro was too big for gas. Perhaps the code would have blown
- * the cache anyway.
- */
- ALIGN_TEXT
-show_bits:
- pushl %eax
- SHOW_BIT(0)
- SHOW_BIT(1)
- SHOW_BIT(2)
- SHOW_BIT(3)
- SHOW_BIT(4)
- SHOW_BIT(5)
- SHOW_BIT(6)
- SHOW_BIT(7)
- SHOW_BIT(8)
- SHOW_BIT(9)
- SHOW_BIT(10)
- SHOW_BIT(11)
- SHOW_BIT(12)
- SHOW_BIT(13)
- SHOW_BIT(14)
- SHOW_BIT(15)
- popl %eax
- ret
-
- .data
-bit_colors:
- .byte GREEN,RED,0,0
- .text
-
-#endif /* SHOW_A_LOT */
-
-
-/*
- * include generated interrupt vectors and ISA intr code
- */
-#include "i386/isa/vector.s"
-#include "i386/isa/icu.s"
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 75ace1f..61948b0 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
- * $Id: machdep.c,v 1.14 1993/10/29 09:06:56 davidg Exp $
+ * $Id: machdep.c,v 1.15 1993/11/07 21:47:00 wollman Exp $
*/
#include "npx.h"
@@ -93,6 +93,10 @@ static unsigned int avail_remaining;
#define EXPECT_BASEMEM 640 /* The expected base memory*/
#define INFORM_WAIT 1 /* Set to pause before crash in weird cases */
+#ifndef PANIC_REBOOT_WAIT_TIME
+#define PANIC_REBOOT_WAIT_TIME 15 /* default to 15 seconds */
+#endif
+
/*
* Declare these as initialized data so we can patch them.
*/
@@ -625,13 +629,34 @@ boot(arghowto)
savectx(&dumppcb, 0);
dumppcb.pcb_ptd = rcr3();
dumpsys();
- /*NOTREACHED*/
+
+ if (PANIC_REBOOT_WAIT_TIME != 0) {
+ if (PANIC_REBOOT_WAIT_TIME != -1) {
+ int loop;
+ printf("Automatic reboot in %d seconds - press a key on the console to abort\n",
+ PANIC_REBOOT_WAIT_TIME);
+ for (loop = PANIC_REBOOT_WAIT_TIME; loop > 0; --loop) {
+ DELAY(1000 * 1000); /* one second */
+ if (sgetc(1)) /* Did user type a key? */
+ break;
+ }
+ if (!loop)
+ goto die;
+ }
+ } else { /* zero time specified - reboot NOW */
+ goto die;
+ }
+ printf("--> Press a key on the console to reboot <--\n");
+ cngetc();
}
}
#ifdef lint
dummy = 0; dummy = dummy;
printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
+die:
+ printf("Rebooting...\n");
+ DELAY (100000); /* wait 100ms for printf's to complete */
cpu_reset();
for(;;) ;
/*NOTREACHED*/
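The delay is a compile-time option guarded by the #ifndef added above; a hypothetical override, sketched as the preprocessor define the guard expects (-1 waits forever, 0 reboots immediately, any positive value is seconds):

    #define PANIC_REBOOT_WAIT_TIME  30          /* hypothetical: wait 30 seconds */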
@@ -681,8 +706,6 @@ dumpsys()
printf("succeeded\n");
break;
}
- printf("\n\n");
- DELAY(1000);
}
#ifdef HZ
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index f35dd0f..ed8ff6a 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.6 1993/10/12 15:09:37 rgrimes Exp $
+ * $Id: pmap.c,v 1.7 1993/10/15 10:34:25 rgrimes Exp $
*/
/*
@@ -229,7 +229,7 @@ extern int IdlePTD;
avail_end -= i386_round_page(sizeof(struct msgbuf));
mem_size = physmem << PG_SHIFT;
- virtual_avail = (vm_offset_t)atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
+ virtual_avail = (vm_offset_t)KERNBASE + avail_start;
virtual_end = VM_MAX_KERNEL_ADDRESS;
i386pagesperpage = PAGE_SIZE / NBPG;
@@ -332,10 +332,10 @@ pmap_init(phys_start, phys_end)
(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
&addr, (0x100000-0xa0000), FALSE);
- addr = (vm_offset_t) KERNBASE + KPTphys/* *NBPG */;
+ addr = (vm_offset_t) KERNBASE + IdlePTD;
vm_object_reference(kernel_object);
(void) vm_map_find(kernel_map, kernel_object, addr,
- &addr, 2*NBPG, FALSE);
+ &addr, (NKPDE+4)*NBPG, FALSE);
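A sketch of what that reservation covers, assuming UPAGES is 2 so that NKPDE+4 equals 1 + UPAGES + 1 + NKPDE: the page directory, the proc 0 u-area pages, the proc 0 stack page table, and the NKPDE kernel page tables laid down by locore.

    #define NBPG    4096
    #define UPAGES  2
    #define NKPDE   7
    #define BOOT_PT_BYTES   ((1 + UPAGES + 1 + NKPDE) * NBPG)   /* == (NKPDE+4)*NBPG */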
/*
* Allocate memory for random pmap data structures. Includes the
diff --git a/sys/i386/i386/support.s b/sys/i386/i386/support.s
new file mode 100644
index 0000000..a2ed642
--- /dev/null
+++ b/sys/i386/i386/support.s
@@ -0,0 +1,1031 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "assym.s" /* system definitions */
+#include "errno.h" /* error return codes */
+#include "machine/asmacros.h" /* miscellaneous asm macros */
+
+#define KDSEL 0x10 /* kernel data selector */
+#define IDXSHIFT 10
+
+/*
+ * Support routines for GCC, general C-callable functions
+ */
+ENTRY(__udivsi3)
+ movl 4(%esp),%eax
+ xorl %edx,%edx
+ divl 8(%esp)
+ ret
+
+ENTRY(__divsi3)
+ movl 4(%esp),%eax
+ cltd
+ idivl 8(%esp)
+ ret
+
+ /*
+ * I/O bus instructions via C
+ */
+ENTRY(inb) /* val = inb(port) */
+ movl 4(%esp),%edx
+ subl %eax,%eax
+ NOP
+ inb %dx,%al
+ ret
+
+ENTRY(inw) /* val = inw(port) */
+ movl 4(%esp),%edx
+ subl %eax,%eax
+ NOP
+ inw %dx,%ax
+ ret
+
+ENTRY(insb) /* insb(port, addr, cnt) */
+ pushl %edi
+ movw 8(%esp),%dx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ insb
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ENTRY(insw) /* insw(port, addr, cnt) */
+ pushl %edi
+ movw 8(%esp),%dx
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ insw
+ NOP
+ movl %edi,%eax
+ popl %edi
+ ret
+
+ENTRY(rtcin) /* rtcin(val) */
+ movl 4(%esp),%eax
+ outb %al,$0x70
+ NOP
+ xorl %eax,%eax
+ inb $0x71,%al
+ ret
+
+ENTRY(outb) /* outb(port, val) */
+ movl 4(%esp),%edx
+ NOP
+ movl 8(%esp),%eax
+ outb %al,%dx
+ NOP
+ ret
+
+ENTRY(outw) /* outw(port, val) */
+ movl 4(%esp),%edx
+ NOP
+ movl 8(%esp),%eax
+ outw %ax,%dx
+ NOP
+ ret
+
+ENTRY(outsb) /* outsb(port, addr, cnt) */
+ pushl %esi
+ movw 8(%esp),%dx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ outsb
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+ENTRY(outsw) /* outsw(port, addr, cnt) */
+ pushl %esi
+ movw 8(%esp),%dx
+ movl 12(%esp),%esi
+ movl 16(%esp),%ecx
+ cld
+ NOP
+ rep
+ outsw
+ NOP
+ movl %esi,%eax
+ popl %esi
+ ret
+
+/*
+ * bcopy family
+ */
+/* void bzero(void *base, u_int cnt) */
+ENTRY(bzero)
+ pushl %edi
+ movl 8(%esp),%edi
+ movl 12(%esp),%ecx
+ xorl %eax,%eax
+ shrl $2,%ecx
+ cld
+ rep
+ stosl
+ movl 12(%esp),%ecx
+ andl $3,%ecx
+ rep
+ stosb
+ popl %edi
+ ret
+
+/* fillw(pat, base, cnt) */
+ENTRY(fillw)
+ pushl %edi
+ movl 8(%esp),%eax
+ movl 12(%esp),%edi
+ movl 16(%esp),%ecx
+ cld
+ rep
+ stosw
+ popl %edi
+ ret
+
+ENTRY(bcopyb)
+bcopyb:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards. */
+ addl %ecx,%esi
+ std
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ENTRY(bcopyw)
+bcopyw:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ shrl $1,%ecx /* copy by 16-bit words */
+ rep
+ movsw
+ adc %ecx,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ std
+ andl $1,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ movl 20(%esp),%ecx /* copy remainder by 16-bit words */
+ shrl $1,%ecx
+ decl %esi
+ decl %edi
+ rep
+ movsw
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ENTRY(bcopyx)
+ movl 16(%esp),%eax
+ cmpl $2,%eax
+ je bcopyw /* not _bcopyw, to avoid multiple mcounts */
+ cmpl $4,%eax
+ je bcopy
+ jmp bcopyb
+
+/*
+ * (ov)bcopy(src, dst, cnt)
+ * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
+ */
+ALTENTRY(ovbcopy)
+ENTRY(bcopy)
+bcopy:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi
+ movl 16(%esp),%edi
+ movl 20(%esp),%ecx
+ cmpl %esi,%edi /* potentially overlapping? */
+ jnb 1f
+ cld /* nope, copy forwards */
+ shrl $2,%ecx /* copy by 32-bit words */
+ rep
+ movsl
+ movl 20(%esp),%ecx
+ andl $3,%ecx /* any bytes left? */
+ rep
+ movsb
+ popl %edi
+ popl %esi
+ ret
+
+ ALIGN_TEXT
+1:
+ addl %ecx,%edi /* copy backwards */
+ addl %ecx,%esi
+ std
+ andl $3,%ecx /* any fractional bytes? */
+ decl %edi
+ decl %esi
+ rep
+ movsb
+ movl 20(%esp),%ecx /* copy remainder by 32-bit words */
+ shrl $2,%ecx
+ subl $3,%esi
+ subl $3,%edi
+ rep
+ movsl
+ popl %edi
+ popl %esi
+ cld
+ ret
+
+ALTENTRY(ntohl)
+ENTRY(htonl)
+ movl 4(%esp),%eax
+#ifdef i486
+/* XXX */
+/* Since Gas 1.38 does not grok bswap, this has been coded as the
+ * equivalent bytes. This can be changed back to bswap when we
+ * upgrade to a newer version of Gas. */
+ /* bswap %eax */
+ .byte 0x0f
+ .byte 0xc8
+#else
+ xchgb %al,%ah
+ roll $16,%eax
+ xchgb %al,%ah
+#endif
+ ret
+
+ALTENTRY(ntohs)
+ENTRY(htons)
+ movzwl 4(%esp),%eax
+ xchgb %al,%ah
+ ret
+
+/*****************************************************************************/
+/* copyout and fubyte family */
+/*****************************************************************************/
+/*
+ * Access user memory from inside the kernel. These routines and possibly
+ * the math- and DOS emulators should be the only places that do this.
+ *
+ * We have to access the memory with the user's permissions, so use a segment
+ * selector with RPL 3. For writes to user space we have to additionally
+ * check the PTE for write permission, because the 386 does not check
+ * write permissions when we are executing with EPL 0. The 486 does check
+ * this if the WP bit is set in CR0, so we can use a simpler version here.
+ *
+ * These routines set curpcb->onfault for the time they execute. When a
+ * protection violation occurs inside the functions, the trap handler
+ * returns to *curpcb->onfault instead of the function.
+ */
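+
+/*
+ * Rough caller-side sketch (illustrative only; uap->buf and kbuf are
+ * hypothetical user and kernel buffers):
+ *
+ *	if (copyin(uap->buf, &kbuf, sizeof(kbuf)))
+ *		return (EFAULT);
+ *	...
+ *	error = copyout(&kbuf, uap->buf, sizeof(kbuf));
+ */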
+
+
+ENTRY(copyout) /* copyout(from_kernel, to_user, len) */
+ movl _curpcb,%eax
+ movl $copyout_fault,PCB_ONFAULT(%eax)
+ pushl %esi
+ pushl %edi
+ pushl %ebx
+ movl 16(%esp),%esi
+ movl 20(%esp),%edi
+ movl 24(%esp),%ebx
+ orl %ebx,%ebx /* anything to do? */
+ jz done_copyout
+
+ /*
+ * Check explicitly for non-user addresses. If 486 write protection
+ * is being used, this check is essential because we are in kernel
+ * mode so the h/w does not provide any protection against writing
+ * kernel addresses.
+ *
+ * Otherwise, it saves having to load and restore %es to get the
+ * usual segment-based protection (the destination segment for movs
+ * is always %es). The other explicit checks for user-writability
+ * are not quite sufficient. They fail for the user area because
+ * we mapped the user area read/write to avoid having an #ifdef in
+ * vm_machdep.c. They fail for user PTEs and/or PTDs! (107
+ * addresses including 0xff800000 and 0xfc000000). I'm not sure if
+ * this can be fixed. Marking the PTEs supervisor mode and the
+ * PDE's user mode would almost work, but there may be a problem
+ * with the self-referential PDE.
+ */
+ movl %edi,%eax
+ addl %ebx,%eax
+ jc copyout_fault
+#define VM_END_USER_ADDRESS 0xFDBFE000 /* XXX */
+ cmpl $VM_END_USER_ADDRESS,%eax
+ ja copyout_fault
+
+#ifndef USE_486_WRITE_PROTECT
+/*
+ * We have to check each PTE for user write permission.
+ * The checking may cause a page fault, so it is important to set
+ * up everything for return via copyout_fault before here.
+ */
+ /* compute number of pages */
+ movl %edi,%ecx
+ andl $NBPG-1,%ecx
+ addl %ebx,%ecx
+ decl %ecx
+ shrl $IDXSHIFT+2,%ecx
+ incl %ecx
+
+ /* compute PTE offset for start address */
+ movl %edi,%edx
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+
+1: /* check PTE for each page */
+ movb _PTmap(%edx),%al
+ andb $0x07,%al /* Pages must be VALID + USERACC + WRITABLE */
+ cmpb $0x07,%al
+ je 2f
+
+ /* simulate a trap */
+ pushl %edx
+ pushl %ecx
+ shll $IDXSHIFT,%edx
+ pushl %edx
+ call _trapwrite /* trapwrite(addr) */
+ popl %edx
+ popl %ecx
+ popl %edx
+
+ orl %eax,%eax /* if not ok, return EFAULT */
+ jnz copyout_fault
+
+2:
+ addl $4,%edx
+ decl %ecx
+ jnz 1b /* check next page */
+#endif /* ndef USE_486_WRITE_PROTECT */
+
+ /* bcopy(%esi, %edi, %ebx) */
+ cld
+ movl %ebx,%ecx
+ shrl $2,%ecx
+ rep
+ movsl
+ movb %bl,%cl
+ andb $3,%cl /* XXX can we trust the rest of %ecx on clones? */
+ rep
+ movsb
+
+done_copyout:
+ popl %ebx
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+copyout_fault:
+ popl %ebx
+ popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $EFAULT,%eax
+ ret
+
+/* copyin(from_user, to_kernel, len) */
+ENTRY(copyin)
+ movl _curpcb,%eax
+ movl $copyin_fault,PCB_ONFAULT(%eax)
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%esi /* caddr_t from */
+ movl 16(%esp),%edi /* caddr_t to */
+ movl 20(%esp),%ecx /* size_t len */
+
+ movb %cl,%al
+ shrl $2,%ecx /* copy longword-wise */
+ cld
+ gs
+ rep
+ movsl
+ movb %al,%cl
+ andb $3,%cl /* copy remaining bytes */
+ gs
+ rep
+ movsb
+
+ popl %edi
+ popl %esi
+ xorl %eax,%eax
+ movl _curpcb,%edx
+ movl %eax,PCB_ONFAULT(%edx)
+ ret
+
+ ALIGN_TEXT
+copyin_fault:
+ popl %edi
+ popl %esi
+ movl _curpcb,%edx
+ movl $0,PCB_ONFAULT(%edx)
+ movl $EFAULT,%eax
+ ret
+
+/*
+ * fu{byte,sword,word}: fetch a byte (sword, word) from user memory
+ */
+ALTENTRY(fuiword)
+ENTRY(fuword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ gs
+ movl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ENTRY(fusword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ gs
+ movzwl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(fuibyte)
+ENTRY(fubyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ gs
+ movzbl (%edx),%eax
+ movl $0,PCB_ONFAULT(%ecx)
+ ret
+
+ ALIGN_TEXT
+fusufault:
+ movl _curpcb,%ecx
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ decl %eax
+ ret
+
+/*
+ * su{byte,sword,word}: write a byte (sword, word) to user memory
+ */
+#ifdef USE_486_WRITE_PROTECT
+/*
+ * we only have to set the right segment selector.
+ */
+ALTENTRY(suiword)
+ENTRY(suword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ gs
+ movl %eax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ENTRY(susword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movw 8(%esp),%ax
+ gs
+ movw %ax,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(suibyte)
+ENTRY(subyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movb 8(%esp),%al
+ gs
+ movb %al,(%edx)
+ xorl %eax,%eax
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+
+#else /* USE_486_WRITE_PROTECT */
+/*
+ * here starts the trouble again: check PTE, twice if word crosses
+ * a page boundary.
+ */
+/* XXX - page boundary crossing is not handled yet */
+
+ALTENTRY(suibyte)
+ENTRY(subyte)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ gs
+ movb %al,(%edx)
+ xorl %eax,%eax
+ movl _curpcb,%ecx
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ENTRY(susword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ gs
+ movw %ax,(%edx)
+ xorl %eax,%eax
+ movl _curpcb,%ecx
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+ALTENTRY(suiword)
+ENTRY(suword)
+ movl _curpcb,%ecx
+ movl $fusufault,PCB_ONFAULT(%ecx)
+ movl 4(%esp),%edx
+ movl %edx,%eax
+ shrl $IDXSHIFT,%edx
+ andb $0xfc,%dl
+ movb _PTmap(%edx),%dl
+ andb $0x7,%dl /* must be VALID + USERACC + WRITE */
+ cmpb $0x7,%dl
+ je 1f
+
+ /* simulate a trap */
+ pushl %eax
+ call _trapwrite
+ popl %edx
+ orl %eax,%eax
+ jnz fusufault
+1:
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ gs
+ movl %eax,0(%edx)
+ xorl %eax,%eax
+ movl _curpcb,%ecx
+ movl %eax,PCB_ONFAULT(%ecx)
+ ret
+
+#endif /* USE_486_WRITE_PROTECT */
+
+/*
+ * copyoutstr(from, to, maxlen, int *lencopied)
+ * copy a string from 'from' to 'to', stopping when a 0 byte is reached.
+ * return ENAMETOOLONG if the string is longer than maxlen, and
+ * EFAULT on protection violations. If lencopied is non-NULL,
+ * return the actual length in *lencopied.
+ */
+#ifdef USE_486_WRITE_PROTECT
+
+ENTRY(copyoutstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $cpystrflt,PCB_ONFAULT(%ecx)
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ incl %edx
+
+1:
+ decl %edx
+ jz 4f
+ /*
+ * gs override doesn't work for stosb. Use the same explicit check
+ * as in copyout(). It's much slower now because it is per-char.
+ * XXX - however, it would be faster to rewrite this function to use
+ * strlen() and copyout().
+ */
+ cmpl $VM_END_USER_ADDRESS,%edi
+ jae cpystrflt
+ lodsb
+ gs
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+4:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp 6f
+
+#else /* ndef USE_486_WRITE_PROTECT */
+
+ENTRY(copyoutstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $cpystrflt,PCB_ONFAULT(%ecx)
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+1:
+ /*
+ * It suffices to check that the first byte is in user space, because
+ * we look at a page at a time and the end address is on a page
+ * boundary.
+ */
+ cmpl $VM_END_USER_ADDRESS,%edi
+ jae cpystrflt
+ movl %edi,%eax
+ shrl $IDXSHIFT,%eax
+ andb $0xfc,%al
+ movb _PTmap(%eax),%al
+ andb $7,%al
+ cmpb $7,%al
+ je 2f
+
+ /* simulate trap */
+ pushl %edx
+ pushl %edi
+ call _trapwrite
+ popl %edi
+ popl %edx
+ orl %eax,%eax
+ jnz cpystrflt
+
+2: /* copy up to end of this page */
+ movl %edi,%eax
+ andl $NBPG-1,%eax
+ movl $NBPG,%ecx
+ subl %eax,%ecx /* ecx = NBPG - (src % NBPG) */
+ cmpl %ecx,%edx
+ jge 3f
+ movl %edx,%ecx /* ecx = min(ecx, edx) */
+3:
+ orl %ecx,%ecx
+ jz 4f
+ decl %ecx
+ decl %edx
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 3b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+
+4: /* next page */
+ orl %edx,%edx
+ jnz 1b
+
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp 6f
+
+#endif /* USE_486_WRITE_PROTECT */
+
+/*
+ * copyinstr(from, to, maxlen, int *lencopied)
+ * copy a string from 'from' to 'to', stopping when a 0 byte is reached.
+ * return ENAMETOOLONG if the string is longer than maxlen, and
+ * EFAULT on protection violations. If lencopied is non-NULL,
+ * return the actual length in *lencopied.
+ */
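+
+/*
+ * Illustrative caller-side sketch (hypothetical names, not part of this
+ * file): fetching a pathname from user space might look like
+ *
+ *	error = copyinstr(uap->path, kpath, MAXPATHLEN, &len);
+ *
+ * where error is 0, ENAMETOOLONG or EFAULT, and len (if requested) is the
+ * number of bytes copied including the terminating 0 byte.
+ */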
+ENTRY(copyinstr)
+ pushl %esi
+ pushl %edi
+ movl _curpcb,%ecx
+ movl $cpystrflt,PCB_ONFAULT(%ecx)
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ incl %edx
+
+1:
+ decl %edx
+ jz 4f
+ gs
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+4:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+ jmp 6f
+
+cpystrflt:
+ movl $EFAULT,%eax
+6:
+ /* set *lencopied and return %eax */
+ movl _curpcb,%ecx
+ movl $0,PCB_ONFAULT(%ecx)
+ movl 20(%esp),%ecx
+ subl %edx,%ecx
+ movl 24(%esp),%edx
+ orl %edx,%edx
+ jz 7f
+ movl %ecx,(%edx)
+7:
+ popl %edi
+ popl %esi
+ ret
+
+
+/*
+ * copystr(from, to, maxlen, int *lencopied)
+ */
+ENTRY(copystr)
+ pushl %esi
+ pushl %edi
+
+ movl 12(%esp),%esi /* %esi = from */
+ movl 16(%esp),%edi /* %edi = to */
+ movl 20(%esp),%edx /* %edx = maxlen */
+ incl %edx
+
+1:
+ decl %edx
+ jz 4f
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decl %edx
+ xorl %eax,%eax
+ jmp 6f
+4:
+ /* edx is zero -- return ENAMETOOLONG */
+ movl $ENAMETOOLONG,%eax
+
+6:
+ /* set *lencopied and return %eax */
+ movl 20(%esp),%ecx
+ subl %edx,%ecx
+ movl 24(%esp),%edx
+ orl %edx,%edx
+ jz 7f
+ movl %ecx,(%edx)
+7:
+ popl %edi
+ popl %esi
+ ret
+
+/*
+ * Handling of special 386 registers and descriptor tables etc
+ */
+/* void lgdt(struct region_descriptor *rdp); */
+ENTRY(lgdt)
+ /* reload the descriptor table */
+ movl 4(%esp),%eax
+ lgdt (%eax)
+
+ /* flush the prefetch q */
+ jmp 1f
+ nop
+1:
+ /* reload "stale" selectors */
+ movl $KDSEL,%eax
+ movl %ax,%ds
+ movl %ax,%es
+ movl %ax,%ss
+
+ /* reload code selector by turning return into intersegmental return */
+ movl (%esp),%eax
+ pushl %eax
+# movl $KCSEL,4(%esp)
+ movl $8,4(%esp)
+ lret
+
+/*
+ * void lidt(struct region_descriptor *rdp);
+ */
+ENTRY(lidt)
+ movl 4(%esp),%eax
+ lidt (%eax)
+ ret
+
+/*
+ * void lldt(u_short sel)
+ */
+ENTRY(lldt)
+ lldt 4(%esp)
+ ret
+
+/*
+ * void ltr(u_short sel)
+ */
+ENTRY(ltr)
+ ltr 4(%esp)
+ ret
+
+/* ssdtosd(*ssdp,*sdp) */
+ENTRY(ssdtosd)
+ pushl %ebx
+ movl 8(%esp),%ecx
+ movl 8(%ecx),%ebx
+ shll $16,%ebx
+ movl (%ecx),%edx
+ roll $16,%edx
+ movb %dh,%bl
+ movb %dl,%bh
+ rorl $8,%ebx
+ movl 4(%ecx),%eax
+ movw %ax,%dx
+ andl $0xf0000,%eax
+ orl %eax,%ebx
+ movl 12(%esp),%ecx
+ movl %edx,(%ecx)
+ movl %ebx,4(%ecx)
+ popl %ebx
+ ret
+
+
+/* tlbflush() */
+ENTRY(tlbflush)
+ movl %cr3,%eax
+ orl $I386_CR3PAT,%eax
+ movl %eax,%cr3
+ ret
+
+
+/* load_cr0(cr0) */
+ENTRY(load_cr0)
+ movl 4(%esp),%eax
+ movl %eax,%cr0
+ ret
+
+
+/* rcr0() */
+ENTRY(rcr0)
+ movl %cr0,%eax
+ ret
+
+
+/* rcr2() */
+ENTRY(rcr2)
+ movl %cr2,%eax
+ ret
+
+
+/* rcr3() */
+ENTRY(rcr3)
+ movl %cr3,%eax
+ ret
+
+
+/* void load_cr3(caddr_t cr3) */
+ENTRY(load_cr3)
+ movl 4(%esp),%eax
+ orl $I386_CR3PAT,%eax
+ movl %eax,%cr3
+ ret
+
+
+/*****************************************************************************/
+/* setjump, longjump */
+/*****************************************************************************/
+
+ENTRY(setjmp)
+ movl 4(%esp),%eax
+ movl %ebx,(%eax) /* save ebx */
+ movl %esp,4(%eax) /* save esp */
+ movl %ebp,8(%eax) /* save ebp */
+ movl %esi,12(%eax) /* save esi */
+ movl %edi,16(%eax) /* save edi */
+ movl (%esp),%edx /* get rta */
+ movl %edx,20(%eax) /* save eip */
+ xorl %eax,%eax /* return(0); */
+ ret
+
+ENTRY(longjmp)
+ movl 4(%esp),%eax
+ movl (%eax),%ebx /* restore ebx */
+ movl 4(%eax),%esp /* restore esp */
+ movl 8(%eax),%ebp /* restore ebp */
+ movl 12(%eax),%esi /* restore esi */
+ movl 16(%eax),%edi /* restore edi */
+ movl 20(%eax),%edx /* get rta */
+ movl %edx,(%esp) /* put in return frame */
+ xorl %eax,%eax /* return(1); */
+ incl %eax
+ ret
+
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
new file mode 100644
index 0000000..ec6e8bc
--- /dev/null
+++ b/sys/i386/i386/swtch.s
@@ -0,0 +1,435 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "npx.h" /* for NNPX */
+#include "assym.s" /* for preprocessor defines */
+#include "errno.h" /* for error codes */
+
+#include "i386/isa/debug.h" /* for SHOW macros */
+#include "machine/asmacros.h" /* for miscellaneous assembly macros */
+
+/*****************************************************************************/
+/* Scheduling */
+/*****************************************************************************/
+
+/*
+ * The following primitives manipulate the run queues.
+ * _whichqs tells which of the 32 queues _qs
+ * have processes in them. Setrq puts processes into queues, Remrq
+ * removes them from queues. The running process is on no queue;
+ * other processes are on a queue indexed by p->p_pri divided by 4,
+ * which shrinks the 0-127 range of priorities into the 32 available
+ * queues.
+ */
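+/*
+ * Rough C equivalent of the queue indexing used by setrq/remrq below
+ * (sketch only; each _qs entry is assumed to be a pair of link pointers,
+ * hence the 8-byte stride):
+ *
+ *	qindex = p->p_pri >> 2;
+ *	qhdr = (char *)qs + (qindex << 3);
+ */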
+ .data
+ .globl _curpcb, _whichqs
+_curpcb: .long 0 /* pointer to curproc's PCB area */
+_whichqs: .long 0 /* which run queues have data */
+
+ .globl _qs,_cnt,_panic
+ .comm _noproc,4
+ .comm _runrun,4
+
+ .globl _want_resched
+_want_resched: .long 0 /* we need to re-run the scheduler */
+
+ .text
+/*
+ * Setrq(p)
+ *
+ * Call should be made at spl6(), and p->p_stat should be SRUN
+ */
+ENTRY(setrq)
+ movl 4(%esp),%eax
+ cmpl $0,P_RLINK(%eax) /* should not be on q already */
+ je set1
+ pushl $set2
+ call _panic
+set1:
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btsl %edx,_whichqs /* set q full bit */
+ shll $3,%edx
+ addl $_qs,%edx /* locate q hdr */
+ movl %edx,P_LINK(%eax) /* link process on tail of q */
+ movl P_RLINK(%edx),%ecx
+ movl %ecx,P_RLINK(%eax)
+ movl %eax,P_RLINK(%edx)
+ movl %eax,P_LINK(%ecx)
+ ret
+
+set2: .asciz "setrq"
+
+/*
+ * Remrq(p)
+ *
+ * Call should be made at spl6().
+ */
+ENTRY(remrq)
+ movl 4(%esp),%eax
+ movzbl P_PRI(%eax),%edx
+ shrl $2,%edx
+ btrl %edx,_whichqs /* clear full bit, panic if clear already */
+ jb rem1
+ pushl $rem3
+ call _panic
+rem1:
+ pushl %edx
+ movl P_LINK(%eax),%ecx /* unlink process */
+ movl P_RLINK(%eax),%edx
+ movl %edx,P_RLINK(%ecx)
+ movl P_RLINK(%eax),%ecx
+ movl P_LINK(%eax),%edx
+ movl %edx,P_LINK(%ecx)
+ popl %edx
+ movl $_qs,%ecx
+ shll $3,%edx
+ addl %edx,%ecx
+ cmpl P_LINK(%ecx),%ecx /* q still has something? */
+ je rem2
+ shrl $3,%edx /* yes, set bit as still full */
+ btsl %edx,_whichqs
+rem2:
+ movl $0,P_RLINK(%eax) /* zap reverse link to indicate off list */
+ ret
+
+rem3: .asciz "remrq"
+sw0: .asciz "swtch"
+
+/*
+ * When no processes are on the runq, Swtch branches to idle
+ * to wait for something to come ready.
+ */
+ ALIGN_TEXT
+Idle:
+ sti
+ SHOW_STI
+
+ ALIGN_TEXT
+idle_loop:
+ call _spl0
+ cmpl $0,_whichqs
+ jne sw1
+ hlt /* wait for interrupt */
+ jmp idle_loop
+
+badsw:
+ pushl $sw0
+ call _panic
+ /*NOTREACHED*/
+
+/*
+ * Swtch()
+ */
+ SUPERALIGN_TEXT /* so profiling doesn't lump Idle with swtch().. */
+ENTRY(swtch)
+
+ incl _cnt+V_SWTCH
+
+ /* switch to new process. first, save context as needed */
+
+ movl _curproc,%ecx
+
+ /* if no process to save, don't bother */
+ testl %ecx,%ecx
+ je sw1
+
+ movl P_ADDR(%ecx),%ecx
+
+ movl (%esp),%eax /* Hardware registers */
+ movl %eax,PCB_EIP(%ecx)
+ movl %ebx,PCB_EBX(%ecx)
+ movl %esp,PCB_ESP(%ecx)
+ movl %ebp,PCB_EBP(%ecx)
+ movl %esi,PCB_ESI(%ecx)
+ movl %edi,PCB_EDI(%ecx)
+
+#if NNPX > 0
+ /* have we used fp, and need a save? */
+ mov _curproc,%eax
+ cmp %eax,_npxproc
+ jne 1f
+ pushl %ecx /* h/w bugs make saving complicated */
+ leal PCB_SAVEFPU(%ecx),%eax
+ pushl %eax
+ call _npxsave /* do it in a big C function */
+ popl %eax
+ popl %ecx
+1:
+#endif /* NNPX > 0 */
+
+ movl _CMAP2,%eax /* save temporary map PTE */
+ movl %eax,PCB_CMAP2(%ecx) /* in our context */
+ movl $0,_curproc /* out of process */
+
+# movw _cpl,%ax
+# movw %ax,PCB_IML(%ecx) /* save ipl */
+
+ /* save is done, now choose a new process or idle */
+sw1:
+ cli
+ SHOW_CLI
+ movl _whichqs,%edi
+2:
+ /* XXX - bsf is sloow */
+ bsfl %edi,%eax /* find a full q */
+ je Idle /* if none, idle */
+ /* XX update whichqs? */
+swfnd:
+ btrl %eax,%edi /* clear q full status */
+ jnb 2b /* if it was clear, look for another */
+ movl %eax,%ebx /* save which one we are using */
+
+ shll $3,%eax
+ addl $_qs,%eax /* select q */
+ movl %eax,%esi
+
+#ifdef DIAGNOSTIC
+ cmpl P_LINK(%eax),%eax /* linked to self? (e.g. not on list) */
+ je badsw /* not possible */
+#endif
+
+ movl P_LINK(%eax),%ecx /* unlink from front of process q */
+ movl P_LINK(%ecx),%edx
+ movl %edx,P_LINK(%eax)
+ movl P_RLINK(%ecx),%eax
+ movl %eax,P_RLINK(%edx)
+
+ cmpl P_LINK(%ecx),%esi /* q empty */
+ je 3f
+ btsl %ebx,%edi /* nope, set to indicate full */
+3:
+ movl %edi,_whichqs /* update q status */
+
+ movl $0,%eax
+ movl %eax,_want_resched
+
+#ifdef DIAGNOSTIC
+ cmpl %eax,P_WCHAN(%ecx)
+ jne badsw
+ cmpb $SRUN,P_STAT(%ecx)
+ jne badsw
+#endif
+
+ movl %eax,P_RLINK(%ecx) /* isolate process to run */
+ movl P_ADDR(%ecx),%edx
+ movl PCB_CR3(%edx),%ebx
+
+ /* switch address space */
+ movl %ebx,%cr3
+
+ /* restore context */
+ movl PCB_EBX(%edx),%ebx
+ movl PCB_ESP(%edx),%esp
+ movl PCB_EBP(%edx),%ebp
+ movl PCB_ESI(%edx),%esi
+ movl PCB_EDI(%edx),%edi
+ movl PCB_EIP(%edx),%eax
+ movl %eax,(%esp)
+
+ movl PCB_CMAP2(%edx),%eax /* get temporary map */
+ movl %eax,_CMAP2 /* reload temporary map PTE */
+
+ movl %ecx,_curproc /* into next process */
+ movl %edx,_curpcb
+
+ pushl %edx /* save p to return */
+/*
+ * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
+ * I think restoring the cpl is unnecessary, but we must turn off the cli
+ * now that spl*() don't do it as a side effect.
+ */
+ pushl PCB_IML(%edx)
+ sti
+ SHOW_STI
+#if 0
+ call _splx
+#endif
+ addl $4,%esp
+/*
+ * XXX - 0.0 gets here via swtch_to_inactive(). I think 0.1 gets here in the
+ * same way. Better return a value.
+ */
+ popl %eax /* return(p); */
+ ret
+
+ENTRY(mvesp)
+ movl %esp,%eax
+ ret
+/*
+ * struct proc *swtch_to_inactive(p) ; struct proc *p;
+ *
+ * At exit of a process, move off the address space of the
+ * process and onto a "safe" one. Then, on a temporary stack
+ * return and run code that disposes of the old state.
+ * Since this code requires a parameter from the "old" stack,
+ * pass it back as a return value.
+ */
+ENTRY(swtch_to_inactive)
+ popl %edx /* old pc */
+ popl %eax /* arg, our return value */
+ movl _IdlePTD,%ecx
+ movl %ecx,%cr3 /* good bye address space */
+ #write buffer?
+ movl $tmpstk-4,%esp /* temporary stack, compensated for call */
+ jmp %edx /* return, execute remainder of cleanup */
+
+/*
+ * savectx(pcb, altreturn)
+ * Update pcb, saving current processor state and arranging
+ * for alternate return a la longjmp in swtch if altreturn is true.
+ */
+ENTRY(savectx)
+ movl 4(%esp),%ecx
+ movw _cpl,%ax
+ movw %ax,PCB_IML(%ecx)
+ movl (%esp),%eax
+ movl %eax,PCB_EIP(%ecx)
+ movl %ebx,PCB_EBX(%ecx)
+ movl %esp,PCB_ESP(%ecx)
+ movl %ebp,PCB_EBP(%ecx)
+ movl %esi,PCB_ESI(%ecx)
+ movl %edi,PCB_EDI(%ecx)
+
+#if NNPX > 0
+ /*
+ * If npxproc == NULL, then the npx h/w state is irrelevant and the
+ * state had better already be in the pcb. This is true for forks
+ * but not for dumps (the old book-keeping with FP flags in the pcb
+ * always lost for dumps because the dump pcb has 0 flags).
+ *
+ * If npxproc != NULL, then we have to save the npx h/w state to
+ * npxproc's pcb and copy it to the requested pcb, or save to the
+ * requested pcb and reload. Copying is easier because we would
+ * have to handle h/w bugs for reloading. We used to lose the
+ * parent's npx state for forks by forgetting to reload.
+ */
+ mov _npxproc,%eax
+ testl %eax,%eax
+ je 1f
+
+ pushl %ecx
+ movl P_ADDR(%eax),%eax
+ leal PCB_SAVEFPU(%eax),%eax
+ pushl %eax
+ pushl %eax
+ call _npxsave
+ popl %eax
+ popl %eax
+ popl %ecx
+
+ pushl %ecx
+ pushl $108+8*2 /* XXX h/w state size + padding */
+ leal PCB_SAVEFPU(%ecx),%ecx
+ pushl %ecx
+ pushl %eax
+ call _bcopy
+ addl $12,%esp
+ popl %ecx
+1:
+#endif /* NNPX > 0 */
+
+ movl _CMAP2,%edx /* save temporary map PTE */
+ movl %edx,PCB_CMAP2(%ecx) /* in our context */
+
+ cmpl $0,8(%esp)
+ je 1f
+ movl %esp,%edx /* relocate current sp relative to pcb */
+ subl $_kstack,%edx /* (sp is relative to kstack): */
+ addl %edx,%ecx /* pcb += sp - kstack; */
+ movl %eax,(%ecx) /* write return pc at (relocated) sp@ */
+
+/* this mess deals with replicating register state gcc hides */
+ movl 12(%esp),%eax
+ movl %eax,12(%ecx)
+ movl 16(%esp),%eax
+ movl %eax,16(%ecx)
+ movl 20(%esp),%eax
+ movl %eax,20(%ecx)
+ movl 24(%esp),%eax
+ movl %eax,24(%ecx)
+1:
+ xorl %eax,%eax /* return 0 */
+ ret
+
+/*
+ * addupc(int pc, struct uprof *up, int ticks):
+ * update profiling information for the user process.
+ */
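+/*
+ * Rough C equivalent of the bucket arithmetic below (sketch only, mirroring
+ * the assembly rather than defining the interface):
+ *
+ *	off = pc - up->pr_off;
+ *	bucket = (((off >> 1) * up->pr_scale) >> 15) & ~1;
+ *	if (off >= 0 && bucket <= up->pr_size)
+ *		*(int *)(up->pr_base + bucket) += ticks;
+ */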
+ENTRY(addupc)
+ pushl %ebp
+ movl %esp,%ebp
+ movl 12(%ebp),%edx /* up */
+ movl 8(%ebp),%eax /* pc */
+
+ subl PR_OFF(%edx),%eax /* pc -= up->pr_off */
+ jl L1 /* if (pc < 0) return */
+
+ shrl $1,%eax /* praddr = pc >> 1 */
+ imull PR_SCALE(%edx),%eax /* praddr *= up->pr_scale */
+ shrl $15,%eax /* praddr = praddr >> 15 */
+ andl $-2,%eax /* praddr &= ~1 */
+
+ cmpl PR_SIZE(%edx),%eax /* if (praddr > up->pr_size) return */
+ ja L1
+
+/* addl %eax,%eax /* praddr -> word offset */
+ addl PR_BASE(%edx),%eax /* praddr += up-> pr_base */
+ movl 16(%ebp),%ecx /* ticks */
+
+ movl _curpcb,%edx
+ movl $proffault,PCB_ONFAULT(%edx)
+ addl %ecx,(%eax) /* storage location += ticks */
+ movl $0,PCB_ONFAULT(%edx)
+L1:
+ leave
+ ret
+
+ ALIGN_TEXT
+proffault:
+ /* if we get a fault, then kill profiling all together */
+ movl $0,PCB_ONFAULT(%edx) /* squish the fault handler */
+ movl 12(%ebp),%ecx
+ movl $0,PR_SCALE(%ecx) /* up->pr_scale = 0 */
+ leave
+ ret
+
+/* To be done: */
+ENTRY(astoff)
+ ret
+
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
index c224396..92247dd 100644
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
- * $Id: trap.c,v 1.5 1993/11/01 11:51:29 chmr Exp $
+ * $Id: trap.c,v 1.6 1993/11/04 15:05:41 davidg Exp $
*/
/*
@@ -85,6 +85,38 @@ int dostacklimits;
unsigned rcr2();
extern short cpl;
+#define MAX_TRAP_MSG 27
+char *trap_msg[] = {
+ "reserved addressing fault", /* 0 T_RESADFLT */
+ "privileged instruction fault", /* 1 T_PRIVINFLT */
+ "reserved operand fault", /* 2 T_RESOPFLT */
+ "breakpoint instruction fault", /* 3 T_BPTFLT */
+ "", /* 4 unused */
+ "system call trap", /* 5 T_SYSCALL */
+ "arithmetic trap", /* 6 T_ARITHTRAP */
+ "system forced exception", /* 7 T_ASTFLT */
+ "segmentation (limit) fault", /* 8 T_SEGFLT */
+ "protection fault", /* 9 T_PROTFLT */
+ "trace trap", /* 10 T_TRCTRAP */
+ "", /* 11 unused */
+ "page fault", /* 12 T_PAGEFLT */
+ "page table fault", /* 13 T_TABLEFLT */
+ "alignment fault", /* 14 T_ALIGNFLT */
+ "kernel stack pointer not valid", /* 15 T_KSPNOTVAL */
+ "bus error", /* 16 T_BUSERR */
+ "kernel debugger fault", /* 17 T_KDBTRAP */
+ "integer divide fault", /* 18 T_DIVIDE */
+ "non-maskable interrupt trap", /* 19 T_NMI */
+ "overflow trap", /* 20 T_OFLOW */
+ "FPU bounds check fault", /* 21 T_BOUND */
+ "FPU device not available", /* 22 T_DNA */
+ "double fault", /* 23 T_DOUBLEFLT */
+ "FPU operand fetch fault", /* 24 T_FPOPFLT */
+ "invalid TSS fault", /* 25 T_TSSFLT */
+ "segment not present fault", /* 26 T_SEGNPFLT */
+ "stack fault", /* 27 T_STKFLT */
+};
+
/*
* trap(frame):
@@ -165,13 +197,23 @@ copyfault:
return;
#endif
- printf("trap type %d code = %x eip = %x cs = %x eflags = %x ",
+ if ((type & ~T_USER) <= MAX_TRAP_MSG)
+ printf("\n\nFatal trap %d: %s while in %s mode\n",
+ type & ~T_USER, trap_msg[type & ~T_USER],
+ (type & T_USER) ? "user" : "kernel");
+
+ printf("trap type = %d, code = %x\n eip = %x, cs = %x, eflags = %x, ",
frame.tf_trapno, frame.tf_err, frame.tf_eip,
frame.tf_cs, frame.tf_eflags);
- eva = rcr2();
- printf("cr2 %x cpl %x\n", eva, cpl);
- /* type &= ~T_USER; */ /* XXX what the hell is this */
- panic("trap");
+ eva = rcr2();
+ printf("cr2 = %x, current priority = %x\n", eva, cpl);
+
+ type &= ~T_USER;
+ if (type <= MAX_TRAP_MSG)
+ panic(trap_msg[type]);
+ else
+ panic("unknown/reserved trap");
+
/*NOTREACHED*/
case T_SEGNPFLT|T_USER:
diff --git a/sys/i386/include/asmacros.h b/sys/i386/include/asmacros.h
new file mode 100644
index 0000000..f0f2c01
--- /dev/null
+++ b/sys/i386/include/asmacros.h
@@ -0,0 +1,43 @@
+#define ALIGN_DATA .align 2 /* 4-byte alignment, zero filled */
+#define ALIGN_TEXT .align 2,0x90 /* 4-byte alignment, nop filled */
+#define SUPERALIGN_TEXT .align 4,0x90 /* 16-byte alignment (better for 486), nop filled */
+
+#define GEN_ENTRY(name) ALIGN_TEXT; .globl name; name:
+#define NON_GPROF_ENTRY(name) GEN_ENTRY(_/**/name)
+
+#ifdef GPROF
+/*
+ * ALTENTRY() must be before a corresponding ENTRY() so that it can jump
+ * over the mcounting.
+ */
+#define ALTENTRY(name) GEN_ENTRY(_/**/name); MCOUNT; jmp 2f
+#define ENTRY(name) GEN_ENTRY(_/**/name); MCOUNT; 2:
+/*
+ * The call to mcount supports the usual (bad) conventions. We allocate
+ * some data and pass a pointer to it although FreeBSD doesn't use
+ * the data. We set up a frame before calling mcount because that is
+ * the standard convention although it makes work for both mcount and
+ * callers.
+ */
+#define MCOUNT .data; ALIGN_DATA; 1:; .long 0; .text; \
+ pushl %ebp; movl %esp,%ebp; \
+ movl $1b,%eax; call mcount; popl %ebp
+#else
+/*
+ * ALTENTRY() has to align because it is before a corresponding ENTRY().
+ * ENTRY() has to align too because there may be no ALTENTRY() before it.
+ * If there is a previous ALTENTRY() then the alignment code is empty.
+ */
+#define ALTENTRY(name) GEN_ENTRY(_/**/name)
+#define ENTRY(name) GEN_ENTRY(_/**/name)
+
+#endif
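+
+/*
+ * Usage sketch (as in support.s elsewhere in this change): two entry points
+ * sharing one body are declared as
+ *
+ *	ALTENTRY(ovbcopy)
+ *	ENTRY(bcopy)
+ *
+ * so that, when profiling, each name gets an mcount call but the alternate
+ * entry jumps over the primary entry's mcounting.
+ */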
+
+#ifdef DUMMY_NOPS /* this will break some older machines */
+#define FASTER_NOP
+#define NOP
+#else
+#define FASTER_NOP pushl %eax ; inb $0x84,%al ; popl %eax
+#define NOP pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
+#endif
+
diff --git a/sys/i386/include/param.h b/sys/i386/include/param.h
index 65d72b2..c9e77b2 100644
--- a/sys/i386/include/param.h
+++ b/sys/i386/include/param.h
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)param.h 5.8 (Berkeley) 6/28/91
- * $Id: param.h,v 1.7 1993/10/15 10:07:43 rgrimes Exp $
+ * $Id: param.h,v 1.8 1993/11/07 17:42:58 wollman Exp $
*/
#ifndef _MACHINE_PARAM_H_
@@ -74,8 +74,6 @@
#define KERNBASE 0xFE000000 /* start of kernel virtual */
#define BTOPKERNBASE ((u_long)KERNBASE >> PGSHIFT)
-#define KERNSIZE 0x00C00000 /* size of kernel virtual */
-
#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
#define DEV_BSIZE (1 << DEV_BSHIFT)
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index f9baf22..9feb23c 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -42,7 +42,7 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
- * $Id: pmap.h,v 1.4 1993/10/15 10:07:44 rgrimes Exp $
+ * $Id: pmap.h,v 1.5 1993/11/07 17:43:02 wollman Exp $
*/
#ifndef _PMAP_MACHINE_
@@ -118,11 +118,11 @@ typedef struct pde pd_entry_t; /* page directory entry */
typedef struct pte pt_entry_t; /* Mach page table entry */
/*
- * NKPDE controls the virtual space of the kernel, what ever is left is
- * given to the user (NUPDE)
+ * NKPDE controls the virtual space of the kernel; whatever is left, minus
+ * the alternate page table area, is given to the user (NUPDE)
*/
#define NKPDE 7 /* number of kernel pde's */
-#define NUPDE (NPTEPG-NKPDE) /* number of user pde's */
+#define NUPDE (NPTEPG-NKPDE-1)/* number of user pde's */
/*
* The *PTDI values control the layout of virtual memory
*
@@ -132,7 +132,11 @@ typedef struct pte pt_entry_t; /* Mach page table entry */
#define APTDPTDI (NPTEPG-1) /* alt ptd entry that points to APTD */
#define KPTDI (APTDPTDI-NKPDE)/* start of kernel virtual pde's */
#define PTDPTDI (KPTDI-1) /* ptd entry that points to ptd! */
-#define UPTDI (PTDPTDI-1) /* ptd entry for u./kernel&user stack */
+#define KSTKPTDI (PTDPTDI-1) /* ptd entry for u./kernel&user stack */
+#define KSTKPTEOFF (NBPG/sizeof(struct pde)-UPAGES) /* pte entry for kernel stack */
+
+#define PDESIZE sizeof(struct pde) /* for assembly files */
+#define PTESIZE sizeof(struct pte) /* for assembly files */
/*
* Address of current and alternate address space page table maps
diff --git a/sys/i386/isa/icu.s b/sys/i386/isa/icu.s
index 22cec58..d424b1b 100644
--- a/sys/i386/isa/icu.s
+++ b/sys/i386/isa/icu.s
@@ -36,7 +36,7 @@
*
* @(#)icu.s 7.2 (Berkeley) 5/21/91
*
- * $Id$
+ * $Id: icu.s,v 1.3 1993/09/06 16:12:03 rgrimes Exp $
*/
/*
@@ -55,20 +55,28 @@
#define SOFTCLOCKMASK 0x8000
.data
+
.globl _cpl
-_cpl: .long 0xffff # current priority (all off)
+_cpl: .long 0xffff /* current priority (all off) */
+
.globl _imen
-_imen: .long 0xffff # interrupt mask enable (all off)
+_imen: .long 0xffff /* interrupt mask enable (all off) */
+
/* .globl _highmask */
_highmask: .long HIGHMASK
- .globl _ttymask
+
+ .globl _ttymask, _biomask, _netmask
_ttymask: .long 0
- .globl _biomask
_biomask: .long 0
- .globl _netmask
_netmask: .long 0
- .globl _ipending
+
+ .globl _ipending, _astpending
_ipending: .long 0
+_astpending: .long 0 /* tells us an AST needs to be taken */
+
+ .globl _netisr
+_netisr: .long 0 /* set with bits for which queue to service */
+
vec:
.long vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7
.long vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index c224396..92247dd 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
- * $Id: trap.c,v 1.5 1993/11/01 11:51:29 chmr Exp $
+ * $Id: trap.c,v 1.6 1993/11/04 15:05:41 davidg Exp $
*/
/*
@@ -85,6 +85,38 @@ int dostacklimits;
unsigned rcr2();
extern short cpl;
+#define MAX_TRAP_MSG 27
+char *trap_msg[] = {
+ "reserved addressing fault", /* 0 T_RESADFLT */
+ "privileged instruction fault", /* 1 T_PRIVINFLT */
+ "reserved operand fault", /* 2 T_RESOPFLT */
+ "breakpoint instruction fault", /* 3 T_BPTFLT */
+ "", /* 4 unused */
+ "system call trap", /* 5 T_SYSCALL */
+ "arithmetic trap", /* 6 T_ARITHTRAP */
+ "system forced exception", /* 7 T_ASTFLT */
+ "segmentation (limit) fault", /* 8 T_SEGFLT */
+ "protection fault", /* 9 T_PROTFLT */
+ "trace trap", /* 10 T_TRCTRAP */
+ "", /* 11 unused */
+ "page fault", /* 12 T_PAGEFLT */
+ "page table fault", /* 13 T_TABLEFLT */
+ "alignment fault", /* 14 T_ALIGNFLT */
+ "kernel stack pointer not valid", /* 15 T_KSPNOTVAL */
+ "bus error", /* 16 T_BUSERR */
+ "kernel debugger fault", /* 17 T_KDBTRAP */
+ "integer divide fault", /* 18 T_DIVIDE */
+ "non-maskable interrupt trap", /* 19 T_NMI */
+ "overflow trap", /* 20 T_OFLOW */
+ "FPU bounds check fault", /* 21 T_BOUND */
+ "FPU device not available", /* 22 T_DNA */
+ "double fault", /* 23 T_DOUBLEFLT */
+ "FPU operand fetch fault", /* 24 T_FPOPFLT */
+ "invalid TSS fault", /* 25 T_TSSFLT */
+ "segment not present fault", /* 26 T_SEGNPFLT */
+ "stack fault", /* 27 T_STKFLT */
+};
+
/*
* trap(frame):
@@ -165,13 +197,23 @@ copyfault:
return;
#endif
- printf("trap type %d code = %x eip = %x cs = %x eflags = %x ",
+ if ((type & ~T_USER) <= MAX_TRAP_MSG)
+ printf("\n\nFatal trap %d: %s while in %s mode\n",
+ type & ~T_USER, trap_msg[type & ~T_USER],
+ (type & T_USER) ? "user" : "kernel");
+
+ printf("trap type = %d, code = %x\n eip = %x, cs = %x, eflags = %x, ",
frame.tf_trapno, frame.tf_err, frame.tf_eip,
frame.tf_cs, frame.tf_eflags);
- eva = rcr2();
- printf("cr2 %x cpl %x\n", eva, cpl);
- /* type &= ~T_USER; */ /* XXX what the hell is this */
- panic("trap");
+ eva = rcr2();
+ printf("cr2 = %x, current priority = %x\n", eva, cpl);
+
+ type &= ~T_USER;
+ if (type <= MAX_TRAP_MSG)
+ panic(trap_msg[type]);
+ else
+ panic("unknown/reserved trap");
+
/*NOTREACHED*/
case T_SEGNPFLT|T_USER: