summaryrefslogtreecommitdiffstats
path: root/sys/amd64
diff options
context:
space:
mode:
authorrgrimes <rgrimes@FreeBSD.org>1994-05-25 09:21:21 +0000
committerrgrimes <rgrimes@FreeBSD.org>1994-05-25 09:21:21 +0000
commit2469c867a164210ce96143517059f21db7f1fd17 (patch)
tree9179427ac860211c445df663fd2b86267366bfba /sys/amd64
parentcb0aba89af15a48e2655e898a503946ac4cb42ae (diff)
downloadFreeBSD-src-2469c867a164210ce96143517059f21db7f1fd17.zip
FreeBSD-src-2469c867a164210ce96143517059f21db7f1fd17.tar.gz
The big 4.4BSD Lite to FreeBSD 2.0.0 (Development) patch.
Reviewed by: Rodney W. Grimes Submitted by: John Dyson and David Greenman
Diffstat (limited to 'sys/amd64')
-rw-r--r--sys/amd64/amd64/cpu_switch.S24
-rw-r--r--sys/amd64/amd64/db_interface.c2
-rw-r--r--sys/amd64/amd64/db_trace.c3
-rw-r--r--sys/amd64/amd64/fpu.c2
-rw-r--r--sys/amd64/amd64/genassym.c40
-rw-r--r--sys/amd64/amd64/locore.S81
-rw-r--r--sys/amd64/amd64/locore.s81
-rw-r--r--sys/amd64/amd64/machdep.c219
-rw-r--r--sys/amd64/amd64/mem.c51
-rw-r--r--sys/amd64/amd64/pmap.c76
-rw-r--r--sys/amd64/amd64/support.S12
-rw-r--r--sys/amd64/amd64/support.s12
-rw-r--r--sys/amd64/amd64/swtch.s24
-rw-r--r--sys/amd64/amd64/trap.c157
-rw-r--r--sys/amd64/amd64/tsc.c40
-rw-r--r--sys/amd64/amd64/vm_machdep.c185
-rw-r--r--sys/amd64/include/cpu.h34
-rw-r--r--sys/amd64/include/cpufunc.h139
-rw-r--r--sys/amd64/include/exec.h93
-rw-r--r--sys/amd64/include/frame.h26
-rw-r--r--sys/amd64/include/pcb.h7
-rw-r--r--sys/amd64/include/pmap.h72
-rw-r--r--sys/amd64/include/proc.h4
-rw-r--r--sys/amd64/include/reg.h16
-rw-r--r--sys/amd64/include/signal.h28
-rw-r--r--sys/amd64/include/vmparam.h14
-rw-r--r--sys/amd64/isa/clock.c40
-rw-r--r--sys/amd64/isa/isa.c1
-rw-r--r--sys/amd64/isa/npx.c2
29 files changed, 771 insertions, 714 deletions
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index 4dbc672..aa8b5ba 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -52,9 +52,9 @@
/*
* The following primitives manipulate the run queues.
* _whichqs tells which of the 32 queues _qs
- * have processes in them. Setrq puts processes into queues, Remrq
+ * have processes in them. setrunqueue puts processes into queues, Remrq
* removes them from queues. The running process is on no queue,
- * other processes are on a queue related to p->p_pri, divided by 4
+ * other processes are on a queue related to p->p_priority, divided by 4
* actually to shrink the 0-127 range of priorities into the 32 available
* queues.
*/
@@ -72,11 +72,11 @@ _want_resched: .long 0 /* we need to re-run the scheduler */
.text
/*
- * Setrq(p)
+ * setrunqueue(p)
*
* Call should be made at spl6(), and p->p_stat should be SRUN
*/
-ENTRY(setrq)
+ENTRY(setrunqueue)
movl 4(%esp),%eax
cmpl $0,P_RLINK(%eax) /* should not be on q already */
je set1
@@ -95,7 +95,7 @@ set1:
movl %eax,P_LINK(%ecx)
ret
-set2: .asciz "setrq"
+set2: .asciz "setrunqueue"
/*
* Remrq(p)
@@ -131,10 +131,10 @@ rem2:
ret
rem3: .asciz "remrq"
-sw0: .asciz "swtch"
+sw0: .asciz "cpu_switch"
/*
- * When no processes are on the runq, swtch() branches to _idle
+ * When no processes are on the runq, cpu_switch() branches to _idle
* to wait for something to come ready.
*/
ALIGN_TEXT
@@ -146,8 +146,8 @@ _idle:
sti
/*
- * XXX callers of swtch() do a bogus splclock(). Locking should
- * be left to swtch().
+ * XXX callers of cpu_switch() do a bogus splclock(). Locking should
+ * be left to cpu_switch().
*/
movl $SWI_AST_MASK,_cpl
testl $~SWI_AST_MASK,_ipending
@@ -169,9 +169,9 @@ badsw:
/*NOTREACHED*/
/*
- * Swtch()
+ * cpu_switch()
*/
-ENTRY(swtch)
+ENTRY(cpu_switch)
incl _cnt+V_SWTCH
/* switch to new process. first, save context as needed */
@@ -340,7 +340,7 @@ ENTRY(swtch_to_inactive)
/*
* savectx(pcb, altreturn)
* Update pcb, saving current processor state and arranging
- * for alternate return ala longjmp in swtch if altreturn is true.
+ * for alternate return ala longjmp in cpu_switch if altreturn is true.
*/
ENTRY(savectx)
movl 4(%esp),%ecx
diff --git a/sys/amd64/amd64/db_interface.c b/sys/amd64/amd64/db_interface.c
index 5f7c9d5..e79a2ae 100644
--- a/sys/amd64/amd64/db_interface.c
+++ b/sys/amd64/amd64/db_interface.c
@@ -36,7 +36,7 @@
#include "ddb/ddb.h"
#include <sys/reboot.h>
-#include <vm/vm_statistics.h>
+/* #include <vm/vm_statistics.h> */
#include <vm/pmap.h>
#include <setjmp.h>
diff --git a/sys/amd64/amd64/db_trace.c b/sys/amd64/amd64/db_trace.c
index c7c2cd8..d536d94 100644
--- a/sys/amd64/amd64/db_trace.c
+++ b/sys/amd64/amd64/db_trace.c
@@ -30,7 +30,8 @@
#include <vm/vm_param.h>
#include <vm/lock.h>
-#include <vm/vm_statistics.h>
+#include <vm/vm_prot.h>
+#include <vm/pmap.h>
#include <machine/pmap.h>
#include "systm.h"
#include "proc.h"
diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
index 00424bf..17400bd 100644
--- a/sys/amd64/amd64/fpu.c
+++ b/sys/amd64/amd64/fpu.c
@@ -438,7 +438,7 @@ npxintr(frame)
* in doreti, and the frame for that could easily be set up
* just before it is used).
*/
- curproc->p_regs = (int *)&frame.if_es;
+ curproc->p_md.md_regs = (int *)&frame.if_es;
#ifdef notyet
/*
* Encode the appropriate code for detailed information on
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
index b7847e8..a75d1f1 100644
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -37,21 +37,19 @@
* $Id: genassym.c,v 1.6 1993/11/13 02:24:59 davidg Exp $
*/
-#include "sys/param.h"
-#include "sys/buf.h"
-#include "sys/vmmeter.h"
-#include "sys/proc.h"
-#include "sys/user.h"
-#include "sys/mbuf.h"
-#include "sys/msgbuf.h"
-#include "sys/resourcevar.h"
-#include "machine/cpu.h"
-#include "machine/trap.h"
-#include "machine/psl.h"
-#include "sys/syscall.h"
-#include "vm/vm_param.h"
-#include "vm/vm_map.h"
-#include "machine/pmap.h"
+#include <sys/param.h>
+#include <sys/buf.h>
+#include <sys/map.h>
+#include <sys/proc.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <machine/cpu.h>
+#include <machine/trap.h>
+#include <machine/psl.h>
+#include <machine/reg.h>
+#include <sys/syscall.h>
+#include <vm/vm.h>
+#include <sys/user.h>
main()
{
@@ -70,12 +68,12 @@ main()
printf("#define\tI386_CR3PAT %d\n", I386_CR3PAT);
printf("#define\tUDOT_SZ %d\n", sizeof(struct user));
- printf("#define\tP_LINK %d\n", &p->p_link);
- printf("#define\tP_RLINK %d\n", &p->p_rlink);
+ printf("#define\tP_LINK %d\n", &p->p_forw);
+ printf("#define\tP_RLINK %d\n", &p->p_back);
printf("#define\tP_VMSPACE %d\n", &p->p_vmspace);
printf("#define\tVM_PMAP %d\n", &vms->vm_pmap);
printf("#define\tP_ADDR %d\n", &p->p_addr);
- printf("#define\tP_PRI %d\n", &p->p_pri);
+ printf("#define\tP_PRI %d\n", &p->p_priority);
printf("#define\tP_STAT %d\n", &p->p_stat);
printf("#define\tP_WCHAN %d\n", &p->p_wchan);
printf("#define\tP_FLAG %d\n", &p->p_flag);
@@ -87,10 +85,10 @@ main()
printf("#define\tV_SYSCALL %d\n", &vm->v_syscall);
printf("#define\tV_INTR %d\n", &vm->v_intr);
printf("#define\tV_SOFT %d\n", &vm->v_soft);
- printf("#define\tV_PDMA %d\n", &vm->v_pdma);
+/* printf("#define\tV_PDMA %d\n", &vm->v_pdma); */
printf("#define\tV_FAULTS %d\n", &vm->v_faults);
- printf("#define\tV_PGREC %d\n", &vm->v_pgrec);
- printf("#define\tV_FASTPGREC %d\n", &vm->v_fastpgrec);
+/* printf("#define\tV_PGREC %d\n", &vm->v_pgrec); */
+/* printf("#define\tV_FASTPGREC %d\n", &vm->v_fastpgrec); */
printf("#define\tUPAGES %d\n", UPAGES);
printf("#define\tHIGHPAGES %d\n", HIGHPAGES);
printf("#define\tCLSIZE %d\n", CLSIZE);
diff --git a/sys/amd64/amd64/locore.S b/sys/amd64/amd64/locore.S
index 8da8438..7aa6e6b 100644
--- a/sys/amd64/amd64/locore.S
+++ b/sys/amd64/amd64/locore.S
@@ -274,7 +274,7 @@ NON_GPROF_ENTRY(btext)
movl $0xa0,%ecx
1:
#endif /* BDE_DEBUGGER */
- movl $PG_V|PG_KW,%eax /* having these bits set, */
+ movl $PG_V|PG_KW|PG_NC_PWT,%eax /* kernel R/W, valid, cache write-through */
lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
movl %ebx,_KPTphys-KERNBASE /* save in global */
fillkpt
@@ -302,7 +302,7 @@ NON_GPROF_ENTRY(btext)
movl $(1+UPAGES+1+NKPT),%ecx /* number of PTEs */
movl %esi,%eax /* phys address of PTD */
andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
- orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ orl $PG_V|PG_KW|PG_NC_PWT,%eax /* valid, kernel read/write, cache write-through */
movl %esi,%ebx /* calculate pte offset to ptd */
shrl $PGSHIFT-2,%ebx
addl %esi,%ebx /* address of page directory */
@@ -452,10 +452,26 @@ reloc_gdt:
pushl %esi /* value of first for init386(first) */
call _init386 /* wire 386 chip for unix operation */
+ popl %esi
+#if 0
movl $0,_PTD
+#endif
+
+ .globl __ucodesel,__udatasel
+
+ pushl $0 /* unused */
+ pushl __udatasel /* ss */
+ pushl $0 /* esp - filled in by execve() */
+ pushl $0x3200 /* eflags (ring 3, int enab) */
+ pushl __ucodesel /* cs */
+ pushl $0 /* eip - filled in by execve() */
+ subl $(12*4),%esp /* space for rest of registers */
+
+ pushl %esp /* call main with frame pointer */
call _main /* autoconfiguration, mountroot etc */
- popl %esi
+
+ addl $(13*4),%esp /* back to a frame we can return with */
/*
* now we've run main() and determined what cpu-type we are, we can
@@ -473,69 +489,16 @@ reloc_gdt:
* set up address space and stack so that we can 'return' to user mode
*/
1:
- .globl __ucodesel,__udatasel
movl __ucodesel,%eax
movl __udatasel,%ecx
- /* build outer stack frame */
- pushl %ecx /* user ss */
- pushl $USRSTACK /* user esp */
- pushl %eax /* user cs */
- pushl $0 /* user ip */
+
movl %cx,%ds
movl %cx,%es
movl %ax,%fs /* double map cs to fs */
movl %cx,%gs /* and ds to gs */
- lret /* goto user! */
-
- pushl $lretmsg1 /* "should never get here!" */
- call _panic
-lretmsg1:
- .asciz "lret: toinit\n"
+ iret /* goto user! */
-
-#define LCALL(x,y) .byte 0x9a ; .long y ; .word x
-/*
- * Icode is copied out to process 1 and executed in user mode:
- * execve("/sbin/init", argv, envp); exit(0);
- * If the execve fails, process 1 exits and the system panics.
- */
-NON_GPROF_ENTRY(icode)
- pushl $0 /* envp for execve() */
-
-# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
- movl $argv,%eax
- subl $_icode,%eax
- pushl %eax /* argp for execve() */
-
-# pushl $init-_icode
- movl $init,%eax
- subl $_icode,%eax
- pushl %eax /* fname for execve() */
-
- pushl %eax /* dummy return address */
-
- movl $SYS_execve,%eax
- LCALL(0x7,0x0)
-
- /* exit if something botches up in the above execve() */
- pushl %eax /* execve failed, the errno will do for an */
- /* exit code because errnos are < 128 */
- pushl %eax /* dummy return address */
- movl $SYS_exit,%eax
- LCALL(0x7,0x0)
-
-init:
- .asciz "/sbin/init"
- ALIGN_DATA
-argv:
- .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
- .long eicode-_icode /* argv[1] follows icode after copyout */
- .long 0
-eicode:
-
- .globl _szicode
-_szicode:
- .long _szicode-_icode
+#define LCALL(x,y) .byte 0x9a ; .long y ; .word x
NON_GPROF_ENTRY(sigcode)
call SIGF_HANDLER(%esp)
diff --git a/sys/amd64/amd64/locore.s b/sys/amd64/amd64/locore.s
index 8da8438..7aa6e6b 100644
--- a/sys/amd64/amd64/locore.s
+++ b/sys/amd64/amd64/locore.s
@@ -274,7 +274,7 @@ NON_GPROF_ENTRY(btext)
movl $0xa0,%ecx
1:
#endif /* BDE_DEBUGGER */
- movl $PG_V|PG_KW,%eax /* having these bits set, */
+ movl $PG_V|PG_KW|PG_NC_PWT,%eax /* kernel R/W, valid, cache write-through */
lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
movl %ebx,_KPTphys-KERNBASE /* save in global */
fillkpt
@@ -302,7 +302,7 @@ NON_GPROF_ENTRY(btext)
movl $(1+UPAGES+1+NKPT),%ecx /* number of PTEs */
movl %esi,%eax /* phys address of PTD */
andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
- orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ orl $PG_V|PG_KW|PG_NC_PWT,%eax /* valid, kernel read/write, cache write-through */
movl %esi,%ebx /* calculate pte offset to ptd */
shrl $PGSHIFT-2,%ebx
addl %esi,%ebx /* address of page directory */
@@ -452,10 +452,26 @@ reloc_gdt:
pushl %esi /* value of first for init386(first) */
call _init386 /* wire 386 chip for unix operation */
+ popl %esi
+#if 0
movl $0,_PTD
+#endif
+
+ .globl __ucodesel,__udatasel
+
+ pushl $0 /* unused */
+ pushl __udatasel /* ss */
+ pushl $0 /* esp - filled in by execve() */
+ pushl $0x3200 /* eflags (ring 3, int enab) */
+ pushl __ucodesel /* cs */
+ pushl $0 /* eip - filled in by execve() */
+ subl $(12*4),%esp /* space for rest of registers */
+
+ pushl %esp /* call main with frame pointer */
call _main /* autoconfiguration, mountroot etc */
- popl %esi
+
+ addl $(13*4),%esp /* back to a frame we can return with */
/*
* now we've run main() and determined what cpu-type we are, we can
@@ -473,69 +489,16 @@ reloc_gdt:
* set up address space and stack so that we can 'return' to user mode
*/
1:
- .globl __ucodesel,__udatasel
movl __ucodesel,%eax
movl __udatasel,%ecx
- /* build outer stack frame */
- pushl %ecx /* user ss */
- pushl $USRSTACK /* user esp */
- pushl %eax /* user cs */
- pushl $0 /* user ip */
+
movl %cx,%ds
movl %cx,%es
movl %ax,%fs /* double map cs to fs */
movl %cx,%gs /* and ds to gs */
- lret /* goto user! */
-
- pushl $lretmsg1 /* "should never get here!" */
- call _panic
-lretmsg1:
- .asciz "lret: toinit\n"
+ iret /* goto user! */
-
-#define LCALL(x,y) .byte 0x9a ; .long y ; .word x
-/*
- * Icode is copied out to process 1 and executed in user mode:
- * execve("/sbin/init", argv, envp); exit(0);
- * If the execve fails, process 1 exits and the system panics.
- */
-NON_GPROF_ENTRY(icode)
- pushl $0 /* envp for execve() */
-
-# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
- movl $argv,%eax
- subl $_icode,%eax
- pushl %eax /* argp for execve() */
-
-# pushl $init-_icode
- movl $init,%eax
- subl $_icode,%eax
- pushl %eax /* fname for execve() */
-
- pushl %eax /* dummy return address */
-
- movl $SYS_execve,%eax
- LCALL(0x7,0x0)
-
- /* exit if something botches up in the above execve() */
- pushl %eax /* execve failed, the errno will do for an */
- /* exit code because errnos are < 128 */
- pushl %eax /* dummy return address */
- movl $SYS_exit,%eax
- LCALL(0x7,0x0)
-
-init:
- .asciz "/sbin/init"
- ALIGN_DATA
-argv:
- .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
- .long eicode-_icode /* argv[1] follows icode after copyout */
- .long 0
-eicode:
-
- .globl _szicode
-_szicode:
- .long _szicode-_icode
+#define LCALL(x,y) .byte 0x9a ; .long y ; .word x
NON_GPROF_ENTRY(sigcode)
call SIGF_HANDLER(%esp)
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index eab1075..31bc6c2 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -41,23 +41,24 @@
#include "npx.h"
#include "isa.h"
-#include <stddef.h>
-#include "param.h"
-#include "systm.h"
-#include "signalvar.h"
-#include "kernel.h"
-#include "map.h"
-#include "proc.h"
-#include "user.h"
-#include "exec.h" /* for PS_STRINGS */
-#include "buf.h"
-#include "reboot.h"
-#include "conf.h"
-#include "file.h"
-#include "callout.h"
-#include "malloc.h"
-#include "mbuf.h"
-#include "msgbuf.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/signalvar.h>
+#include <sys/kernel.h>
+#include <sys/map.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/buf.h>
+#include <sys/reboot.h>
+#include <sys/conf.h>
+#include <sys/file.h>
+#include <sys/callout.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#include <sys/sysctl.h>
#ifdef SYSVSHM
#include "sys/shm.h"
@@ -94,7 +95,7 @@ static void identifycpu(void);
static void initcpu(void);
static int test_page(int *, int);
-extern int grow(struct proc *,int);
+extern int grow(struct proc *,u_int);
const char machine[] = "PC-Class";
const char *cpu_model;
@@ -121,6 +122,7 @@ int bouncepages = BOUNCEPAGES;
#else
int bouncepages = 0;
#endif
+int msgbufmapped = 0; /* set when safe to use msgbuf */
extern int freebufspace;
extern char *bouncememory;
@@ -141,6 +143,12 @@ extern cyloffset;
int cpu_class;
void dumpsys __P((void));
+vm_offset_t buffer_sva, buffer_eva;
+vm_offset_t clean_sva, clean_eva;
+vm_offset_t pager_sva, pager_eva;
+int maxbkva, pager_map_size;
+
+#define offsetof(type, member) ((size_t)(&((type *)0)->member))
void
cpu_startup()
@@ -275,18 +283,19 @@ again:
if ((vm_size_t)(v - firstaddr) != size)
panic("startup: table size inconsistency");
- /*
- * Allocate a submap for buffer space allocations.
- * XXX we are NOT using buffer_map, but due to
- * the references to it we will just allocate 1 page of
- * vm (not real memory) to make things happy...
- */
- buffer_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- /* bufpages * */NBPG, TRUE);
+ clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
+ (nbuf*MAXBSIZE) + VM_PHYS_SIZE + maxbkva + pager_map_size, TRUE);
+
+ io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva, FALSE);
+ pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
+ pager_map_size, TRUE);
+
+ buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
+ (nbuf * MAXBSIZE), TRUE);
/*
* Allocate a submap for physio
*/
- phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ phys_map = kmem_suballoc(clean_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE);
/*
@@ -296,7 +305,7 @@ again:
mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
M_MBUF, M_NOWAIT);
bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
- mb_map = kmem_suballoc(kmem_map, (vm_offset_t)&mbutl, &maxaddr,
+ mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE);
/*
* Initialize callouts
@@ -305,7 +314,7 @@ again:
for (i = 1; i < ncallout; i++)
callout[i-1].c_next = &callout[i];
- printf("avail memory = %d (%d pages)\n", ptoa(vm_page_free_count), vm_page_free_count);
+ printf("avail memory = %d (%d pages)\n", ptoa(cnt.v_free_count), cnt.v_free_count);
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);
@@ -437,11 +446,11 @@ sendsig(catcher, sig, mask, code)
register struct proc *p = curproc;
register int *regs;
register struct sigframe *fp;
- struct sigacts *ps = p->p_sigacts;
+ struct sigacts *psp = p->p_sigacts;
int oonstack, frmtrap;
- regs = p->p_regs;
- oonstack = ps->ps_onstack;
+ regs = p->p_md.md_regs;
+ oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
/*
* Allocate and validate space for the signal handler
* context. Note that if the stack is in P0 space, the
@@ -449,10 +458,12 @@ sendsig(catcher, sig, mask, code)
* will fail if the process has not already allocated
* the space with a `brk'.
*/
- if (!ps->ps_onstack && (ps->ps_sigonstack & sigmask(sig))) {
- fp = (struct sigframe *)(ps->ps_sigsp
- - sizeof(struct sigframe));
- ps->ps_onstack = 1;
+ if ((psp->ps_flags & SAS_ALTSTACK) &&
+ (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
+ (psp->ps_sigonstack & sigmask(sig))) {
+ fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
+ psp->ps_sigstk.ss_size - sizeof(struct sigframe));
+ psp->ps_sigstk.ss_flags |= SA_ONSTACK;
} else {
fp = (struct sigframe *)(regs[tESP]
- sizeof(struct sigframe));
@@ -540,7 +551,7 @@ sigreturn(p, uap, retval)
{
register struct sigcontext *scp;
register struct sigframe *fp;
- register int *regs = p->p_regs;
+ register int *regs = p->p_md.md_regs;
int eflags;
/*
@@ -614,7 +625,10 @@ sigreturn(p, uap, retval)
if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0)
return(EINVAL);
- p->p_sigacts->ps_onstack = scp->sc_onstack & 01;
+ if (scp->sc_onstack & 01)
+ p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
+ else
+ p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
p->p_sigmask = scp->sc_mask &~
(sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
regs[tEBP] = scp->sc_fp;
@@ -651,7 +665,7 @@ boot(arghowto)
for(;;);
}
howto = arghowto;
- if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
+ if ((howto&RB_NOSYNC) == 0 && waittime < 0) {
register struct buf *bp;
int iter, nbusy;
@@ -818,13 +832,13 @@ setregs(p, entry, stack)
u_long entry;
u_long stack;
{
- p->p_regs[tEBP] = 0; /* bottom of the fp chain */
- p->p_regs[tEIP] = entry;
- p->p_regs[tESP] = stack;
- p->p_regs[tSS] = _udatasel;
- p->p_regs[tDS] = _udatasel;
- p->p_regs[tES] = _udatasel;
- p->p_regs[tCS] = _ucodesel;
+ p->p_md.md_regs[tEBP] = 0; /* bottom of the fp chain */
+ p->p_md.md_regs[tEIP] = entry;
+ p->p_md.md_regs[tESP] = stack;
+ p->p_md.md_regs[tSS] = _udatasel;
+ p->p_md.md_regs[tDS] = _udatasel;
+ p->p_md.md_regs[tES] = _udatasel;
+ p->p_md.md_regs[tCS] = _ucodesel;
p->p_addr->u_pcb.pcb_flags = 0; /* no fp at all */
load_cr0(rcr0() | CR0_TS); /* start emulating */
@@ -834,6 +848,34 @@ setregs(p, entry, stack)
}
/*
+ * machine dependent system variables.
+ */
+int
+cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
+ int *name;
+ u_int namelen;
+ void *oldp;
+ size_t *oldlenp;
+ void *newp;
+ size_t newlen;
+ struct proc *p;
+{
+
+ /* all sysctl names at this level are terminal */
+ if (namelen != 1)
+ return (ENOTDIR); /* overloaded */
+
+ switch (name[0]) {
+ case CPU_CONSDEV:
+ return (sysctl_rdstruct(oldp, oldlenp, newp, &cn_tty->t_dev,
+ sizeof cn_tty->t_dev));
+ default:
+ return (EOPNOTSUPP);
+ }
+ /* NOTREACHED */
+}
+
+/*
* Initialize 386 and configure to run kernel
*/
@@ -1105,9 +1147,11 @@ init386(first)
r_gdt.rd_limit = sizeof(gdt) - 1;
r_gdt.rd_base = (int) gdt;
lgdt(&r_gdt);
+
r_idt.rd_limit = sizeof(idt) - 1;
r_idt.rd_base = (int) idt;
lidt(&r_idt);
+
_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
lldt(_default_ldt);
currentldt = _default_ldt;
@@ -1339,7 +1383,7 @@ _remque(element)
* The registers are in the frame; the frame is in the user area of
* the process in question; when the process is active, the registers
* are in "the kernel stack"; when it's not, they're still there, but
- * things get flipped around. So, since p->p_regs is the whole address
+ * things get flipped around. So, since p->p_md.md_regs is the whole address
* of the register set, take its offset from the kernel stack, and
* index into the user block. Don't you just *love* virtual memory?
* (I'm starting to think seymour is right...)
@@ -1348,7 +1392,7 @@ _remque(element)
int
ptrace_set_pc (struct proc *p, unsigned int addr) {
void *regs = (char*)p->p_addr +
- ((char*) p->p_regs - (char*) kstack);
+ ((char*) p->p_md.md_regs - (char*) kstack);
((struct trapframe *)regs)->tf_eip = addr;
return 0;
@@ -1357,7 +1401,7 @@ ptrace_set_pc (struct proc *p, unsigned int addr) {
int
ptrace_single_step (struct proc *p) {
void *regs = (char*)p->p_addr +
- ((char*) p->p_regs - (char*) kstack);
+ ((char*) p->p_md.md_regs - (char*) kstack);
((struct trapframe *)regs)->tf_eflags |= PSL_T;
return 0;
@@ -1370,7 +1414,7 @@ ptrace_single_step (struct proc *p) {
int
ptrace_getregs (struct proc *p, unsigned int *addr) {
int error;
- struct regs regs = {0};
+ struct reg regs = {0};
if (error = fill_regs (p, &regs))
return error;
@@ -1381,7 +1425,7 @@ ptrace_getregs (struct proc *p, unsigned int *addr) {
int
ptrace_setregs (struct proc *p, unsigned int *addr) {
int error;
- struct regs regs = {0};
+ struct reg regs = {0};
if (error = copyin (addr, &regs, sizeof(regs)))
return error;
@@ -1390,11 +1434,11 @@ ptrace_setregs (struct proc *p, unsigned int *addr) {
}
int
-fill_regs(struct proc *p, struct regs *regs) {
+fill_regs(struct proc *p, struct reg *regs) {
int error;
struct trapframe *tp;
void *ptr = (char*)p->p_addr +
- ((char*) p->p_regs - (char*) kstack);
+ ((char*) p->p_md.md_regs - (char*) kstack);
tp = ptr;
regs->r_es = tp->tf_es;
@@ -1415,11 +1459,11 @@ fill_regs(struct proc *p, struct regs *regs) {
}
int
-set_regs (struct proc *p, struct regs *regs) {
+set_regs (struct proc *p, struct reg *regs) {
int error;
struct trapframe *tp;
void *ptr = (char*)p->p_addr +
- ((char*) p->p_regs - (char*) kstack);
+ ((char*) p->p_md.md_regs - (char*) kstack);
tp = ptr;
tp->tf_es = regs->r_es;
@@ -1444,6 +1488,69 @@ set_regs (struct proc *p, struct regs *regs) {
void
Debugger(const char *msg)
{
- printf("Debugger(\"%s\") called.", msg);
+ printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */
+
+#include <sys/disklabel.h>
+#define b_cylin b_resid
+#define dkpart(dev) (minor(dev) & 7)
+/*
+ * Determine the size of the transfer, and make sure it is
+ * within the boundaries of the partition. Adjust transfer
+ * if needed, and signal errors or early completion.
+ */
+int
+bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
+{
+ struct partition *p = lp->d_partitions + dkpart(bp->b_dev);
+ int labelsect = lp->d_partitions[0].p_offset;
+ int maxsz = p->p_size,
+ sz = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;
+
+ /* overwriting disk label ? */
+ /* XXX should also protect bootstrap in first 8K */
+ if (bp->b_blkno + p->p_offset <= LABELSECTOR + labelsect &&
+#if LABELSECTOR != 0
+ bp->b_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
+#endif
+ (bp->b_flags & B_READ) == 0 && wlabel == 0) {
+ bp->b_error = EROFS;
+ goto bad;
+ }
+
+#if defined(DOSBBSECTOR) && defined(notyet)
+ /* overwriting master boot record? */
+ if (bp->b_blkno + p->p_offset <= DOSBBSECTOR &&
+ (bp->b_flags & B_READ) == 0 && wlabel == 0) {
+ bp->b_error = EROFS;
+ goto bad;
+ }
+#endif
+
+ /* beyond partition? */
+ if (bp->b_blkno < 0 || bp->b_blkno + sz > maxsz) {
+ /* if exactly at end of disk, return an EOF */
+ if (bp->b_blkno == maxsz) {
+ bp->b_resid = bp->b_bcount;
+ return(0);
+ }
+ /* or truncate if part of it fits */
+ sz = maxsz - bp->b_blkno;
+ if (sz <= 0) {
+ bp->b_error = EINVAL;
+ goto bad;
+ }
+ bp->b_bcount = sz << DEV_BSHIFT;
+ }
+
+ /* calculate cylinder for disksort to order transfers with */
+ bp->b_pblkno = bp->b_blkno + p->p_offset;
+ bp->b_cylin = bp->b_pblkno / lp->d_secpercyl;
+ return(1);
+
+bad:
+ bp->b_flags |= B_ERROR;
+ return(-1);
+}
+
diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
index c3899a1..1b8f187 100644
--- a/sys/amd64/amd64/mem.c
+++ b/sys/amd64/amd64/mem.c
@@ -45,24 +45,23 @@
* Memory special file
*/
-#include "param.h"
-#include "conf.h"
-#include "buf.h"
-#include "systm.h"
-#include "uio.h"
-#include "malloc.h"
-#include "proc.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/buf.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
-#include "machine/cpu.h"
-#include "machine/psl.h"
+#include <machine/cpu.h>
+#include <machine/psl.h>
-#include "vm/vm_param.h"
-#include "vm/lock.h"
-#include "vm/vm_statistics.h"
-#include "vm/vm_prot.h"
-#include "vm/pmap.h"
+#include <vm/vm_param.h>
+#include <vm/lock.h>
+#include <vm/vm_prot.h>
+#include <vm/pmap.h>
-extern char *vmmap; /* poor name! */
+extern char *ptvmmap; /* poor name! */
/*ARGSUSED*/
int
mmclose(dev, uio, flags)
@@ -74,7 +73,7 @@ mmclose(dev, uio, flags)
switch (minor(dev)) {
case 14:
- fp = (struct trapframe *)curproc->p_regs;
+ fp = (struct trapframe *)curproc->p_md.md_regs;
fp->tf_eflags &= ~PSL_IOPL;
break;
default:
@@ -93,7 +92,7 @@ mmopen(dev, uio, flags)
switch (minor(dev)) {
case 14:
- fp = (struct trapframe *)curproc->p_regs;
+ fp = (struct trapframe *)curproc->p_md.md_regs;
fp->tf_eflags |= PSL_IOPL;
break;
default:
@@ -128,25 +127,25 @@ mmrw(dev, uio, flags)
/* minor device 0 is physical memory */
case 0:
v = uio->uio_offset;
- pmap_enter(pmap_kernel(), (vm_offset_t)vmmap, v,
+ pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
TRUE);
o = (int)uio->uio_offset & PGOFSET;
c = (u_int)(NBPG - ((int)iov->iov_base & PGOFSET));
- c = MIN(c, (u_int)(NBPG - o));
- c = MIN(c, (u_int)iov->iov_len);
- error = uiomove((caddr_t)&vmmap[o], (int)c, uio);
- pmap_remove(pmap_kernel(), (vm_offset_t)vmmap,
- (vm_offset_t)&vmmap[NBPG]);
+ c = min(c, (u_int)(NBPG - o));
+ c = min(c, (u_int)iov->iov_len);
+ error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
+ pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
+ (vm_offset_t)&ptvmmap[NBPG]);
continue;
/* minor device 1 is kernel memory */
case 1:
c = iov->iov_len;
- if (!kernacc((caddr_t)uio->uio_offset, c,
+ if (!kernacc((caddr_t)(int)uio->uio_offset, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return(EFAULT);
- error = uiomove((caddr_t)uio->uio_offset, (int)c, uio);
+ error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
continue;
/* minor device 2 is EOF/RATHOLE */
@@ -167,7 +166,7 @@ mmrw(dev, uio, flags)
malloc(CLBYTES, M_TEMP, M_WAITOK);
bzero(zbuf, CLBYTES);
}
- c = MIN(iov->iov_len, CLBYTES);
+ c = min(iov->iov_len, CLBYTES);
error = uiomove(zbuf, (int)c, uio);
continue;
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index d5b556f..88db9dd 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -85,19 +85,19 @@
* and to when physical maps must be made correct.
*/
-#include "param.h"
-#include "systm.h"
-#include "proc.h"
-#include "malloc.h"
-#include "user.h"
-#include "i386/include/cpufunc.h"
-#include "i386/include/cputypes.h"
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/user.h>
-#include "vm/vm.h"
-#include "vm/vm_kern.h"
-#include "vm/vm_page.h"
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
-#include "i386/isa/isa.h"
+#include <i386/include/cpufunc.h>
+#include <i386/include/cputypes.h>
+
+#include <i386/isa/isa.h>
/*
* Allocate various and sundry SYSMAPs used in the days of old VM
@@ -149,12 +149,12 @@ static inline void *vm_get_pmap();
static inline void vm_put_pmap();
inline void pmap_use_pt();
inline void pmap_unuse_pt();
-inline pt_entry_t * const pmap_pte();
+inline pt_entry_t * pmap_pte();
static inline pv_entry_t get_pv_entry();
void pmap_alloc_pv_entry();
void pmap_clear_modify();
void i386_protection_init();
-extern vm_offset_t pager_sva, pager_eva;
+extern vm_offset_t clean_sva, clean_eva;
extern int cpu_class;
#if BSDVM_COMPAT
@@ -163,8 +163,8 @@ extern int cpu_class;
/*
* All those kernel PT submaps that BSD is so fond of
*/
-pt_entry_t *CMAP1, *CMAP2, *mmap;
-caddr_t CADDR1, CADDR2, vmmap;
+pt_entry_t *CMAP1, *CMAP2, *ptmmap;
+caddr_t CADDR1, CADDR2, ptvmmap;
pt_entry_t *msgbufmap;
struct msgbuf *msgbufp;
#endif
@@ -180,8 +180,8 @@ void init_pv_entries(int) ;
*/
inline pt_entry_t *
-const pmap_pte(pmap, va)
- register pmap_t pmap;
+pmap_pte(pmap, va)
+ pmap_t pmap;
vm_offset_t va;
{
@@ -374,7 +374,7 @@ pmap_bootstrap(firstaddr, loadaddr)
SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )
- SYSMAP(caddr_t ,mmap ,vmmap ,1 )
+ SYSMAP(caddr_t ,ptmmap ,ptvmmap ,1 )
SYSMAP(struct msgbuf * ,msgbufmap ,msgbufp ,1 )
virtual_avail = va;
#endif
@@ -530,7 +530,7 @@ static inline void
vm_put_pmap(up)
struct pmaplist *up;
{
- kmem_free(kernel_map, up, ctob(1));
+ kmem_free(kernel_map, (vm_offset_t)up, ctob(1));
}
/*
@@ -851,7 +851,7 @@ pmap_remove(pmap, sva, eva)
if (pmap_is_managed(pa)) {
if ((((int) oldpte & PG_M) && (sva < USRSTACK || sva > UPT_MAX_ADDRESS))
|| (sva >= USRSTACK && sva < USRSTACK+(UPAGES*NBPG))) {
- if (sva < pager_sva || sva >= pager_eva) {
+ if (sva < clean_sva || sva >= clean_eva) {
m = PHYS_TO_VM_PAGE(pa);
m->flags &= ~PG_CLEAN;
}
@@ -941,7 +941,7 @@ pmap_remove(pmap, sva, eva)
if ((((int) oldpte & PG_M) && (va < USRSTACK || va > UPT_MAX_ADDRESS))
|| (va >= USRSTACK && va < USRSTACK+(UPAGES*NBPG))) {
- if (va < pager_sva || va >= pager_eva) {
+ if (va < clean_sva || va >= clean_eva ) {
m = PHYS_TO_VM_PAGE(pa);
m->flags &= ~PG_CLEAN;
}
@@ -1006,7 +1006,7 @@ pmap_remove_all(pa)
if ( (m->flags & PG_CLEAN) &&
((((int) *pte) & PG_M) && (pv->pv_va < USRSTACK || pv->pv_va > UPT_MAX_ADDRESS))
|| (pv->pv_va >= USRSTACK && pv->pv_va < USRSTACK+(UPAGES*NBPG))) {
- if (pv->pv_va < pager_sva || pv->pv_va >= pager_eva) {
+ if (pv->pv_va < clean_sva || pv->pv_va >= clean_eva) {
m->flags &= ~PG_CLEAN;
}
}
@@ -1261,7 +1261,11 @@ validate:
if (va < UPT_MIN_ADDRESS)
(int) npte |= PG_u;
else if (va < UPT_MAX_ADDRESS)
- (int) npte |= PG_u | PG_RW;
+ (int) npte |= PG_u | PG_RW | PG_NC_PWT;
+
+/*
+ printf("mapping: pa: %x, to va: %x, with pte: %x\n", pa, va, npte);
+*/
if( *pte != npte) {
*pte = npte;
@@ -1414,7 +1418,7 @@ validate:
/*
* Now validate mapping with desired protection/wiring.
*/
- *pte = (pt_entry_t) ( (int) (pa | PG_RO | PG_V | PG_u));
+ *pte = (pt_entry_t) ( (int) (pa | PG_V | PG_u));
}
/*
@@ -1448,16 +1452,16 @@ pmap_object_init_pt(pmap, addr, object, offset, size)
*/
if( size > object->size / 2) {
objbytes = size;
- p = (vm_page_t) queue_first(&object->memq);
- while (!queue_end(&object->memq, (queue_entry_t) p) && objbytes != 0) {
+ p = object->memq.tqh_first;
+ while ((p != NULL) && (objbytes != 0)) {
tmpoff = p->offset;
if( tmpoff < offset) {
- p = (vm_page_t) queue_next(&p->listq);
+ p = p->listq.tqe_next;
continue;
}
tmpoff -= offset;
if( tmpoff >= size) {
- p = (vm_page_t) queue_next(&p->listq);
+ p = p->listq.tqe_next;
continue;
}
@@ -1469,7 +1473,7 @@ pmap_object_init_pt(pmap, addr, object, offset, size)
vm_page_unhold(p);
pmap_enter_quick(pmap, addr+tmpoff, VM_PAGE_TO_PHYS(p));
}
- p = (vm_page_t) queue_next(&p->listq);
+ p = p->listq.tqe_next;
objbytes -= NBPG;
}
} else {
@@ -1699,13 +1703,13 @@ pmap_testbit(pa, bit)
* ptes as never modified.
*/
if (bit & PG_U ) {
- if ((pv->pv_va >= pager_sva) && (pv->pv_va < pager_eva)) {
+ if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) {
continue;
}
}
if (bit & PG_M ) {
if (pv->pv_va >= USRSTACK) {
- if (pv->pv_va >= pager_sva && pv->pv_va < pager_eva) {
+ if (pv->pv_va >= clean_sva && pv->pv_va < clean_eva) {
continue;
}
if (pv->pv_va < USRSTACK+(UPAGES*NBPG)) {
@@ -1761,7 +1765,7 @@ pmap_changebit(pa, bit, setem)
* don't write protect pager mappings
*/
if (!setem && (bit == PG_RW)) {
- if (va >= pager_sva && va < pager_eva)
+ if (va >= clean_sva && va < clean_eva)
continue;
}
@@ -1869,6 +1873,10 @@ pmap_phys_address(ppn)
/*
* Miscellaneous support routines follow
*/
+/*
+ * This really just builds a table for page write enable
+ * translation.
+ */
void
i386_protection_init()
@@ -1879,12 +1887,10 @@ i386_protection_init()
for (prot = 0; prot < 8; prot++) {
switch (prot) {
case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
- *kp++ = 0;
- break;
case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
- *kp++ = PG_RO;
+ *kp++ = 0;
break;
case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
index e808222..9634069 100644
--- a/sys/amd64/amd64/support.S
+++ b/sys/amd64/amd64/support.S
@@ -185,6 +185,7 @@ ENTRY(outsl) /* outsl(port, addr, cnt) */
* memory moves on standard DX !!!!!
*/
+ALTENTRY(blkclr)
ENTRY(bzero)
#if defined(I486_CPU) && (defined(I386_CPU) || defined(I586_CPU))
cmpl $CPUCLASS_486,_cpu_class
@@ -656,6 +657,17 @@ ENTRY(fuword)
movl $0,PCB_ONFAULT(%ecx)
ret
+/*
+ * These two routines are called from the profiling code, potentially
+ * at interrupt time. If they fail, that's okay, good things will
+ * happen later. Fail all the time for now - until the trap code is
+ * able to deal with this.
+ */
+ALTENTRY(suswintr)
+ENTRY(fuswintr)
+ movl $-1,%eax
+ ret
+
ENTRY(fusword)
movl _curpcb,%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
diff --git a/sys/amd64/amd64/support.s b/sys/amd64/amd64/support.s
index e808222..9634069 100644
--- a/sys/amd64/amd64/support.s
+++ b/sys/amd64/amd64/support.s
@@ -185,6 +185,7 @@ ENTRY(outsl) /* outsl(port, addr, cnt) */
* memory moves on standard DX !!!!!
*/
+ALTENTRY(blkclr)
ENTRY(bzero)
#if defined(I486_CPU) && (defined(I386_CPU) || defined(I586_CPU))
cmpl $CPUCLASS_486,_cpu_class
@@ -656,6 +657,17 @@ ENTRY(fuword)
movl $0,PCB_ONFAULT(%ecx)
ret
+/*
+ * These two routines are called from the profiling code, potentially
+ * at interrupt time. If they fail, that's okay, good things will
+ * happen later. Fail all the time for now - until the trap code is
+ * able to deal with this.
+ */
+ALTENTRY(suswintr)
+ENTRY(fuswintr)
+ movl $-1,%eax
+ ret
+
ENTRY(fusword)
movl _curpcb,%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index 4dbc672..aa8b5ba 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -52,9 +52,9 @@
/*
* The following primitives manipulate the run queues.
* _whichqs tells which of the 32 queues _qs
- * have processes in them. Setrq puts processes into queues, Remrq
+ * have processes in them. setrunqueue puts processes into queues, Remrq
* removes them from queues. The running process is on no queue,
- * other processes are on a queue related to p->p_pri, divided by 4
+ * other processes are on a queue related to p->p_priority, divided by 4
* actually to shrink the 0-127 range of priorities into the 32 available
* queues.
*/
@@ -72,11 +72,11 @@ _want_resched: .long 0 /* we need to re-run the scheduler */
.text
/*
- * Setrq(p)
+ * setrunqueue(p)
*
* Call should be made at spl6(), and p->p_stat should be SRUN
*/
-ENTRY(setrq)
+ENTRY(setrunqueue)
movl 4(%esp),%eax
cmpl $0,P_RLINK(%eax) /* should not be on q already */
je set1
@@ -95,7 +95,7 @@ set1:
movl %eax,P_LINK(%ecx)
ret
-set2: .asciz "setrq"
+set2: .asciz "setrunqueue"
/*
* Remrq(p)
@@ -131,10 +131,10 @@ rem2:
ret
rem3: .asciz "remrq"
-sw0: .asciz "swtch"
+sw0: .asciz "cpu_switch"
/*
- * When no processes are on the runq, swtch() branches to _idle
+ * When no processes are on the runq, cpu_switch() branches to _idle
* to wait for something to come ready.
*/
ALIGN_TEXT
@@ -146,8 +146,8 @@ _idle:
sti
/*
- * XXX callers of swtch() do a bogus splclock(). Locking should
- * be left to swtch().
+ * XXX callers of cpu_switch() do a bogus splclock(). Locking should
+ * be left to cpu_switch().
*/
movl $SWI_AST_MASK,_cpl
testl $~SWI_AST_MASK,_ipending
@@ -169,9 +169,9 @@ badsw:
/*NOTREACHED*/
/*
- * Swtch()
+ * cpu_switch()
*/
-ENTRY(swtch)
+ENTRY(cpu_switch)
incl _cnt+V_SWTCH
/* switch to new process. first, save context as needed */
@@ -340,7 +340,7 @@ ENTRY(swtch_to_inactive)
/*
* savectx(pcb, altreturn)
* Update pcb, saving current processor state and arranging
- * for alternate return ala longjmp in swtch if altreturn is true.
+ * for alternate return ala longjmp in cpu_switch if altreturn is true.
*/
ENTRY(savectx)
movl 4(%esp),%ecx
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 9bb38e1..382416f 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -41,32 +41,33 @@
* 386 Trap and System call handleing
*/
-#include "isa.h"
-#include "npx.h"
-#include "ddb.h"
-#include "machine/cpu.h"
-#include "machine/psl.h"
-#include "machine/reg.h"
-#include "machine/eflags.h"
-
-#include "param.h"
-#include "systm.h"
-#include "proc.h"
-#include "user.h"
-#include "acct.h"
-#include "kernel.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/acct.h>
+#include <sys/kernel.h>
+#include <sys/syscall.h>
#ifdef KTRACE
-#include "ktrace.h"
+#include <sys/ktrace.h>
#endif
-#include "vm/vm_param.h"
-#include "vm/pmap.h"
-#include "vm/vm_map.h"
-#include "vm/vm_user.h"
-#include "vm/vm_page.h"
-#include "sys/vmmeter.h"
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+
+#include <machine/cpu.h>
+#include <machine/psl.h>
+#include <machine/reg.h>
+#include <machine/eflags.h>
+
+#include <machine/trap.h>
-#include "machine/trap.h"
+#include "isa.h"
+#include "npx.h"
+#include "ddb.h"
#ifdef __GNUC__
@@ -84,7 +85,7 @@ void write_gs __P((/* promoted u_short */ int gs));
#endif /* __GNUC__ */
-extern int grow(struct proc *,int);
+extern int grow(struct proc *,u_int);
struct sysent sysent[];
int nsysent;
@@ -139,7 +140,7 @@ trap(frame)
{
register int i;
register struct proc *p = curproc;
- struct timeval syst;
+ u_quad_t sticks = 0;
int ucode, type, code, eva, fault_type;
frame.tf_eflags &= ~PSL_NT; /* clear nested trap XXX */
@@ -177,10 +178,10 @@ copyfault:
return;
}
- syst = p->p_stime;
if (ISPL(frame.tf_cs) == SEL_UPL) {
type |= T_USER;
- p->p_regs = (int *)&frame;
+ p->p_md.md_regs = (int *)&frame;
+ sticks = p->p_sticks;
}
skiptoswitch:
@@ -210,9 +211,9 @@ skiptoswitch:
case T_ASTFLT|T_USER: /* Allow process switch */
astoff();
cnt.v_soft++;
- if ((p->p_flag & SOWEUPC) && p->p_stats->p_prof.pr_scale) {
+ if ((p->p_flag & P_OWEUPC) && p->p_stats->p_prof.pr_scale) {
addupc(frame.tf_eip, &p->p_stats->p_prof, 1);
- p->p_flag &= ~SOWEUPC;
+ p->p_flag &= ~P_OWEUPC;
}
goto out;
@@ -284,7 +285,6 @@ skiptoswitch:
else
ftype = VM_PROT_READ;
- oldflags = p->p_flag;
if (map != kernel_map) {
vm_offset_t pa;
vm_offset_t v = (vm_offset_t) vtopte(va);
@@ -294,7 +294,7 @@ skiptoswitch:
* Keep swapout from messing with us during this
* critical time.
*/
- p->p_flag |= SLOCK;
+ ++p->p_lock;
/*
* Grow the stack if necessary
@@ -303,8 +303,7 @@ skiptoswitch:
&& (caddr_t)va < (caddr_t)USRSTACK) {
if (!grow(p, va)) {
rv = KERN_FAILURE;
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
goto nogo;
}
}
@@ -332,13 +331,10 @@ skiptoswitch:
if( ptepg->hold_count == 0 && ptepg->wire_count == 0) {
pmap_page_protect( VM_PAGE_TO_PHYS(ptepg),
VM_PROT_NONE);
- if( ptepg->flags & PG_CLEAN)
- vm_page_free(ptepg);
+ vm_page_free(ptepg);
}
-
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
} else {
/*
* Since we know that kernel virtual address addresses
@@ -482,32 +478,29 @@ nogo:
out:
while (i = CURSIG(p))
- psig(i);
- p->p_pri = p->p_usrpri;
+ postsig(i);
+ p->p_priority = p->p_usrpri;
if (want_resched) {
int s;
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
- * If that happened after we setrq ourselves but before we
- * swtch()'ed, we might not be on the queue indicated by
+ * If that happened after we setrunqueue ourselves but before we
+ * mi_switch()'ed, we might not be on the queue indicated by
* our priority.
*/
s = splclock();
- setrq(p);
+ setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
- swtch();
+ mi_switch();
splx(s);
while (i = CURSIG(p))
- psig(i);
+ postsig(i);
}
if (p->p_stats->p_prof.pr_scale) {
- int ticks;
- struct timeval *tv = &p->p_stime;
+ u_quad_t ticks = p->p_sticks - sticks;
- ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
- (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
if (ticks) {
#ifdef PROFTIMER
extern int profscale;
@@ -518,7 +511,7 @@ out:
#endif
}
}
- curpri = p->p_pri;
+ curpriority = p->p_priority;
}
/*
@@ -546,14 +539,12 @@ int trapwrite(addr)
p = curproc;
vm = p->p_vmspace;
- oldflags = p->p_flag;
- p->p_flag |= SLOCK;
+ ++p->p_lock;
if ((caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK) {
if (!grow(p, va)) {
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
return (1);
}
}
@@ -579,8 +570,7 @@ int trapwrite(addr)
vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
}
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
if (rv != KERN_SUCCESS)
return 1;
@@ -603,31 +593,45 @@ syscall(frame)
register int i;
register struct sysent *callp;
register struct proc *p = curproc;
- struct timeval syst;
+ u_quad_t sticks;
int error, opc;
int args[8], rval[2];
- int code;
+ u_int code;
#ifdef lint
r0 = 0; r0 = r0; r1 = 0; r1 = r1;
#endif
- syst = p->p_stime;
+ sticks = p->p_sticks;
if (ISPL(frame.tf_cs) != SEL_UPL)
panic("syscall");
code = frame.tf_eax;
- p->p_regs = (int *)&frame;
+ p->p_md.md_regs = (int *)&frame;
params = (caddr_t)frame.tf_esp + sizeof (int) ;
/*
* Reconstruct pc, assuming lcall $X,y is 7 bytes, as it is always.
*/
opc = frame.tf_eip - 7;
- if (code == 0) {
+ /*
+ * Need to check if this is a 32 bit or 64 bit syscall.
+ */
+ if (code == SYS_syscall) {
+ /*
+ * Code is first argument, followed by actual args.
+ */
code = fuword(params);
params += sizeof (int);
+ } else if (code == SYS___syscall) {
+ /*
+ * Like syscall, but code is a quad, so as to maintain
+ * quad alignment for the rest of the arguments.
+ */
+ code = fuword(params + _QUAD_LOWWORD * sizeof(int));
+ params += sizeof(quad_t);
}
- if (code < 0 || code >= nsysent)
+
+ if (code >= nsysent)
callp = &sysent[0];
else
callp = &sysent[code];
@@ -672,32 +676,29 @@ done:
*/
p = curproc;
while (i = CURSIG(p))
- psig(i);
- p->p_pri = p->p_usrpri;
+ postsig(i);
+ p->p_priority = p->p_usrpri;
if (want_resched) {
int s;
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
- * If that happened after we setrq ourselves but before we
+ * If that happened after we setrunqueue ourselves but before we
* swtch()'ed, we might not be on the queue indicated by
* our priority.
*/
s = splclock();
- setrq(p);
+ setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
- swtch();
+ mi_switch();
splx(s);
while (i = CURSIG(p))
- psig(i);
+ postsig(i);
}
if (p->p_stats->p_prof.pr_scale) {
- int ticks;
- struct timeval *tv = &p->p_stime;
+ u_quad_t ticks = p->p_sticks - sticks;
- ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
- (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
if (ticks) {
#ifdef PROFTIMER
extern int profscale;
@@ -708,21 +709,9 @@ done:
#endif
}
}
- curpri = p->p_pri;
+ curpriority = p->p_priority;
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET))
ktrsysret(p->p_tracep, code, error, rval[0]);
#endif
-#ifdef DIAGNOSTICx
-{ extern int _udatasel, _ucodesel;
- if (frame.tf_ss != _udatasel)
- printf("ss %x call %d\n", frame.tf_ss, code);
- if ((frame.tf_cs&0xffff) != _ucodesel)
- printf("cs %x call %d\n", frame.tf_cs, code);
- if (frame.tf_eip > VM_MAXUSER_ADDRESS) {
- printf("eip %x call %d\n", frame.tf_eip, code);
- frame.tf_eip = 0;
- }
-}
-#endif
}
diff --git a/sys/amd64/amd64/tsc.c b/sys/amd64/amd64/tsc.c
index d338cd5..e40079a 100644
--- a/sys/amd64/amd64/tsc.c
+++ b/sys/amd64/amd64/tsc.c
@@ -50,6 +50,7 @@
#include "i386/isa/isa.h"
#include "i386/isa/rtc.h"
#include "i386/isa/timerreg.h"
+#include <machine/cpu.h>
/* X-tals being what they are, it's nice to be able to fudge this one... */
/* Note, the name changed here from XTALSPEED to TIMER_FREQ rgrimes 4/26/93 */
@@ -71,15 +72,23 @@ static u_int hardclock_divisor;
void
-timerintr(struct intrframe frame)
+clkintr(frame)
+ struct clockframe frame;
{
- timer_func(frame);
+ hardclock(&frame);
+}
+
+#if 0
+void
+timerintr(struct clockframe frame)
+{
+ timer_func(&frame);
switch (timer0_state) {
case 0:
break;
case 1:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
timer0_prescale = 0;
}
break;
@@ -96,7 +105,7 @@ timerintr(struct intrframe frame)
break;
case 3:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
disable_intr();
outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
outb(TIMER_CNTR0, TIMER_DIV(hz)%256);
@@ -111,6 +120,7 @@ timerintr(struct intrframe frame)
}
}
+#endif
int
acquire_timer0(int rate, void (*function)() )
@@ -395,16 +405,6 @@ test_inittodr(time_t base)
}
#endif
-
-/*
- * Restart the clock.
- */
-void
-resettodr()
-{
-}
-
-
/*
* Wire clock interrupt in.
*/
@@ -428,3 +428,15 @@ spinwait(int millisecs)
{
DELAY(1000 * millisecs);
}
+
+void
+cpu_initclocks()
+{
+ startrtclock();
+ enablertclock();
+}
+
+void
+setstatclockrate(int newhz)
+{
+}
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index a892c29..a7c4e59 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -42,27 +42,21 @@
*/
#include "npx.h"
-#include "param.h"
-#include "systm.h"
-#include "proc.h"
-#include "malloc.h"
-#include "buf.h"
-#include "user.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/buf.h>
+#include <sys/vnode.h>
+#include <sys/user.h>
-#include "../include/cpu.h"
+#include <machine/cpu.h>
-#include "vm/vm.h"
-#include "vm/vm_kern.h"
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
#define b_cylin b_resid
-#define MAXCLSTATS 256
-int clstats[MAXCLSTATS];
-int rqstats[MAXCLSTATS];
-
-
-#ifndef NOBOUNCE
-
caddr_t bouncememory;
vm_offset_t bouncepa, bouncepaend;
int bouncepages, bpwait;
@@ -75,7 +69,8 @@ unsigned *bounceallocarray;
int bouncefree;
#define SIXTEENMEG (4096*4096)
-#define MAXBKVA 1024
+#define MAXBKVA 512
+int maxbkva=MAXBKVA*NBPG;
/* special list that can be used at interrupt time for eventual kva free */
struct kvasfree {
@@ -258,6 +253,7 @@ int count;
pa = vm_bounce_page_find(1);
pmap_kenter(kva + i * NBPG, pa);
}
+ pmap_update();
return kva;
}
@@ -309,8 +305,8 @@ vm_bounce_alloc(bp)
bp->b_bufsize = bp->b_bcount;
}
- vastart = (vm_offset_t) bp->b_un.b_addr;
- vaend = (vm_offset_t) bp->b_un.b_addr + bp->b_bufsize;
+ vastart = (vm_offset_t) bp->b_data;
+ vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;
vapstart = i386_trunc_page(vastart);
vapend = i386_round_page(vaend);
@@ -369,11 +365,11 @@ vm_bounce_alloc(bp)
/*
* save the original buffer kva
*/
- bp->b_savekva = bp->b_un.b_addr;
+ bp->b_savekva = bp->b_data;
/*
* put our new kva into the buffer (offset by original offset)
*/
- bp->b_un.b_addr = (caddr_t) (((vm_offset_t) kva) |
+ bp->b_data = (caddr_t) (((vm_offset_t) kva) |
((vm_offset_t) bp->b_savekva & (NBPG - 1)));
return;
}
@@ -403,7 +399,7 @@ vm_bounce_free(bp)
return;
origkva = (vm_offset_t) bp->b_savekva;
- bouncekva = (vm_offset_t) bp->b_un.b_addr;
+ bouncekva = (vm_offset_t) bp->b_data;
vastart = bouncekva;
vaend = bouncekva + bp->b_bufsize;
@@ -449,17 +445,15 @@ vm_bounce_free(bp)
/*
* add the old kva into the "to free" list
*/
- bouncekva = i386_trunc_page((vm_offset_t) bp->b_un.b_addr);
+ bouncekva = i386_trunc_page((vm_offset_t) bp->b_data);
vm_bounce_kva_free( bouncekva, countvmpg*NBPG, 0);
- bp->b_un.b_addr = bp->b_savekva;
+ bp->b_data = bp->b_savekva;
bp->b_savekva = 0;
bp->b_flags &= ~B_BOUNCE;
return;
}
-#endif /* NOBOUNCE */
-
/*
* init the bounce buffer system
*/
@@ -468,10 +462,8 @@ vm_bounce_init()
{
vm_offset_t minaddr, maxaddr;
- io_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, MAXBKVA * NBPG, FALSE);
kvasfreecnt = 0;
-#ifndef NOBOUNCE
if (bouncepages == 0)
return;
@@ -487,11 +479,10 @@ vm_bounce_init()
bouncepa = pmap_kextract((vm_offset_t) bouncememory);
bouncepaend = bouncepa + bouncepages * NBPG;
bouncefree = bouncepages;
-#endif
-
}
+#ifdef BROKEN_IN_44
static void
cldiskvamerge( kvanew, orig1, orig1cnt, orig2, orig2cnt)
vm_offset_t kvanew;
@@ -827,6 +818,7 @@ nocluster:
ap->av_forw = bp;
bp->av_back = ap;
}
+#endif
/*
* quick version of vm_fault
@@ -881,7 +873,7 @@ cpu_fork(p1, p2)
offset = mvesp() - (int)kstack;
bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
(unsigned) ctob(UPAGES) - offset);
- p2->p_regs = p1->p_regs;
+ p2->p_md.md_regs = p1->p_md.md_regs;
/*
* Wire top of address space of child to it's kstack.
@@ -930,7 +922,7 @@ cpu_fork(p1, p2)
*
* Next, we assign a dummy context to be written over by swtch,
* calling it to send this process off to oblivion.
- * [The nullpcb allows us to minimize cost in swtch() by not having
+ * [The nullpcb allows us to minimize cost in mi_switch() by not having
* a special case].
*/
struct proc *swtch_to_inactive();
@@ -952,8 +944,7 @@ cpu_exit(p)
kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
p->p_addr = (struct user *) &nullpcb;
- splclock();
- swtch();
+ mi_switch();
/* NOTREACHED */
}
#else
@@ -965,9 +956,8 @@ cpu_exit(p)
#if NNPX > 0
npxexit(p);
#endif /* NNPX */
- splclock();
- curproc = 0;
- swtch();
+ curproc = p;
+ mi_switch();
/*
* This is to shutup the compiler, and if swtch() failed I suppose
* this would be a good thing. This keeps gcc happy because panic
@@ -990,6 +980,21 @@ cpu_wait(p) struct proc *p; {
#endif
/*
+ * Dump the machine specific header information at the start of a core dump.
+ */
+int
+cpu_coredump(p, vp, cred)
+ struct proc *p;
+ struct vnode *vp;
+ struct ucred *cred;
+{
+
+ return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
+ (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
+ p));
+}
+
+/*
* Set a red zone in the kernel stack after the u. area.
*/
void
@@ -1008,6 +1013,43 @@ setredzone(pte, vaddr)
}
/*
+ * Move pages from one kernel virtual address to another.
+ * Both addresses are assumed to reside in the Sysmap,
+ * and size must be a multiple of CLSIZE.
+ */
+
+/*
+ * Move pages from one kernel virtual address to another.
+ * Both addresses are assumed to reside in the Sysmap,
+ * and size must be a multiple of CLSIZE.
+ */
+
+void
+pagemove(from, to, size)
+ register caddr_t from, to;
+ int size;
+{
+ register vm_offset_t pa;
+
+ if (size & CLOFSET)
+ panic("pagemove");
+ while (size > 0) {
+ pa = pmap_kextract((vm_offset_t)from);
+ if (pa == 0)
+ panic("pagemove 2");
+ if (pmap_kextract((vm_offset_t)to) != 0)
+ panic("pagemove 3");
+ pmap_remove(kernel_pmap,
+ (vm_offset_t)from, (vm_offset_t)from + PAGE_SIZE);
+ pmap_kenter( (vm_offset_t)to, pa);
+ from += PAGE_SIZE;
+ to += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ pmap_update();
+}
+
+/*
* Convert kernel VA to physical address
*/
u_long
@@ -1036,22 +1078,49 @@ vmapbuf(bp)
{
register int npf;
register caddr_t addr;
- register long flags = bp->b_flags;
- struct proc *p;
int off;
vm_offset_t kva;
- register vm_offset_t pa;
+ vm_offset_t pa, lastv, v;
- if ((flags & B_PHYS) == 0)
+ if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
+
+ lastv = 0;
+ for (addr = (caddr_t)trunc_page(bp->b_data);
+ addr < bp->b_data + bp->b_bufsize;
+ addr += PAGE_SIZE) {
+
+/*
+ * make sure that the pde is valid and held
+ */
+ v = trunc_page(((vm_offset_t)vtopte(addr)));
+ if (v != lastv) {
+ vm_fault_quick(v, VM_PROT_READ);
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, v);
+ vm_page_hold(PHYS_TO_VM_PAGE(pa));
+ lastv = v;
+ }
+
+/*
+ * do the vm_fault if needed, do the copy-on-write thing when
+ * reading stuff off device into memory.
+ */
+ vm_fault_quick(addr,
+ (bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t) addr);
+/*
+ * hold the data page
+ */
+ vm_page_hold(PHYS_TO_VM_PAGE(pa));
+ }
+
addr = bp->b_saveaddr = bp->b_un.b_addr;
off = (int)addr & PGOFSET;
- p = bp->b_proc;
npf = btoc(round_page(bp->b_bufsize + off));
kva = kmem_alloc_wait(phys_map, ctob(npf));
bp->b_un.b_addr = (caddr_t) (kva + off);
while (npf--) {
- pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t)addr);
if (pa == 0)
panic("vmapbuf: null page frame");
pmap_kenter(kva, trunc_page(pa));
@@ -1071,7 +1140,7 @@ vunmapbuf(bp)
{
register int npf;
register caddr_t addr = bp->b_un.b_addr;
- vm_offset_t kva;
+ vm_offset_t kva,va,v,lastv,pa;
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
@@ -1080,6 +1149,32 @@ vunmapbuf(bp)
kmem_free_wakeup(phys_map, kva, ctob(npf));
bp->b_un.b_addr = bp->b_saveaddr;
bp->b_saveaddr = NULL;
+
+
+/*
+ * unhold the pde, and data pages
+ */
+ lastv = 0;
+ for (addr = (caddr_t)trunc_page(bp->b_data);
+ addr < bp->b_data + bp->b_bufsize;
+ addr += NBPG) {
+
+ /*
+ * release the data page
+ */
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t) addr);
+ vm_page_unhold(PHYS_TO_VM_PAGE(pa));
+
+ /*
+ * and unhold the page table
+ */
+ v = trunc_page(((vm_offset_t)vtopte(addr)));
+ if (v != lastv) {
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, v);
+ vm_page_unhold(PHYS_TO_VM_PAGE(pa));
+ lastv = v;
+ }
+ }
}
/*
@@ -1104,7 +1199,7 @@ cpu_reset() {
int
grow(p, sp)
struct proc *p;
- int sp;
+ u_int sp;
{
unsigned int nss;
caddr_t v;
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
index a2df023..2216d71 100644
--- a/sys/amd64/include/cpu.h
+++ b/sys/amd64/include/cpu.h
@@ -45,6 +45,7 @@
*/
#include "machine/frame.h"
#include "machine/segments.h"
+#include <machine/spl.h>
/*
* definitions of cpu-dependent requirements
@@ -53,20 +54,16 @@
#undef COPY_SIGCODE /* don't copy sigcode above user stack in exec */
#define cpu_exec(p) /* nothing */
+#define cpu_swapin(p) /* nothing */
+#define cpu_setstack(p, ap) (p)->p_md.md_regs = ap
+#define cpu_set_init_frame(p, fp) (p)->p_md.md_regs = fp
-/*
- * Arguments to hardclock, softclock and gatherstats
- * encapsulate the previous machine state in an opaque
- * clockframe; for now, use generic intrframe.
- * XXX softclock() has been fixed. It never needed a
- * whole frame, only a usermode flag, at least on this
- * machine. Fix the rest.
- */
-typedef struct intrframe clockframe;
+#define CLKF_USERMODE(framep) (ISPL((framep)->cf_cs) == SEL_UPL)
+#define CLKF_INTR(framep) (0)
+#define CLKF_BASEPRI(framep) (((framep)->cf_ppl & ~SWI_AST_MASK) == 0)
+#define CLKF_PC(framep) ((framep)->cf_eip)
-#define CLKF_USERMODE(framep) (ISPL((framep)->if_cs) == SEL_UPL)
-#define CLKF_BASEPRI(framep) (((framep)->if_ppl & ~SWI_AST_MASK) == 0)
-#define CLKF_PC(framep) ((framep)->if_eip)
+#define resettodr() /* no todr to set */
/*
* Preempt the current process if in interrupt from user mode,
@@ -79,7 +76,7 @@ typedef struct intrframe clockframe;
* interrupt. On tahoe, request an ast to send us through trap(),
* marking the proc as needing a profiling tick.
*/
-#define profile_tick(p, framep) { (p)->p_flag |= SOWEUPC; aston(); }
+#define need_proftick(p) { (p)->p_flag |= P_OWEUPC; aston(); }
/*
* Notify the current process (p) that it has a signal pending,
@@ -100,6 +97,17 @@ struct cpu_nameclass {
int cpu_class;
};
+/*
+ * CTL_MACHDEP definitions.
+ */
+#define CPU_CONSDEV 1 /* dev_t: console terminal device */
+#define CPU_MAXID 2 /* number of valid machdep ids */
+
+#define CTL_MACHDEP_NAMES { \
+ { 0, 0 }, \
+ { "console_device", CTLTYPE_STRUCT }, \
+}
+
#ifdef KERNEL
extern int want_resched; /* resched was called */
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index 3c2dcc9..729a5c0 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -71,145 +71,6 @@ tlbflush()
__asm __volatile("movl %%cr3, %%eax; movl %%eax, %%cr3" : : : "ax");
}
-static inline
-int
-imin(a, b)
- int a, b;
-{
-
- return (a < b ? a : b);
-}
-
-static inline
-int
-imax(a, b)
- int a, b;
-{
-
- return (a > b ? a : b);
-}
-
-static inline
-unsigned int
-min(a, b)
- unsigned int a, b;
-{
-
- return (a < b ? a : b);
-}
-
-static inline
-unsigned int
-max(a, b)
- unsigned int a, b;
-{
-
- return (a > b ? a : b);
-}
-
-static inline
-long
-lmin(a, b)
- long a, b;
-{
-
- return (a < b ? a : b);
-}
-
-static inline
-long
-lmax(a, b)
- long a, b;
-{
-
- return (a > b ? a : b);
-}
-
-static inline
-unsigned long
-ulmin(a, b)
- unsigned long a, b;
-{
-
- return (a < b ? a : b);
-}
-
-static inline
-unsigned long
-ulmax(a, b)
- unsigned long a, b;
-{
-
- return (a > b ? a : b);
-}
-
-static inline
-int
-ffs(mask)
- register long mask;
-{
- register int bit;
-
- if (!mask)
- return(0);
- for (bit = 1;; ++bit) {
- if (mask&0x01)
- return(bit);
- mask >>= 1;
- }
-}
-
-static inline
-int
-bcmp(v1, v2, len)
- void *v1, *v2;
- register unsigned len;
-{
- register u_char *s1 = v1, *s2 = v2;
-
- while (len--)
- if (*s1++ != *s2++)
- return (1);
- return (0);
-}
-
-static inline
-size_t
-strlen(s1)
- register const char *s1;
-{
- register size_t len;
-
- for (len = 0; *s1++ != '\0'; len++)
- ;
- return (len);
-}
-
-struct quehead {
- struct quehead *qh_link;
- struct quehead *qh_rlink;
-};
-
-static inline void
-insque(void *a, void *b)
-{
- register struct quehead *element = a, *head = b;
- element->qh_link = head->qh_link;
- head->qh_link = (struct quehead *)element;
- element->qh_rlink = (struct quehead *)head;
- ((struct quehead *)(element->qh_link))->qh_rlink
- = (struct quehead *)element;
-}
-
-static inline void
-remque(void *a)
-{
- register struct quehead *element = a;
- ((struct quehead *)(element->qh_link))->qh_rlink = element->qh_rlink;
- ((struct quehead *)(element->qh_rlink))->qh_link = element->qh_link;
- element->qh_rlink = 0;
-}
-
#else /* not __GNUC__ */
extern void insque __P((void *, void *));
extern void remque __P((void *));
diff --git a/sys/amd64/include/exec.h b/sys/amd64/include/exec.h
index eb587a4..f63ec49 100644
--- a/sys/amd64/include/exec.h
+++ b/sys/amd64/include/exec.h
@@ -33,51 +33,96 @@
* @(#)exec.h 8.1 (Berkeley) 6/11/93
*/
-/* Size of a page in an object file. */
+#ifndef _EXEC_H_
+#define _EXEC_H_
+
#define __LDPGSZ 4096
/* Valid magic number check. */
#define N_BADMAG(ex) \
- ((ex).a_magic != NMAGIC && (ex).a_magic != OMAGIC && \
- (ex).a_magic != ZMAGIC)
+ (N_GETMAGIC(ex) != OMAGIC && N_GETMAGIC(ex) != NMAGIC && \
+ N_GETMAGIC(ex) != ZMAGIC && N_GETMAGIC(ex) != QMAGIC && \
+ N_GETMAGIC_NET(ex) != OMAGIC && N_GETMAGIC_NET(ex) != NMAGIC && \
+ N_GETMAGIC_NET(ex) != ZMAGIC && N_GETMAGIC_NET(ex) != QMAGIC)
+
+#define N_ALIGN(ex,x) \
+ (N_GETMAGIC(ex) == ZMAGIC || N_GETMAGIC(ex) == QMAGIC || \
+ N_GETMAGIC_NET(ex) == ZMAGIC || N_GETMAGIC_NET(ex) == QMAGIC ? \
+ ((x) + __LDPGSZ - 1) & ~(__LDPGSZ - 1) : (x))
/* Address of the bottom of the text segment. */
-#define N_TXTADDR(X) 0
+#define N_TXTADDR(ex) \
+ ((N_GETMAGIC(ex) == OMAGIC || N_GETMAGIC(ex) == NMAGIC || \
+ N_GETMAGIC(ex) == ZMAGIC) ? 0 : __LDPGSZ)
/* Address of the bottom of the data segment. */
#define N_DATADDR(ex) \
- (N_TXTADDR(ex) + ((ex).a_magic == OMAGIC ? (ex).a_text \
- : __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1))))
+ N_ALIGN(ex, N_TXTADDR(ex) + (ex).a_text)
+
+#define N_GETMAGIC(ex) \
+ ( (ex).a_midmag & 0xffff )
+#define N_GETMID(ex) \
+ ( (N_GETMAGIC_NET(ex) == ZMAGIC) ? N_GETMID_NET(ex) : \
+ ((ex).a_midmag >> 16) & 0x03ff )
+#define N_GETFLAG(ex) \
+ ( (N_GETMAGIC_NET(ex) == ZMAGIC) ? N_GETFLAG_NET(ex) : \
+ ((ex).a_midmag >> 26) & 0x3f )
+#define N_SETMAGIC(ex,mag,mid,flag) \
+ ( (ex).a_midmag = (((flag) & 0x3f) <<26) | (((mid) & 0x03ff) << 16) | \
+ ((mag) & 0xffff) )
+
+#define N_GETMAGIC_NET(ex) \
+ (ntohl((ex).a_midmag) & 0xffff)
+#define N_GETMID_NET(ex) \
+ ((ntohl((ex).a_midmag) >> 16) & 0x03ff)
+#define N_GETFLAG_NET(ex) \
+ ((ntohl((ex).a_midmag) >> 26) & 0x3f)
+#define N_SETMAGIC_NET(ex,mag,mid,flag) \
+ ( (ex).a_midmag = htonl( (((flag)&0x3f)<<26) | (((mid)&0x03ff)<<16) | \
+ (((mag)&0xffff)) ) )
/* Text segment offset. */
#define N_TXTOFF(ex) \
- ((ex).a_magic == ZMAGIC ? __LDPGSZ : sizeof(struct exec))
+ (N_GETMAGIC(ex) == ZMAGIC ? __LDPGSZ : (N_GETMAGIC(ex) == QMAGIC || \
+ N_GETMAGIC_NET(ex) == ZMAGIC) ? 0 : sizeof(struct exec))
/* Data segment offset. */
#define N_DATOFF(ex) \
- (N_TXTOFF(ex) + ((ex).a_magic != ZMAGIC ? (ex).a_text : \
- __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1))))
+ N_ALIGN(ex, N_TXTOFF(ex) + (ex).a_text)
+
+/* Relocation table offset. */
+#define N_RELOFF(ex) \
+ N_ALIGN(ex, N_DATOFF(ex) + (ex).a_data)
/* Symbol table offset. */
#define N_SYMOFF(ex) \
- (N_TXTOFF(ex) + (ex).a_text + (ex).a_data + (ex).a_trsize + \
- (ex).a_drsize)
+ (N_RELOFF(ex) + (ex).a_trsize + (ex).a_drsize)
/* String table offset. */
#define N_STROFF(ex) (N_SYMOFF(ex) + (ex).a_syms)
-/* Description of the object file header (a.out format). */
+/*
+ * Header prepended to each a.out file.
+ * only manipulate the a_midmag field via the
+ * N_SETMAGIC/N_GET{MAGIC,MID,FLAG} macros in a.out.h
+ */
+
struct exec {
-#define OMAGIC 0407 /* old impure format */
-#define NMAGIC 0410 /* read-only text */
-#define ZMAGIC 0413 /* demand load format */
- long a_magic; /* magic number */
-
- u_long a_text; /* text segment size */
- u_long a_data; /* initialized data size */
- u_long a_bss; /* uninitialized data size */
- u_long a_syms; /* symbol table size */
- u_long a_entry; /* entry point */
- u_long a_trsize; /* text relocation size */
- u_long a_drsize; /* data relocation size */
+unsigned long a_midmag; /* htonl(flags<<26 | mid<<16 | magic) */
+unsigned long a_text; /* text segment size */
+unsigned long a_data; /* initialized data size */
+unsigned long a_bss; /* uninitialized data size */
+unsigned long a_syms; /* symbol table size */
+unsigned long a_entry; /* entry point */
+unsigned long a_trsize; /* text relocation size */
+unsigned long a_drsize; /* data relocation size */
};
+#define a_magic a_midmag /* XXX Hack to work with current kern_execve.c */
+
+/* a_magic */
+#define OMAGIC 0407 /* old impure format */
+#define NMAGIC 0410 /* read-only text */
+#define ZMAGIC 0413 /* demand load format */
+#define QMAGIC 0314 /* "compact" demand load format */
+
+#endif /* !_EXEC_H_ */
diff --git a/sys/amd64/include/frame.h b/sys/amd64/include/frame.h
index 05bf265..db2993e 100644
--- a/sys/amd64/include/frame.h
+++ b/sys/amd64/include/frame.h
@@ -100,6 +100,32 @@ struct intrframe {
int if_ss;
};
+/* frame of clock (same as interrupt frame) */
+
+struct clockframe {
+ int cf_vec;
+ int cf_ppl;
+ int cf_es;
+ int cf_ds;
+ int cf_edi;
+ int cf_esi;
+ int cf_ebp;
+ int :32;
+ int cf_ebx;
+ int cf_edx;
+ int cf_ecx;
+ int cf_eax;
+ int :32; /* for compat with trap frame - trapno */
+ int :32; /* for compat with trap frame - err */
+ /* below portion defined in 386 hardware */
+ int cf_eip;
+ int cf_cs;
+ int cf_eflags;
+ /* below only when transitting rings (e.g. user to kernel) */
+ int cf_esp;
+ int cf_ss;
+};
+
/*
* Signal frame
*/
diff --git a/sys/amd64/include/pcb.h b/sys/amd64/include/pcb.h
index a7a29df..990e5f9 100644
--- a/sys/amd64/include/pcb.h
+++ b/sys/amd64/include/pcb.h
@@ -79,6 +79,13 @@ struct pcb {
int pcb_cmap2; /* XXX temporary PTE - will prefault instead */
};
+/*
+ * The pcb is augmented with machine-dependent additional data for
+ * core dumps. For the i386: ???
+ */
+struct md_coredump {
+};
+
#ifdef KERNEL
extern struct pcb *curpcb; /* our current running pcb */
#endif
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 74f002d..7ddcebd 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -48,75 +48,8 @@
#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_ 1
-#include "vm/vm_prot.h"
-/*
- * 386 page table entry and page table directory
- * W.Jolitz, 8/89
- */
-struct pde
-{
-unsigned int
- pd_v:1, /* valid bit */
- pd_prot:2, /* access control */
- pd_mbz1:2, /* reserved, must be zero */
- pd_u:1, /* hardware maintained 'used' bit */
- :1, /* not used */
- pd_mbz2:2, /* reserved, must be zero */
- :3, /* reserved for software */
- pd_pfnum:20; /* physical page frame number of pte's*/
-};
-
-#define PD_MASK 0xffc00000UL /* page directory address bits */
-#define PT_MASK 0x003ff000UL /* page table address bits */
-#define PD_SHIFT 22 /* page directory address shift */
-#define PG_SHIFT 12 /* page table address shift */
-
-struct pte
-{
-unsigned int
- pg_v:1, /* valid bit */
- pg_prot:2, /* access control */
- pg_mbz1:2, /* reserved, must be zero */
- pg_u:1, /* hardware maintained 'used' bit */
- pg_m:1, /* hardware maintained modified bit */
- pg_mbz2:2, /* reserved, must be zero */
- pg_w:1, /* software, wired down page */
- :1, /* software (unused) */
- pg_nc:1, /* 'uncacheable page' bit */
- pg_pfnum:20; /* physical page frame number */
-};
-
-#define PG_V 0x00000001
-#define PG_RO 0x00000000
-#define PG_RW 0x00000002
-#define PG_u 0x00000004
-#define PG_PROT 0x00000006 /* all protection bits . */
-#define PG_W 0x00000200
-#define PG_N 0x00000800 /* Non-cacheable */
-#define PG_M 0x00000040
-#define PG_U 0x00000020
-#define PG_FRAME 0xfffff000UL
-
-#define PG_NOACC 0
-#define PG_KR 0x00000000
-#define PG_KW 0x00000002
-#define PG_URKR 0x00000004
-#define PG_URKW 0x00000004
-#define PG_UW 0x00000006
-
-/* Garbage for current bastardized pager that assumes a hp300 */
-#define PG_NV 0
-#define PG_CI 0
-
-/*
- * Page Protection Exception bits
- */
-#define PGEX_P 0x01 /* Protection violation vs. not present */
-#define PGEX_W 0x02 /* during a Write cycle */
-#define PGEX_U 0x04 /* access from User mode (UPL) */
+#include <machine/pte.h>
-/* typedef struct pde pd_entry_t; */ /* page directory entry */
-/* typedef struct pte pt_entry_t; */ /* Mach page table entry */
typedef unsigned int *pd_entry_t;
typedef unsigned int *pt_entry_t;
@@ -129,7 +62,7 @@ typedef unsigned int *pt_entry_t;
* given to the user (NUPDE)
*/
#ifndef NKPT
-#define NKPT 15 /* actual number of kernel pte's */
+#define NKPT 24 /* actual number of kernel pte's */
#endif
#ifndef NKPDE
#define NKPDE 63 /* addressable number of kpte's */
@@ -159,7 +92,6 @@ typedef unsigned int *pt_entry_t;
#ifdef KERNEL
extern pt_entry_t PTmap[], APTmap[], Upte;
extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde, Upde;
-extern pt_entry_t *Sysmap;
extern int IdlePTD; /* physical address of "Idle" state directory */
#endif
diff --git a/sys/amd64/include/proc.h b/sys/amd64/include/proc.h
index 1b9e4a2..92de3af 100644
--- a/sys/amd64/include/proc.h
+++ b/sys/amd64/include/proc.h
@@ -42,9 +42,7 @@
*/
struct mdproc {
int md_flags; /* machine-dependent flags */
-#ifdef notyet
- int *p_regs; /* registers on current frame */
-#endif
+ int *md_regs; /* registers on current frame */
};
/* md_flags */
diff --git a/sys/amd64/include/reg.h b/sys/amd64/include/reg.h
index d20f8d0..2a1f061 100644
--- a/sys/amd64/include/reg.h
+++ b/sys/amd64/include/reg.h
@@ -74,23 +74,33 @@
* use whichver order, defined above, is correct, so that it
* is all invisible to the user.
*/
-struct regs {
+struct reg {
unsigned int r_es;
unsigned int r_ds;
unsigned int r_edi;
unsigned int r_esi;
unsigned int r_ebp;
+ unsigned int r_isp;
unsigned int r_ebx;
unsigned int r_edx;
unsigned int r_ecx;
unsigned int r_eax;
+ unsigned int r_trapno;
+ unsigned int r_err;
unsigned int r_eip;
unsigned int r_cs;
unsigned int r_eflags;
unsigned int r_esp;
unsigned int r_ss;
- unsigned int r_fs;
- unsigned int r_gs;
+};
+
+/*
+ * Register set accessible via /proc/$pid/fpreg
+ */
+struct fpreg {
+#if 0
+ int fpr_xxx; /* not implemented */
+#endif
};
#endif /* _MACHINE_REG_H_ */
diff --git a/sys/amd64/include/signal.h b/sys/amd64/include/signal.h
index 98793f2..16cbef2 100644
--- a/sys/amd64/include/signal.h
+++ b/sys/amd64/include/signal.h
@@ -51,11 +51,25 @@ typedef int sig_atomic_t;
* a non-standard exit is performed.
*/
struct sigcontext {
- int sc_onstack; /* sigstack state to restore */
- int sc_mask; /* signal mask to restore */
- int sc_sp; /* sp to restore */
- int sc_fp; /* fp to restore */
- int sc_ap; /* ap to restore */
- int sc_pc; /* pc to restore */
- int sc_ps; /* psl to restore */
+ int sc_onstack; /* sigstack state to restore */
+ int sc_mask; /* signal mask to restore */
+ int sc_esp; /* machine state */
+ int sc_ebp;
+ int sc_isp;
+ int sc_eip;
+ int sc_efl;
+ int sc_es;
+ int sc_ds;
+ int sc_cs;
+ int sc_ss;
+ int sc_edi;
+ int sc_esi;
+ int sc_ebx;
+ int sc_edx;
+ int sc_ecx;
+ int sc_eax;
+# define sc_sp sc_esp
+# define sc_fp sc_ebp
+# define sc_pc sc_eip
+# define sc_ps sc_efl
};
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
index df90126..05218ad 100644
--- a/sys/amd64/include/vmparam.h
+++ b/sys/amd64/include/vmparam.h
@@ -174,20 +174,6 @@
#define KLSDIST 3 /* klusters advance/retard for seq. fifo */
/*
- * Paging thresholds (see vm_sched.c).
- * Strategy of 1/19/85:
- * lotsfree is 512k bytes, but at most 1/4 of memory
- * desfree is 200k bytes, but at most 1/8 of memory
- * minfree is 64k bytes, but at most 1/2 of desfree
- */
-#define LOTSFREE (512 * 1024)
-#define LOTSFREEFRACT 4
-#define DESFREE (200 * 1024)
-#define DESFREEFRACT 8
-#define MINFREE (64 * 1024)
-#define MINFREEFRACT 2
-
-/*
* There are two clock hands, initially separated by HANDSPREAD bytes
* (but at most all of user memory). The amount of time to reclaim
* a page once the pageout process examines it increases with this
diff --git a/sys/amd64/isa/clock.c b/sys/amd64/isa/clock.c
index d338cd5..e40079a 100644
--- a/sys/amd64/isa/clock.c
+++ b/sys/amd64/isa/clock.c
@@ -50,6 +50,7 @@
#include "i386/isa/isa.h"
#include "i386/isa/rtc.h"
#include "i386/isa/timerreg.h"
+#include <machine/cpu.h>
/* X-tals being what they are, it's nice to be able to fudge this one... */
/* Note, the name changed here from XTALSPEED to TIMER_FREQ rgrimes 4/26/93 */
@@ -71,15 +72,23 @@ static u_int hardclock_divisor;
void
-timerintr(struct intrframe frame)
+clkintr(frame)
+ struct clockframe frame;
{
- timer_func(frame);
+ hardclock(&frame);
+}
+
+#if 0
+void
+timerintr(struct clockframe frame)
+{
+ timer_func(&frame);
switch (timer0_state) {
case 0:
break;
case 1:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
timer0_prescale = 0;
}
break;
@@ -96,7 +105,7 @@ timerintr(struct intrframe frame)
break;
case 3:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
disable_intr();
outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
outb(TIMER_CNTR0, TIMER_DIV(hz)%256);
@@ -111,6 +120,7 @@ timerintr(struct intrframe frame)
}
}
+#endif
int
acquire_timer0(int rate, void (*function)() )
@@ -395,16 +405,6 @@ test_inittodr(time_t base)
}
#endif
-
-/*
- * Restart the clock.
- */
-void
-resettodr()
-{
-}
-
-
/*
* Wire clock interrupt in.
*/
@@ -428,3 +428,15 @@ spinwait(int millisecs)
{
DELAY(1000 * millisecs);
}
+
+void
+cpu_initclocks()
+{
+ startrtclock();
+ enablertclock();
+}
+
+void
+setstatclockrate(int newhz)
+{
+}
diff --git a/sys/amd64/isa/isa.c b/sys/amd64/isa/isa.c
index b0d84ef..32e59e7 100644
--- a/sys/amd64/isa/isa.c
+++ b/sys/amd64/isa/isa.c
@@ -59,6 +59,7 @@
#include "rlist.h"
#include "machine/segments.h"
#include "vm/vm.h"
+#include <machine/spl.h>
#include "i386/isa/isa_device.h"
#include "i386/isa/isa.h"
#include "i386/isa/icu.h"
diff --git a/sys/amd64/isa/npx.c b/sys/amd64/isa/npx.c
index 00424bf..17400bd 100644
--- a/sys/amd64/isa/npx.c
+++ b/sys/amd64/isa/npx.c
@@ -438,7 +438,7 @@ npxintr(frame)
* in doreti, and the frame for that could easily be set up
* just before it is used).
*/
- curproc->p_regs = (int *)&frame.if_es;
+ curproc->p_md.md_regs = (int *)&frame.if_es;
#ifdef notyet
/*
* Encode the appropriate code for detailed information on
OpenPOWER on IntegriCloud