author     benno <benno@FreeBSD.org>  2001-06-10 02:39:37 +0000
committer  benno <benno@FreeBSD.org>  2001-06-10 02:39:37 +0000
commit     dced41b010427ac65d6b272c8f71fe0ceb10f957 (patch)
tree       397967f14c650ccaee373ac0341e68e9aeef2017
parent     b7aa1a45b4088c67846bc31081b843ed0bb05b6c (diff)
download   FreeBSD-src-dced41b010427ac65d6b272c8f71fe0ceb10f957.zip
           FreeBSD-src-dced41b010427ac65d6b272c8f71fe0ceb10f957.tar.gz
Bring in NetBSD code used in the PowerPC port.
Reviewed by:	obrien, dfr
Obtained from:	NetBSD
-rw-r--r--  sys/powerpc/aim/clock.c | 235
-rw-r--r--  sys/powerpc/aim/locore.S | 1373
-rw-r--r--  sys/powerpc/aim/machdep.c | 972
-rw-r--r--  sys/powerpc/aim/mmu_oea.c | 1747
-rw-r--r--  sys/powerpc/aim/ofw_machdep.c | 131
-rw-r--r--  sys/powerpc/aim/ofwmagic.S | 75
-rw-r--r--  sys/powerpc/aim/swtch.S | 249
-rw-r--r--  sys/powerpc/aim/trap.c | 607
-rw-r--r--  sys/powerpc/include/bat.h | 172
-rw-r--r--  sys/powerpc/include/bus.h | 802
-rw-r--r--  sys/powerpc/include/cpu.h | 98
-rw-r--r--  sys/powerpc/include/db_machdep.h | 99
-rw-r--r--  sys/powerpc/include/fpu.h | 71
-rw-r--r--  sys/powerpc/include/intr.h | 163
-rw-r--r--  sys/powerpc/include/pcb.h | 69
-rw-r--r--  sys/powerpc/include/pio.h | 256
-rw-r--r--  sys/powerpc/include/pmap.h | 134
-rw-r--r--  sys/powerpc/include/powerpc.h | 59
-rw-r--r--  sys/powerpc/include/proc.h | 42
-rw-r--r--  sys/powerpc/include/psl.h | 84
-rw-r--r--  sys/powerpc/include/pte.h | 112
-rw-r--r--  sys/powerpc/include/reg.h | 27
-rw-r--r--  sys/powerpc/include/signal.h | 71
-rw-r--r--  sys/powerpc/include/stdarg.h | 123
-rw-r--r--  sys/powerpc/include/trap.h | 93
-rw-r--r--  sys/powerpc/include/varargs.h | 50
-rw-r--r--  sys/powerpc/include/vmparam.h | 124
-rw-r--r--  sys/powerpc/powerpc/bcopy.c | 154
-rw-r--r--  sys/powerpc/powerpc/clock.c | 235
-rw-r--r--  sys/powerpc/powerpc/copyinstr.c | 73
-rw-r--r--  sys/powerpc/powerpc/copystr.c | 70
-rw-r--r--  sys/powerpc/powerpc/fubyte.c | 54
-rw-r--r--  sys/powerpc/powerpc/fuswintr.c | 52
-rw-r--r--  sys/powerpc/powerpc/fuword.c | 54
-rw-r--r--  sys/powerpc/powerpc/locore.S | 1373
-rw-r--r--  sys/powerpc/powerpc/locore.s | 1373
-rw-r--r--  sys/powerpc/powerpc/machdep.c | 972
-rw-r--r--  sys/powerpc/powerpc/mmu_oea.c | 1747
-rw-r--r--  sys/powerpc/powerpc/ofw_machdep.c | 131
-rw-r--r--  sys/powerpc/powerpc/ofwmagic.S | 75
-rw-r--r--  sys/powerpc/powerpc/ofwmagic.s | 75
-rw-r--r--  sys/powerpc/powerpc/pmap.c | 1747
-rw-r--r--  sys/powerpc/powerpc/subyte.c | 61
-rw-r--r--  sys/powerpc/powerpc/suswintr.c | 52
-rw-r--r--  sys/powerpc/powerpc/suword.c | 53
-rw-r--r--  sys/powerpc/powerpc/swtch.S | 249
-rw-r--r--  sys/powerpc/powerpc/swtch.s | 249
-rw-r--r--  sys/powerpc/powerpc/syncicache.c | 98
-rw-r--r--  sys/powerpc/powerpc/trap.c | 607
49 files changed, 17592 insertions, 0 deletions
diff --git a/sys/powerpc/aim/clock.c b/sys/powerpc/aim/clock.c
new file mode 100644
index 0000000..c2a9944
--- /dev/null
+++ b/sys/powerpc/aim/clock.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: clock.c,v 1.9 2000/01/19 02:52:19 msaitoh Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/timetc.h>
+#include <sys/interrupt.h>
+
+#include <vm/vm.h>
+
+#include <dev/ofw/openfirm.h>
+
+#include <machine/clock.h>
+#include <machine/cpu.h>
+
+#if 0 /* XXX */
+#include "adb.h"
+#else
+#define NADB 0
+#endif
+
+/*
+ * Initially we assume a processor with a bus frequency of 12.5 MHz.
+ */
+static u_long ns_per_tick = 80;
+static long ticks_per_intr;
+static volatile u_long lasttb;
+
+#define SECDAY 86400
+#define DIFF19041970 2082844800
+
+#if NADB > 0
+extern int adb_read_date_time __P((int *));
+extern int adb_set_date_time __P((int));
+#endif
+
+static int clockinitted = 0;
+
+void
+inittodr(time_t base)
+{
+ time_t deltat;
+ u_int rtc_time;
+ struct timespec ts;
+
+ /*
+ * If we can't read from RTC, use the fs time.
+ */
+#if NADB > 0
+ if (adb_read_date_time(&rtc_time) < 0)
+#endif
+ {
+ ts.tv_sec = base;
+ ts.tv_nsec = 0;
+ tc_setclock(&ts);
+ return;
+ }
+ clockinitted = 1;
+ ts.tv_sec = rtc_time - DIFF19041970;
+
+ deltat = ts.tv_sec - base;
+ if (deltat < 0) {
+ deltat = -deltat;
+ }
+ if (deltat < 2 * SECDAY) {
+ tc_setclock(&ts);
+ return;
+ }
+
+ printf("WARNING: clock %s %d days",
+ ts.tv_sec < base ? "lost" : "gained", (int)(deltat / SECDAY));
+
+ printf(" -- CHECK AND RESET THE DATE!\n");
+}
+
+/*
+ * Similar to the above
+ */
+void
+resettodr()
+{
+#if NADB > 0
+ u_int rtc_time;
+
+ if (clockinitted) {
+ rtc_time = time.tv_sec + DIFF19041970;
+ adb_set_date_time(rtc_time);
+ }
+#endif
+}
+
+void
+decr_intr(struct clockframe *frame)
+{
+ u_long tb;
+ long tick;
+ int nticks;
+
+ /*
+ * Check whether we are initialized.
+ */
+ if (!ticks_per_intr)
+ return;
+
+ /*
+ * Based on the actual time delay since the last decrementer reload,
+ * we arrange for earlier interrupt next time.
+ */
+ __asm ("mftb %0; mfdec %1" : "=r"(tb), "=r"(tick));
+ for (nticks = 0; tick < 0; nticks++)
+ tick += ticks_per_intr;
+ __asm __volatile ("mtdec %0" :: "r"(tick));
+ /*
+ * lasttb is used during microtime. Set it to the virtual
+ * start of this tick interval.
+ */
+ lasttb = tb + tick - ticks_per_intr;
+
+#if 0 /* XXX */
+ intrcnt[CNT_CLOCK]++;
+ {
+ int pri;
+ int msr;
+
+ pri = splclock();
+ if (pri & (1 << SPL_CLOCK)) {
+ tickspending += nticks;
+ }
+ else {
+ nticks += tickspending;
+ tickspending = 0;
+
+ /*
+ * Reenable interrupts
+ */
+ __asm __volatile ("mfmsr %0; ori %0, %0, %1; mtmsr %0"
+ : "=r"(msr) : "K"(PSL_EE));
+
+ /*
+ * Do standard timer interrupt stuff.
+ * Do softclock stuff only on the last iteration.
+ */
+ frame->pri = pri | (1 << SIR_CLOCK);
+ while (--nticks > 0)
+ hardclock(frame);
+ frame->pri = pri;
+ hardclock(frame);
+ }
+ splx(pri);
+ }
+#endif
+}
+
+void
+cpu_initclocks(void)
+{
+
+ /* Do nothing */
+}
+
+static __inline u_quad_t
+mftb(void)
+{
+ u_long scratch;
+ u_quad_t tb;
+
+ __asm ("1: mftbu %0; mftb %0+1; mftbu %1; cmpw 0,%0,%1; bne 1b"
+ : "=r"(tb), "=r"(scratch));
+ return tb;
+}
+
+/*
+ * Wait for about n microseconds (at least!).
+ */
+void
+delay(unsigned n)
+{
+ u_quad_t tb;
+ u_long tbh, tbl, scratch;
+
+ tb = mftb();
+ tb += (n * 1000 + ns_per_tick - 1) / ns_per_tick;
+ tbh = tb >> 32;
+ tbl = tb;
+ __asm ("1: mftbu %0; cmplw %0,%1; blt 1b; bgt 2f;"
+ "mftb %0; cmplw %0,%2; blt 1b; 2:"
+ :: "r"(scratch), "r"(tbh), "r"(tbl));
+}
+
+/*
+ * Nothing to do.
+ */
+void
+setstatclockrate(int arg)
+{
+
+ /* Do nothing */
+}
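
The delay() routine above converts the request to timebase ticks by rounding
n * 1000 nanoseconds up to a whole multiple of ns_per_tick, then spins
comparing the high and low timebase words against the deadline. A minimal C
sketch of the same logic, assuming the ns_per_tick global above; the helper
names are illustrative and not part of this commit:

	/*
	 * Sketch only: C equivalent of the tick arithmetic in delay().
	 * read_timebase() stands in for the mftbu/mftb/mftbu retry loop
	 * that mftb() above uses to get a consistent 64-bit value.
	 */
	static u_quad_t
	read_timebase(void)
	{
		u_long hi, lo, hi2;

		do {
			__asm ("mftbu %0; mftb %1; mftbu %2"
			    : "=r"(hi), "=r"(lo), "=r"(hi2));
		} while (hi != hi2);	/* retry if the high word carried */
		return (((u_quad_t)hi << 32) | lo);
	}

	void
	delay_sketch(unsigned n)
	{
		/* round n microseconds up to whole timebase ticks */
		u_quad_t until = read_timebase() +
		    (n * 1000 + ns_per_tick - 1) / ns_per_tick;

		while (read_timebase() < until)
			;		/* spin until the deadline passes */
	}
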
diff --git a/sys/powerpc/aim/locore.S b/sys/powerpc/aim/locore.S
new file mode 100644
index 0000000..2ede211
--- /dev/null
+++ b/sys/powerpc/aim/locore.S
@@ -0,0 +1,1373 @@
+/* $FreeBSD$ */
+/* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */
+
+/*
+ * Copyright (C) 2001 Benno Rice
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_ddb.h"
+#include "opt_ipkdb.h"
+#include "assym.s"
+
+#include <sys/syscall.h>
+
+#include <machine/trap.h>
+#include <machine/param.h>
+#include <machine/pmap.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+
+/*
+ * Some instructions gas doesn't understand (yet?)
+ */
+#define bdneq bdnzf 2,
+
+/*
+ * Globals
+ */
+ .data
+GLOBAL(tmpstk)
+ .space 8208
+GLOBAL(esym)
+ .long 0 /* end of symbol table */
+GLOBAL(proc0paddr)
+ .long 0 /* proc0 p_addr */
+GLOBAL(PTmap)
+ .long 0 /* PTmap */
+
+GLOBAL(intrnames)
+ .asciz "irq0", "irq1", "irq2", "irq3"
+ .asciz "irq4", "irq5", "irq6", "irq7"
+ .asciz "irq8", "irq9", "irq10", "irq11"
+ .asciz "irq12", "irq13", "irq14", "irq15"
+ .asciz "irq16", "irq17", "irq18", "irq19"
+ .asciz "irq20", "irq21", "irq22", "irq23"
+ .asciz "irq24", "irq25", "irq26", "irq27"
+ .asciz "irq28", "irq29", "irq30", "irq31"
+ .asciz "irq32", "irq33", "irq34", "irq35"
+ .asciz "irq36", "irq37", "irq38", "irq39"
+ .asciz "irq40", "irq41", "irq42", "irq43"
+ .asciz "irq44", "irq45", "irq46", "irq47"
+ .asciz "irq48", "irq49", "irq50", "irq51"
+ .asciz "irq52", "irq53", "irq54", "irq55"
+ .asciz "irq56", "irq57", "irq58", "irq59"
+ .asciz "irq60", "irq61", "irq62", "irq63"
+ .asciz "clock", "softclock", "softnet", "softserial"
+GLOBAL(eintrnames)
+ .align 4
+GLOBAL(intrcnt)
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .long 0,0,0,0
+GLOBAL(eintrcnt)
+
+GLOBAL(ofmsr)
+ .long 0 /* msr used in Open Firmware */
+
+GLOBAL(powersave)
+ .long 0
+
+/*
+ * File-scope for locore.S
+ */
+idle_u:
+ .long 0 /* fake uarea during idle after exit */
+openfirmware_entry:
+ .long 0 /* openfirmware entry point */
+srsave:
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+
+/*
+ * This symbol is here for the benefit of kvm_mkdb, and is supposed to
+ * mark the start of kernel text.
+ */
+ .text
+ .globl kernel_text
+kernel_text:
+
+/*
+ * Startup entry. Note, this must be the first thing in the text
+ * segment!
+ */
+ .text
+ .globl start
+start:
+#ifdef FIRMWORKSBUGS
+ mfmsr 0
+ andi. 0,0,PSL_IR|PSL_DR
+ beq 1f
+
+ bl ofwr_init
+1:
+#endif
+ li 8,0
+ li 9,0x100
+ mtctr 9
+1:
+ dcbf 0,8
+ icbi 0,8
+ addi 8,8,0x20
+ bdnz 1b
+ sync
+ isync
+
+ mtibatu 0,0
+ mtibatu 1,0
+ mtibatu 2,0
+ mtibatu 3,0
+ mtdbatu 0,0
+ mtdbatu 1,0
+ mtdbatu 2,0
+ mtdbatu 3,0
+
+ li 9,0x12
+ mtibatl 0,9
+ mtdbatl 0,9
+ li 9,0x1ffe
+ mtibatu 0,9
+ mtdbatu 0,9
+ isync
+
+ lis 8,openfirmware_entry@ha
+ stw 5,openfirmware_entry@l(8) /* save client interface handler */
+ mr 3,5
+
+ lis 1,tmpstk@ha
+ addi 1,1,tmpstk@l
+ addi 1,1,8192
+
+ mfmsr 0
+ lis 9,ofmsr@ha
+ stw 0,ofmsr@l(9)
+
+ bl OF_init
+
+ lis 4,end@ha
+ addi 4,4,end@l
+ mr 5,4
+ li 9,PAGE_MASK
+ add 4,4,9
+ andc 4,4,9
+ lis 9,OF_buf@ha
+ stw 4,OF_buf@l(9)
+ addi 4,4,PAGE_SIZE
+ lis 9,proc0paddr@ha
+ stw 4,proc0paddr@l(9)
+ addi 4,4,USPACE-FRAMELEN
+ mr 1,4
+ xor 0,0,0
+ stwu 0,-16(1)
+
+ lis 3,kernel_text@ha
+ addi 3,3,kernel_text@l
+#if 0
+ mr 5,6
+#endif
+
+ bl powerpc_init
+ bl mi_startup
+ b OF_exit
+
+#if 0 /* XXX: We may switch back to this in the future. */
+/*
+ * OpenFirmware entry point
+ */
+ENTRY(openfirmware)
+ mflr 0 /* save return address */
+ stw 0,4(1)
+ stwu 1,-16(1) /* setup stack frame */
+
+ mfmsr 4 /* save msr */
+ stw 4,8(1)
+
+ lis 4,openfirmware_entry@ha /* get firmware entry point */
+ lwz 4,openfirmware_entry@l(4)
+ mtlr 4
+
+ li 0,0 /* clear battable translations */
+ mtdbatu 2,0
+ mtdbatu 3,0
+ mtibatu 2,0
+ mtibatu 3,0
+
+ lis 4,ofmsr@ha /* Open Firmware msr */
+ lwz 4,ofmsr@l(4)
+ mtmsr 4
+ isync
+
+ lis 4,srsave@ha /* save old SR */
+ addi 4,4,srsave@l
+ li 5,0
+1: mfsrin 0,5
+ stw 0,0(4)
+ addi 4,4,4
+ addis 5,5,0x10000000@h
+ cmpwi 5,0
+ bne 1b
+
+ lis 4,ofw_pmap@ha /* load OFW SR */
+ addi 4,4,ofw_pmap@l
+ lwz 0,PM_KERNELSR(4)
+ cmpwi 0,0 /* pm_sr[KERNEL_SR] == 0? */
+ beq 2f /* then skip (not initialized yet) */
+ li 5,0
+1: lwz 0,0(4)
+ mtsrin 0,5
+ addi 4,4,4
+ addis 5,5,0x10000000@h
+ cmpwi 5,0
+ bne 1b
+2:
+ blrl /* call Open Firmware */
+
+ mfmsr 4
+ li 5,PSL_IR|PSL_DR
+ andc 4,4,5
+ mtmsr 4
+ isync
+
+ lis 4,srsave@ha /* restore saved SR */
+ addi 4,4,srsave@l
+ li 5,0
+1: lwz 0,0(4)
+ mtsrin 0,5
+ addi 4,4,4
+ addis 5,5,0x10000000@h
+ cmpwi 5,0
+ bne 1b
+
+ lwz 4,8(1) /* restore msr */
+ mtmsr 4
+ isync
+
+ lwz 1,0(1) /* and return */
+ lwz 0,4(1)
+ mtlr 0
+ blr
+#endif
+
+/*
+ * Switch to/from OpenFirmware real mode stack
+ *
+ * Note: has to be called as the very first thing in OpenFirmware interface
+ * routines.
+ * E.g.:
+ * int
+ * OF_xxx(arg1, arg2)
+ * type arg1, arg2;
+ * {
+ * static struct {
+ * char *name;
+ * int nargs;
+ * int nreturns;
+ * char *method;
+ * int arg1;
+ * int arg2;
+ * int ret;
+ * } args = {
+ * "xxx",
+ * 2,
+ * 1,
+ * };
+ *
+ * ofw_stack();
+ * args.arg1 = arg1;
+ * args.arg2 = arg2;
+ * if (openfirmware(&args) < 0)
+ * return -1;
+ * return args.ret;
+ * }
+ */
+
+ .local firmstk
+ .comm firmstk,PAGE_SIZE,8
+
+ENTRY(ofw_stack)
+ mfmsr 8 /* turn off interrupts */
+ andi. 0,8,~(PSL_EE|PSL_RI)@l
+ mtmsr 0
+ stw 8,4(1) /* abuse return address slot */
+
+ lwz 5,0(1) /* get length of stack frame */
+ subf 5,1,5
+
+ lis 7,firmstk+PAGE_SIZE-8@ha
+ addi 7,7,firmstk+PAGE_SIZE-8@l
+ lis 6,ofw_back@ha
+ addi 6,6,ofw_back@l
+ subf 4,5,7 /* make room for stack frame on
+ new stack */
+ stw 6,-4(7) /* setup return pointer */
+ stwu 1,-8(7)
+
+ stw 7,-8(4)
+
+ addi 3,1,8
+ addi 1,4,-8
+ subi 5,5,8
+
+ cmpw 3,4
+ beqlr
+
+ mr 0,5
+ addi 5,5,-1
+ cmpwi 0,0
+ beqlr
+
+1: lwz 0,0(3)
+ stw 0,0(4)
+ addi 3,3,1
+ addi 4,4,1
+ mr 0,5
+ addi 5,5,-1
+ cmpwi 0,0
+ bne 1b
+ blr
+
+ofw_back:
+ lwz 1,0(1) /* get callers original stack pointer */
+
+ lwz 0,4(1) /* get saved msr from abused slot */
+ mtmsr 0
+
+ lwz 1,0(1) /* return */
+ lwz 0,4(1)
+ mtlr 0
+ blr
+
+/*
+ * Data used during primary/secondary traps/interrupts
+ */
+#define tempsave 0x2e0 /* primary save area for trap handling */
+#define disisave 0x3e0 /* primary save area for dsi/isi traps */
+
+#define INTSTK (8*1024) /* 8K interrupt stack */
+ .data
+ .align 4
+intstk:
+ .space INTSTK /* interrupt stack */
+
+GLOBAL(intr_depth)
+ .long -1 /* in-use marker */
+
+#define SPILLSTK 1024 /* 1K spill stack */
+
+ .comm spillstk,SPILLSTK,8
+
+/*
+ * This code gets copied to all the trap vectors
+ * (except ISI/DSI, ALI, the interrupts, and possibly the debugging
+ * traps when using IPKDB).
+ */
+ .text
+ .globl trapcode,trapsize
+trapcode:
+ mtsprg 1,1 /* save SP */
+ stmw 28,tempsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+/* Test whether we already had PR set */
+ mfsrr1 31
+ mtcr 31
+ bc 4,17,1f /* branch if PSL_PR is clear */
+ mfsprg 1,0
+ lwz 1,GD_CURPCB(1)
+ addi 1,1,USPACE /* stack is top of user struct */
+1:
+ bla s_trap
+trapsize = .-trapcode
+
+/*
+ * For ALI: has to save DSISR and DAR
+ */
+ .globl alitrap,alisize
+alitrap:
+ mtsprg 1,1 /* save SP */
+ stmw 28,tempsave(0) /* free r28-r31 */
+ mfdar 30
+ mfdsisr 31
+ stmw 30,tempsave+16(0)
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+/* Test whether we already had PR set */
+ mfsrr1 31
+ mtcr 31
+ bc 4,17,1f /* branch if PSL_PR is clear */
+ mfsprg 1,0
+ lwz 1,GD_CURPCB(1)
+ addi 1,1,USPACE /* stack is top of user struct */
+1:
+ bla s_trap
+alisize = .-alitrap
+
+/*
+ * Similar to the above for DSI
+ * Has to handle BAT spills
+ * and standard pagetable spills
+ */
+ .globl dsitrap,dsisize
+dsitrap:
+ stmw 28,disisave(0) /* free r28-r31 */
+ mfcr 29 /* save CR */
+ mfxer 30 /* save XER */
+ mtsprg 2,30 /* in SPRG2 */
+ mfsrr1 31 /* test kernel mode */
+ mtcr 31
+ bc 12,17,1f /* branch if PSL_PR is set */
+ mfdar 31 /* get fault address */
+ rlwinm 31,31,7,25,28 /* get segment * 8 */
+
+ /* get batu */
+ addis 31,31,battable@ha
+ lwz 30,battable@l(31)
+ mtcr 30
+ bc 4,30,1f /* branch if supervisor valid is
+ false */
+ /* get batl */
+ lwz 31,battable+4@l(31)
+/* We randomly use the highest two bat registers here */
+ mftb 28
+ andi. 28,28,1
+ bne 2f
+ mtdbatu 2,30
+ mtdbatl 2,31
+ b 3f
+2:
+ mtdbatu 3,30
+ mtdbatl 3,31
+3:
+ mfsprg 30,2 /* restore XER */
+ mtxer 30
+ mtcr 29 /* restore CR */
+ lmw 28,disisave(0) /* restore r28-r31 */
+ rfi /* return to trapped code */
+1:
+ mflr 28 /* save LR */
+ bla s_dsitrap
+dsisize = .-dsitrap
+
+/*
+ * Similar to the above for ISI
+ */
+ .globl isitrap,isisize
+isitrap:
+ stmw 28,disisave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ mfsrr1 31 /* test kernel mode */
+ mtcr 31
+ bc 12,17,1f /* branch if PSL_PR is set */
+ mfsrr0 31 /* get fault address */
+ rlwinm 31,31,7,25,28 /* get segment * 8 */
+
+ /* get batu */
+ addis 31,31,battable@ha
+ lwz 30,battable@l(31)
+ mtcr 30
+ bc 4,30,1f /* branch if supervisor valid is
+ false */
+ mtibatu 3,30
+
+ /* get batl */
+ lwz 30,battable+4@l(31)
+ mtibatl 3,30
+
+ mtcr 29 /* restore CR */
+ lmw 28,disisave(0) /* restore r28-r31 */
+ rfi /* return to trapped code */
+1:
+ bla s_isitrap
+isisize = .-isitrap
+
+/*
+ * This one for the external interrupt handler.
+ */
+ .globl extint,extsize
+extint:
+ mtsprg 1,1 /* save SP */
+ stmw 28,tempsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ mfxer 30 /* save XER */
+ lis 1,intstk+INTSTK@ha /* get interrupt stack */
+ addi 1,1,intstk+INTSTK@l
+ lwz 31,0(1) /* were we already running on intstk? */
+ addic. 31,31,1
+ stw 31,0(1)
+ beq 1f
+ mfsprg 1,1 /* yes, get old SP */
+1:
+ ba extintr
+extsize = .-extint
+
+/*
+ * And this one for the decrementer interrupt handler.
+ */
+ .globl decrint,decrsize
+decrint:
+ mtsprg 1,1 /* save SP */
+ stmw 28,tempsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ mfxer 30 /* save XER */
+ lis 1,intstk+INTSTK@ha /* get interrupt stack */
+ addi 1,1,intstk+INTSTK@l
+ lwz 31,0(1) /* were we already running on intstk? */
+ addic. 31,31,1
+ stw 31,0(1)
+ beq 1f
+ mfsprg 1,1 /* yes, get old SP */
+1:
+ ba decrintr
+decrsize = .-decrint
+
+/*
+ * Now the tlb software load for 603 processors:
+ * (Code essentially from the 603e User Manual, Chapter 5, but
+ * corrected a lot.)
+ */
+#define DMISS 976
+#define DCMP 977
+#define HASH1 978
+#define HASH2 979
+#define IMISS 980
+#define ICMP 981
+#define RPA 982
+
+ .globl tlbimiss,tlbimsize
+tlbimiss:
+ mfspr 2,HASH1 /* get first pointer */
+ li 1,8
+ mfctr 0 /* save counter */
+ mfspr 3,ICMP /* get first compare value */
+ addi 2,2,-8 /* predec pointer */
+1:
+ mtctr 1 /* load counter */
+2:
+ lwzu 1,8(2) /* get next pte */
+ cmpl 0,1,3 /* see if found pte */
+ bdneq 2b /* loop if not eq */
+ bne 3f /* not found */
+ lwz 1,4(2) /* load tlb entry lower word */
+ andi. 3,1,8 /* check G-bit */
+ bne 4f /* if guarded, take ISI */
+ mtctr 0 /* restore counter */
+ mfspr 0,IMISS /* get the miss address for the tlbli */
+ mfsrr1 3 /* get the saved cr0 bits */
+ mtcrf 0x80,3 /* and restore */
+ ori 1,1,0x100 /* set the reference bit */
+ mtspr RPA,1 /* set the pte */
+ srwi 1,1,8 /* get byte 7 of pte */
+ tlbli 0 /* load the itlb */
+ stb 1,6(2) /* update page table */
+ rfi
+
+3: /* not found in pteg */
+ andi. 1,3,0x40 /* have we already done second hash? */
+ bne 5f
+ mfspr 2,HASH2 /* get the second pointer */
+ ori 3,3,0x40 /* change the compare value */
+ li 1,8
+ addi 2,2,-8 /* predec pointer */
+ b 1b
+4: /* guarded */
+ mfsrr1 3
+ andi. 2,3,0xffff /* clean upper srr1 */
+ oris 2,2,0x8000000@h /* set srr<4> to flag prot violation */
+ b 6f
+5: /* not found anywhere */
+ mfsrr1 3
+ andi. 2,3,0xffff /* clean upper srr1 */
+ oris 2,2,0x40000000@h /* set srr1<1> to flag pte not found */
+6:
+ mtctr 0 /* restore counter */
+ mtsrr1 2
+ mfmsr 0
+ xoris 0,0,0x20000@h /* flip the msr<tgpr> bit */
+ mtcrf 0x80,3 /* restore cr0 */
+ mtmsr 0 /* now with native gprs */
+ isync
+ ba EXC_ISI
+tlbimsize = .-tlbimiss
+
+ .globl tlbdlmiss,tlbdlmsize
+tlbdlmiss:
+ mfspr 2,HASH1 /* get first pointer */
+ li 1,8
+ mfctr 0 /* save counter */
+ mfspr 3,DCMP /* get first compare value */
+ addi 2,2,-8 /* predec pointer */
+1:
+ mtctr 1 /* load counter */
+2:
+ lwzu 1,8(2) /* get next pte */
+ cmpl 0,1,3 /* see if found pte */
+ bdneq 2b /* loop if not eq */
+ bne 3f /* not found */
+ lwz 1,4(2) /* load tlb entry lower word */
+ mtctr 0 /* restore counter */
+ mfspr 0,DMISS /* get the miss address for the tlbld */
+ mfsrr1 3 /* get the saved cr0 bits */
+ mtcrf 0x80,3 /* and restore */
+ ori 1,1,0x100 /* set the reference bit */
+ mtspr RPA,1 /* set the pte */
+ srwi 1,1,8 /* get byte 7 of pte */
+ tlbld 0 /* load the dtlb */
+ stb 1,6(2) /* update page table */
+ rfi
+
+3: /* not found in pteg */
+ andi. 1,3,0x40 /* have we already done second hash? */
+ bne 5f
+ mfspr 2,HASH2 /* get the second pointer */
+ ori 3,3,0x40 /* change the compare value */
+ li 1,8
+ addi 2,2,-8 /* predec pointer */
+ b 1b
+5: /* not found anywhere */
+ mfsrr1 3
+ lis 1,0x40000000@h /* set dsisr<1> to flag pte not found */
+ mtctr 0 /* restore counter */
+ andi. 2,3,0xffff /* clean upper srr1 */
+ mtsrr1 2
+ mtdsisr 1 /* load the dsisr */
+ mfspr 1,DMISS /* get the miss address */
+ mtdar 1 /* put in dar */
+ mfmsr 0
+ xoris 0,0,0x20000@h /* flip the msr<tgpr> bit */
+ mtcrf 0x80,3 /* restore cr0 */
+ mtmsr 0 /* now with native gprs */
+ isync
+ ba EXC_DSI
+tlbdlmsize = .-tlbdlmiss
+
+ .globl tlbdsmiss,tlbdsmsize
+tlbdsmiss:
+ mfspr 2,HASH1 /* get first pointer */
+ li 1,8
+ mfctr 0 /* save counter */
+ mfspr 3,DCMP /* get first compare value */
+ addi 2,2,-8 /* predec pointer */
+1:
+ mtctr 1 /* load counter */
+2:
+ lwzu 1,8(2) /* get next pte */
+ cmpl 0,1,3 /* see if found pte */
+ bdneq 2b /* loop if not eq */
+ bne 3f /* not found */
+ lwz 1,4(2) /* load tlb entry lower word */
+ andi. 3,1,0x80 /* check the C-bit */
+ beq 4f
+5:
+ mtctr 0 /* restore counter */
+ mfspr 0,DMISS /* get the miss address for the tlbld */
+ mfsrr1 3 /* get the saved cr0 bits */
+ mtcrf 0x80,3 /* and restore */
+ mtspr RPA,1 /* set the pte */
+ tlbld 0 /* load the dtlb */
+ rfi
+
+3: /* not found in pteg */
+ andi. 1,3,0x40 /* have we already done second hash? */
+ bne 5f
+ mfspr 2,HASH2 /* get the second pointer */
+ ori 3,3,0x40 /* change the compare value */
+ li 1,8
+ addi 2,2,-8 /* predec pointer */
+ b 1b
+4: /* found, but C-bit = 0 */
+ rlwinm. 3,1,30,0,1 /* test PP */
+ bge- 7f
+ andi. 3,1,1
+ beq+ 8f
+9: /* found, but protection violation (PP==00)*/
+ mfsrr1 3
+ lis 1,0xa000000@h /* indicate protection violation
+ on store */
+ b 1f
+7: /* found, PP=1x */
+ mfspr 3,DMISS /* get the miss address */
+ mfsrin 1,3 /* get the segment register */
+ mfsrr1 3
+ rlwinm 3,3,18,31,31 /* get PR-bit */
+ rlwnm. 2,2,3,1,1 /* get the key */
+ bne- 9b /* protection violation */
+8: /* found, set reference/change bits */
+ lwz 1,4(2) /* reload tlb entry */
+ ori 1,1,0x180
+ sth 1,6(2)
+ b 5b
+5: /* not found anywhere */
+ mfsrr1 3
+ lis 1,0x42000000@h /* set dsisr<1> to flag pte not found */
+ /* dsisr<6> to flag store */
+1:
+ mtctr 0 /* restore counter */
+ andi. 2,3,0xffff /* clean upper srr1 */
+ mtsrr1 2
+ mtdsisr 1 /* load the dsisr */
+ mfspr 1,DMISS /* get the miss address */
+ mtdar 1 /* put in dar */
+ mfmsr 0
+ xoris 0,0,0x20000@h /* flip the msr<tgpr> bit */
+ mtcrf 0x80,3 /* restore cr0 */
+ mtmsr 0 /* now with native gprs */
+ isync
+ ba EXC_DSI
+tlbdsmsize = .-tlbdsmiss
+
+#ifdef DDB
+#define ddbsave 0xde0 /* primary save area for DDB */
+/*
+ * In case of DDB we want a separate trap catcher for it
+ */
+ .local ddbstk
+ .comm ddbstk,INTSTK,8 /* ddb stack */
+
+ .globl ddblow,ddbsize
+ddblow:
+ mtsprg 1,1 /* save SP */
+ stmw 28,ddbsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ lis 1,ddbstk+INTSTK@ha /* get new SP */
+ addi 1,1,ddbstk+INTSTK@l
+ bla ddbtrap
+ddbsize = .-ddblow
+#endif /* DDB */
+
+#ifdef IPKDB
+#define ipkdbsave 0xde0 /* primary save area for IPKDB */
+/*
+ * In case of IPKDB we want a separate trap catcher for it
+ */
+
+ .local ipkdbstk
+ .comm ipkdbstk,INTSTK,8 /* ipkdb stack */
+
+ .globl ipkdblow,ipkdbsize
+ipkdblow:
+ mtsprg 1,1 /* save SP */
+ stmw 28,ipkdbsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ lis 1,ipkdbstk+INTSTK@ha /* get new SP */
+ addi 1,1,ipkdbstk+INTSTK@l
+ bla ipkdbtrap
+ipkdbsize = .-ipkdblow
+#endif /* IPKDB */
+
+/*
+ * FRAME_SETUP assumes:
+ * SPRG1 SP (1)
+ * savearea r28-r31,DAR,DSISR (DAR & DSISR only for DSI traps)
+ * 28 LR
+ * 29 CR
+ * 1 kernel stack
+ * LR trap type
+ * SRR0/1 as at start of trap
+ */
+#define FRAME_SETUP(savearea) \
+/* Have to enable translation to allow access of kernel stack: */ \
+ mfsrr0 30; \
+ mfsrr1 31; \
+ stmw 30,savearea+24(0); \
+ mfmsr 30; \
+ ori 30,30,(PSL_DR|PSL_IR); \
+ mtmsr 30; \
+ isync; \
+ mfsprg 31,1; \
+ stwu 31,-FRAMELEN(1); \
+ stw 0,FRAME_0+8(1); \
+ stw 31,FRAME_1+8(1); \
+ stw 28,FRAME_LR+8(1); \
+ stw 29,FRAME_CR+8(1); \
+ lmw 28,savearea(0); \
+ stmw 2,FRAME_2+8(1); \
+ lmw 28,savearea+16(0); \
+ mfxer 3; \
+ mfctr 4; \
+ mflr 5; \
+ andi. 5,5,0xff00; \
+ stw 3,FRAME_XER+8(1); \
+ stw 4,FRAME_CTR+8(1); \
+ stw 5,FRAME_EXC+8(1); \
+ stw 28,FRAME_DAR+8(1); \
+ stw 29,FRAME_DSISR+8(1); \
+ stw 30,FRAME_SRR0+8(1); \
+ stw 31,FRAME_SRR1+8(1)
+
+#define FRAME_LEAVE(savearea) \
+/* Now restore regs: */ \
+ lwz 2,FRAME_SRR0+8(1); \
+ lwz 3,FRAME_SRR1+8(1); \
+ lwz 4,FRAME_CTR+8(1); \
+ lwz 5,FRAME_XER+8(1); \
+ lwz 6,FRAME_LR+8(1); \
+ lwz 7,FRAME_CR+8(1); \
+ stw 2,savearea(0); \
+ stw 3,savearea+4(0); \
+ mtctr 4; \
+ mtxer 5; \
+ mtlr 6; \
+ mtsprg 1,7; /* save cr */ \
+ lmw 2,FRAME_2+8(1); \
+ lwz 0,FRAME_0+8(1); \
+ lwz 1,FRAME_1+8(1); \
+ mtsprg 2,2; /* save r2 & r3 */ \
+ mtsprg 3,3; \
+/* Disable translation, machine check and recoverability: */ \
+ mfmsr 2; \
+ andi. 2,2,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l; \
+ mtmsr 2; \
+ isync; \
+/* Decide whether we return to user mode: */ \
+ lwz 3,savearea+4(0); \
+ mtcr 3; \
+ bc 4,17,1f; /* branch if PSL_PR is false */ \
+/* Restore user & kernel access SR: */ \
+/* lis 2,curpm@ha; get real address of pmap */ \
+/* lwz 2,curpm@l(2); */ \
+/* lwz 3,PM_USRSR(2); */ \
+/* mtsr USER_SR,3; */ \
+/* lwz 3,PM_KERNELSR(2); */ \
+/* mtsr KERNEL_SR,3; */ \
+1: mfsprg 2,1; /* restore cr */ \
+ mtcr 2; \
+ lwz 2,savearea(0); \
+ lwz 3,savearea+4(0); \
+ mtsrr0 2; \
+ mtsrr1 3; \
+ mfsprg 2,2; /* restore r2 & r3 */ \
+ mfsprg 3,3
+
+/*
+ * Preamble code for DSI/ISI traps
+ */
+disitrap:
+ lmw 30,disisave(0)
+ stmw 30,tempsave(0)
+ lmw 30,disisave+8(0)
+ stmw 30,tempsave+8(0)
+ mfdar 30
+ mfdsisr 31
+ stmw 30,tempsave+16(0)
+realtrap:
+/* Test whether we already had PR set */
+ mfsrr1 1
+ mtcr 1
+ mfsprg 1,1 /* restore SP (might have been
+ overwritten) */
+ bc 4,17,s_trap /* branch if PSL_PR is false */
+ mfsprg 1,0
+ lwz 1,GD_CURPCB(1)
+ addi 1,1,USPACE /* stack is top of user struct */
+
+/*
+ * Now the common trap catching code.
+ */
+s_trap:
+/* First have to enable KERNEL mapping */
+ lis 31,KERNEL_SEGMENT@h
+ ori 31,31,KERNEL_SEGMENT@l
+ mtsr KERNEL_SR,31
+ FRAME_SETUP(tempsave)
+/* Now we can recover interrupts again: */
+ mfmsr 7
+ ori 7,7,(PSL_EE|PSL_ME|PSL_RI)@l
+ mtmsr 7
+ isync
+/* Call C trap code: */
+trapagain:
+ addi 3,1,8
+ bl trap
+trapexit:
+/* Disable interrupts: */
+ mfmsr 3
+ andi. 3,3,~PSL_EE@l
+ mtmsr 3
+/* Test AST pending: */
+ lwz 5,FRAME_SRR1+8(1)
+ mtcr 5
+ bc 4,17,1f /* branch if PSL_PR is false */
+ lis 3,astpending@ha
+ lwz 4,astpending@l(3)
+ andi. 4,4,1
+ beq 1f
+#if 0 /* XXX */
+ li 6,EXC_AST
+#endif
+ stw 6,FRAME_EXC+8(1)
+ b trapagain
+1:
+#if 0
+ FRAME_LEAVE(tempsave)
+#endif
+ rfi
+
+/*
+ * Child comes here at the end of a fork.
+ * Mostly similar to the above.
+ */
+ .globl fork_trampoline
+fork_trampoline:
+ xor 3,3,3
+#if 0 /* XXX */
+ bl lcsplx
+#endif
+ mtlr 31
+ mr 3,30
+ blrl /* jump indirect to r31 */
+ b trapexit
+
+/*
+ * DSI second stage fault handler
+ */
+s_dsitrap:
+ mfdsisr 31 /* test whether this may be a
+ spill fault */
+ mtcr 31
+ mtsprg 1,1 /* save SP */
+ bc 4,1,disitrap /* branch if table miss is false */
+ lis 1,spillstk+SPILLSTK@ha
+ addi 1,1,spillstk+SPILLSTK@l /* get spill stack */
+ stwu 1,-52(1)
+ stw 0,48(1) /* save non-volatile registers */
+ stw 3,44(1)
+ stw 4,40(1)
+ stw 5,36(1)
+ stw 6,32(1)
+ stw 7,28(1)
+ stw 8,24(1)
+ stw 9,20(1)
+ stw 10,16(1)
+ stw 11,12(1)
+ stw 12,8(1)
+ mflr 30 /* save trap type */
+ mfctr 31 /* & CTR */
+ mfdar 3
+s_pte_spill:
+ bl pte_spill /* try a spill */
+ or. 3,3,3
+ mtctr 31 /* restore CTR */
+ mtlr 30 /* and trap type */
+ mfsprg 31,2 /* get saved XER */
+ mtxer 31 /* restore XER */
+ lwz 12,8(1) /* restore non-volatile registers */
+ lwz 11,12(1)
+ lwz 10,16(1)
+ lwz 9,20(1)
+ lwz 8,24(1)
+ lwz 7,28(1)
+ lwz 6,32(1)
+ lwz 5,36(1)
+ lwz 4,40(1)
+ lwz 3,44(1)
+ lwz 0,48(1)
+ beq disitrap
+ mfsprg 1,1 /* restore SP */
+ mtcr 29 /* restore CR */
+ mtlr 28 /* restore LR */
+ lmw 28,disisave(0) /* restore r28-r31 */
+ rfi /* return to trapped code */
+
+/*
+ * ISI second stage fault handler
+ */
+s_isitrap:
+ mfsrr1 31 /* test whether this may be a
+ spill fault */
+ mtcr 31
+ mtsprg 1,1 /* save SP */
+ bc 4,1,disitrap /* branch if table miss is false */
+ lis 1,spillstk+SPILLSTK@ha
+ addi 1,1,spillstk+SPILLSTK@l /* get spill stack */
+ stwu 1,-52(1)
+ stw 0,48(1) /* save non-volatile registers */
+ stw 3,44(1)
+ stw 4,40(1)
+ stw 5,36(1)
+ stw 6,32(1)
+ stw 7,28(1)
+ stw 8,24(1)
+ stw 9,20(1)
+ stw 10,16(1)
+ stw 11,12(1)
+ stw 12,8(1)
+ mfxer 30 /* save XER */
+ mtsprg 2,30
+ mflr 30 /* save trap type */
+ mfctr 31 /* & ctr */
+ mfsrr0 3
+ b s_pte_spill /* above */
+
+/*
+ * External interrupt second level handler
+ */
+#define INTRENTER \
+/* Save non-volatile registers: */ \
+ stwu 1,-88(1); /* temporarily */ \
+ stw 0,84(1); \
+ mfsprg 0,1; /* get original SP */ \
+ stw 0,0(1); /* and store it */ \
+ stw 3,80(1); \
+ stw 4,76(1); \
+ stw 5,72(1); \
+ stw 6,68(1); \
+ stw 7,64(1); \
+ stw 8,60(1); \
+ stw 9,56(1); \
+ stw 10,52(1); \
+ stw 11,48(1); \
+ stw 12,44(1); \
+ stw 28,40(1); /* saved LR */ \
+ stw 29,36(1); /* saved CR */ \
+ stw 30,32(1); /* saved XER */ \
+ lmw 28,tempsave(0); /* restore r28-r31 */ \
+ mfctr 6; \
+ lis 5,intr_depth@ha; \
+ lwz 5,intr_depth@l(5); \
+ mfsrr0 4; \
+ mfsrr1 3; \
+ stw 6,28(1); \
+ stw 5,20(1); \
+ stw 4,12(1); \
+ stw 3,8(1); \
+/* interrupts are recoverable here, and enable translation */ \
+ lis 3,(KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY)@h; \
+ ori 3,3,(KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY)@l; \
+ mtsr KERNEL_SR,3; \
+ mfmsr 5; \
+ ori 5,5,(PSL_IR|PSL_DR|PSL_RI); \
+ mtmsr 5; \
+ isync
+
+ .globl extint_call
+extintr:
+ INTRENTER
+extint_call:
+ bl extint_call /* to be filled in later */
+
+intr_exit:
+/* Disable interrupts (should already be disabled) and MMU here: */
+ mfmsr 3
+ andi. 3,3,~(PSL_EE|PSL_ME|PSL_RI|PSL_DR|PSL_IR)@l
+ mtmsr 3
+ isync
+/* restore possibly overwritten registers: */
+ lwz 12,44(1)
+ lwz 11,48(1)
+ lwz 10,52(1)
+ lwz 9,56(1)
+ lwz 8,60(1)
+ lwz 7,64(1)
+ lwz 6,8(1)
+ lwz 5,12(1)
+ lwz 4,28(1)
+ lwz 3,32(1)
+ mtsrr1 6
+ mtsrr0 5
+ mtctr 4
+ mtxer 3
+/* Returning to user mode? */
+ mtcr 6 /* saved SRR1 */
+ bc 4,17,1f /* branch if PSL_PR is false */
+ mfsprg 3,0 /* get globaldata */
+ lwz 3,GD_CURPCB(3) /* get curpcb from globaldata */
+ lwz 3,PCB_PMR(3) /* get pmap real address from curpcb */
+ mtsr KERNEL_SR,3
+ lis 3,astpending@ha /* Test AST pending */
+ lwz 4,astpending@l(3)
+ andi. 4,4,1
+ beq 1f
+/* Setup for entry to realtrap: */
+ lwz 3,0(1) /* get saved SP */
+ mtsprg 1,3
+#if 0 /* XXX */
+ li 6,EXC_AST
+#endif
+ stmw 28,tempsave(0) /* establish tempsave again */
+ mtlr 6
+ lwz 28,40(1) /* saved LR */
+ lwz 29,36(1) /* saved CR */
+ lwz 6,68(1)
+ lwz 5,72(1)
+ lwz 4,76(1)
+ lwz 3,80(1)
+ lwz 0,84(1)
+ lis 30,intr_depth@ha /* adjust reentrancy count */
+ lwz 31,intr_depth@l(30)
+ addi 31,31,-1
+ stw 31,intr_depth@l(30)
+ b realtrap
+1:
+/* Here is the normal exit of extintr: */
+ lwz 5,36(1)
+ lwz 6,40(1)
+ mtcr 5
+ mtlr 6
+ lwz 6,68(1)
+ lwz 5,72(1)
+ lis 3,intr_depth@ha /* adjust reentrancy count */
+ lwz 4,intr_depth@l(3)
+ addi 4,4,-1
+ stw 4,intr_depth@l(3)
+ lwz 4,76(1)
+ lwz 3,80(1)
+ lwz 0,84(1)
+ lwz 1,0(1)
+ rfi
+
+/*
+ * Decrementer interrupt second level handler
+ */
+decrintr:
+ INTRENTER
+ addi 3,1,8 /* intr frame */
+ bl decr_intr
+ b intr_exit
+
+#ifdef DDB
+/*
+ * Deliberate entry to ddbtrap
+ */
+ .globl ddb_trap
+ddb_trap:
+ mtsprg 1,1
+ mfmsr 3
+ mtsrr1 3
+ andi. 3,3,~(PSL_EE|PSL_ME)@l
+ mtmsr 3 /* disable interrupts */
+ isync
+ stmw 28,ddbsave(0)
+ mflr 28
+ li 29,EXC_BPT
+ mtlr 29
+ mfcr 29
+ mtsrr0 28
+
+/*
+ * Now the ddb trap catching code.
+ */
+ddbtrap:
+ FRAME_SETUP(ddbsave)
+/* Call C trap code: */
+ addi 3,1,8
+ bl ddb_trap_glue
+ or. 3,3,3
+ bne ddbleave
+/* This wasn't for DDB, so switch to real trap: */
+ lwz 3,FRAME_EXC+8(1) /* save exception */
+ stw 3,ddbsave+8(0)
+ FRAME_LEAVE(ddbsave)
+ mtsprg 1,1 /* prepare for entrance to realtrap */
+ stmw 28,tempsave(0)
+ mflr 28
+ mfcr 29
+ lwz 31,ddbsave+8(0)
+ mtlr 31
+ b realtrap
+ddbleave:
+ FRAME_LEAVE(ddbsave)
+ rfi
+#endif /* DDB */
+
+#ifdef IPKDB
+/*
+ * Deliberate entry to ipkdbtrap
+ */
+ .globl ipkdb_trap
+ipkdb_trap:
+ mtsprg 1,1
+ mfmsr 3
+ mtsrr1 3
+ andi. 3,3,~(PSL_EE|PSL_ME)@l
+ mtmsr 3 /* disable interrupts */
+ isync
+ stmw 28,ipkdbsave(0)
+ mflr 28
+ li 29,EXC_BPT
+ mtlr 29
+ mfcr 29
+ mtsrr0 28
+
+/*
+ * Now the ipkdb trap catching code.
+ */
+ipkdbtrap:
+ FRAME_SETUP(ipkdbsave)
+/* Call C trap code: */
+ addi 3,1,8
+ bl ipkdb_trap_glue
+ or. 3,3,3
+ bne ipkdbleave
+/* This wasn't for IPKDB, so switch to real trap: */
+ lwz 3,FRAME_EXC+8(1) /* save exception */
+ stw 3,ipkdbsave+8(0)
+ FRAME_LEAVE(ipkdbsave)
+ mtsprg 1,1 /* prepare for entrance to realtrap */
+ stmw 28,tempsave(0)
+ mflr 28
+ mfcr 29
+ lwz 31,ipkdbsave+8(0)
+ mtlr 31
+ b realtrap
+ipkdbleave:
+ FRAME_LEAVE(ipkdbsave)
+ rfi
+
+ipkdbfault:
+ ba _ipkdbfault
+_ipkdbfault:
+ mfsrr0 3
+ addi 3,3,4
+ mtsrr0 3
+ li 3,-1
+ rfi
+
+/*
+ * int ipkdbfbyte(unsigned char *p)
+ */
+ .globl ipkdbfbyte
+ipkdbfbyte:
+ li 9,EXC_DSI /* establish new fault routine */
+ lwz 5,0(9)
+ lis 6,ipkdbfault@ha
+ lwz 6,ipkdbfault@l(6)
+ stw 6,0(9)
+#ifdef IPKDBUSERHACK
+ lis 8,ipkdbsr@ha
+ lwz 8,ipkdbsr@l(8)
+ mtsr USER_SR,8
+ isync
+#endif
+ dcbst 0,9 /* flush data... */
+ sync
+ icbi 0,9 /* and instruction caches */
+ lbz 3,0(3) /* fetch data */
+ stw 5,0(9) /* restore previous fault handler */
+ dcbst 0,9 /* and flush data... */
+ sync
+ icbi 0,9 /* and instruction caches */
+ blr
+
+/*
+ * int ipkdbsbyte(unsigned char *p, int c)
+ */
+ .globl ipkdbsbyte
+ipkdbsbyte:
+ li 9,EXC_DSI /* establish new fault routine */
+ lwz 5,0(9)
+ lis 6,ipkdbfault@ha
+ lwz 6,ipkdbfault@l(6)
+ stw 6,0(9)
+#ifdef IPKDBUSERHACK
+ lis 8,ipkdbsr@ha
+ lwz 8,ipkdbsr@l(8)
+ mtsr USER_SR,8
+ isync
+#endif
+ dcbst 0,9 /* flush data... */
+ sync
+ icbi 0,9 /* and instruction caches */
+ mr 6,3
+ xor 3,3,3
+ stb 4,0(6)
+ dcbst 0,6 /* Now do appropriate flushes
+ to data... */
+ sync
+ icbi 0,6 /* and instruction caches */
+ stw 5,0(9) /* restore previous fault handler */
+ dcbst 0,9 /* and flush data... */
+ sync
+ icbi 0,9 /* and instruction caches */
+ blr
+#endif /* IPKDB */
+
+/*
+ * int setfault()
+ *
+ * Similar to setjmp to setup for handling faults on accesses to user memory.
+ * Any routine using this may only call bcopy, either the form below,
+ * or the (currently used) C code optimized, so it doesn't use any non-volatile
+ * registers.
+ */
+ .globl setfault
+setfault:
+ mflr 0
+ mfcr 12
+ mfsprg 4,0
+ lwz 4,GD_CURPCB(4)
+ stw 3,PCB_FAULT(4)
+ stw 0,0(3)
+ stw 1,4(3)
+ stw 2,8(3)
+ stmw 12,12(3)
+ xor 3,3,3
+ blr
+
+/*
+ * Signal "trampoline" code.
+ */
+ .globl sigcode
+sigcode:
+ b sys_exit
+esigcode:
+ .data
+GLOBAL(szsigcode)
+ .long esigcode-sigcode
+ .text
+
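
The three 603 software TLB miss handlers above (tlbimiss, tlbdlmiss,
tlbdsmiss) share one search structure: scan the eight 8-byte PTEs of the
primary group pointed to by HASH1 against the ICMP/DCMP compare value, and on
a miss set the secondary-hash bit (0x40) in the compare value and rescan the
group at HASH2 before raising an ISI/DSI. A C sketch of that control flow
with illustrative names; the two-word PTE layout is assumed from the 8-byte
lwzu stride and the compare against the first word:

	/*
	 * Sketch only: the PTEG search performed by the tlb*miss handlers.
	 * On a hit the handlers also set the reference bit (0x100) in the
	 * low word before loading it into the TLB via RPA and tlbli/tlbld.
	 */
	struct pte_sketch {
		u_int	pte_hi;		/* compared against ICMP/DCMP */
		u_int	pte_lo;		/* RPN, WIMG, R/C and PP bits */
	};

	static struct pte_sketch *
	pteg_lookup(struct pte_sketch *hash1, struct pte_sketch *hash2,
	    u_int cmp)
	{
		int i;

		for (i = 0; i < 8; i++)		/* primary group */
			if (hash1[i].pte_hi == cmp)
				return (&hash1[i]);
		cmp |= 0x40;			/* secondary-hash compare */
		for (i = 0; i < 8; i++)		/* secondary group */
			if (hash2[i].pte_hi == cmp)
				return (&hash2[i]);
		return (NULL);			/* miss: take the ISI/DSI */
	}
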
diff --git a/sys/powerpc/aim/machdep.c b/sys/powerpc/aim/machdep.c
new file mode 100644
index 0000000..e4eb98e
--- /dev/null
+++ b/sys/powerpc/aim/machdep.c
@@ -0,0 +1,972 @@
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (C) 2001 Benno Rice
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include "opt_ddb.h"
+#include "opt_compat.h"
+#include "opt_msgbuf.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/eventhandler.h>
+#include <sys/sysproto.h>
+#include <sys/mutex.h>
+#include <sys/ktr.h>
+#include <sys/signalvar.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/reboot.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/mbuf.h>
+#include <sys/vmmeter.h>
+#include <sys/msgbuf.h>
+#include <sys/exec.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+#include <sys/linker.h>
+#include <sys/cons.h>
+#include <net/netisr.h>
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_pager.h>
+#include <sys/user.h>
+#include <sys/ptrace.h>
+#include <machine/bat.h>
+#include <machine/clock.h>
+#include <machine/md_var.h>
+#include <machine/reg.h>
+#include <machine/fpu.h>
+#include <machine/globaldata.h>
+#include <machine/vmparam.h>
+#include <machine/elf.h>
+#include <machine/trap.h>
+#include <machine/powerpc.h>
+#include <dev/ofw/openfirm.h>
+#include <ddb/ddb.h>
+#include <sys/vnode.h>
+#include <fs/procfs/procfs.h>
+#include <machine/sigframe.h>
+
+int cold = 1;
+
+struct mtx sched_lock;
+struct mtx Giant;
+
+struct user *proc0paddr;
+
+char machine[] = "powerpc";
+SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
+
+static char model[128];
+SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, model, 0, "");
+
+char bootpath[256];
+
+#ifdef DDB
+/* start and end of kernel symbol table */
+void *ksym_start, *ksym_end;
+#endif /* DDB */
+
+static void cpu_startup(void *);
+SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
+
+void powerpc_init(u_int, u_int, u_int, char *);
+
+int save_ofw_mapping(void);
+int restore_ofw_mapping(void);
+
+void install_extint(void (*)(void));
+
+void osendsig(sig_t, int, sigset_t *, u_long);
+
+struct msgbuf *msgbufp = 0;
+
+int bootverbose = 0, Maxmem = 0;
+long dumplo;
+
+vm_offset_t phys_avail[10];
+
+static int chosen;
+
+struct pmap ofw_pmap;
+extern int ofmsr;
+
+struct bat battable[16];
+
+static void identifycpu(void);
+
+static vm_offset_t buffer_sva, buffer_eva;
+vm_offset_t clean_sva, clean_eva;
+static vm_offset_t pager_sva, pager_eva;
+
+static void
+powerpc_ofw_shutdown(void *junk, int howto)
+{
+ if (howto & RB_HALT) {
+ OF_exit();
+ }
+}
+
+static void
+cpu_startup(void *dummy)
+{
+ unsigned int i;
+ caddr_t v;
+ vm_offset_t maxaddr;
+ vm_size_t size;
+ vm_offset_t firstaddr;
+ vm_offset_t minaddr;
+
+ size = 0;
+
+ /*
+ * Good {morning,afternoon,evening,night}.
+ */
+ identifycpu();
+
+ /* startrtclock(); */
+#ifdef PERFMON
+ perfmon_init();
+#endif
+ printf("real memory = %ld (%ldK bytes)\n", ptoa(Maxmem),
+ ptoa(Maxmem) / 1024);
+
+ /*
+ * Display any holes after the first chunk of extended memory.
+ */
+ if (bootverbose) {
+ int indx;
+
+ printf("Physical memory chunk(s):\n");
+ for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
+ int size1 = phys_avail[indx + 1] - phys_avail[indx];
+
+ printf("0x%08x - 0x%08x, %d bytes (%d pages)\n",
+ phys_avail[indx], phys_avail[indx + 1] - 1, size1,
+ size1 / PAGE_SIZE);
+ }
+ }
+
+ /*
+ * Calculate callout wheel size
+ */
+ for (callwheelsize = 1, callwheelbits = 0;
+ callwheelsize < ncallout;
+ callwheelsize <<= 1, ++callwheelbits)
+ ;
+ callwheelmask = callwheelsize - 1;
+
+ /*
+ * Allocate space for system data structures.
+ * The first available kernel virtual address is in "v".
+ * As pages of kernel virtual memory are allocated, "v" is incremented.
+ * As pages of memory are allocated and cleared,
+ * "firstaddr" is incremented.
+ * An index into the kernel page table corresponding to the
+ * virtual memory address maintained in "v" is kept in "mapaddr".
+ */
+
+ /*
+ * Make two passes. The first pass calculates how much memory is
+ * needed and allocates it. The second pass assigns virtual
+ * addresses to the various data structures.
+ */
+ firstaddr = 0;
+again:
+ v = (caddr_t)firstaddr;
+
+#define valloc(name, type, num) \
+ (name) = (type *)v; v = (caddr_t)((name)+(num))
+#define valloclim(name, type, num, lim) \
+ (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
+
+ valloc(callout, struct callout, ncallout);
+ valloc(callwheel, struct callout_tailq, callwheelsize);
+
+ /*
+ * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
+ * For the first 64MB of ram nominally allocate sufficient buffers to
+ * cover 1/4 of our ram. Beyond the first 64MB allocate additional
+ * buffers to cover 1/20 of our ram over 64MB.
+ */
+
+ if (nbuf == 0) {
+ int factor;
+
+ factor = 4 * BKVASIZE / PAGE_SIZE;
+ nbuf = 50;
+ if (Maxmem > 1024)
+ nbuf += min((Maxmem - 1024) / factor, 16384 / factor);
+ if (Maxmem > 16384)
+ nbuf += (Maxmem - 16384) * 2 / (factor * 5);
+ }
+ nswbuf = max(min(nbuf/4, 64), 16);
+
+ valloc(swbuf, struct buf, nswbuf);
+ valloc(buf, struct buf, nbuf);
+ v = bufhashinit(v);
+
+ /*
+ * End of first pass, size has been calculated so allocate memory
+ */
+ if (firstaddr == 0) {
+ size = (vm_size_t)(v - firstaddr);
+ firstaddr = (vm_offset_t)kmem_alloc(kernel_map,
+ round_page(size));
+ if (firstaddr == 0)
+ panic("startup: no room for tables");
+ goto again;
+ }
+
+ /*
+ * End of second pass, addresses have been assigned
+ */
+ if ((vm_size_t)(v - firstaddr) != size)
+ panic("startup: table size inconsistency");
+
+ clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
+ (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
+ buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
+ (nbuf*BKVASIZE));
+ pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
+ (nswbuf*MAXPHYS) + pager_map_size);
+ pager_map->system_map = 1;
+ exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ (16*(ARG_MAX+(PAGE_SIZE*3))));
+
+ /*
+ * XXX: Mbuf system machine-specific initializations should
+ * go here, if anywhere.
+ */
+
+ /*
+ * Initialize callouts
+ */
+ SLIST_INIT(&callfree);
+ for (i = 0; i < ncallout; i++) {
+ callout_init(&callout[i], 0);
+ callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
+ SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
+ }
+
+ for (i = 0; i < callwheelsize; i++) {
+ TAILQ_INIT(&callwheel[i]);
+ }
+
+ mtx_init(&callout_lock, "callout", MTX_SPIN);
+
+#if defined(USERCONFIG)
+#if defined(USERCONFIG_BOOT)
+ if (1)
+#else
+ if (boothowto & RB_CONFIG)
+#endif
+ {
+ userconfig();
+ cninit(); /* the preferred console may have changed */
+ }
+#endif
+
+ printf("avail memory = %ld (%ldK bytes)\n", ptoa(cnt.v_free_count),
+ ptoa(cnt.v_free_count) / 1024);
+
+ /*
+ * Set up buffers, so they can be used to read disk labels.
+ */
+ bufinit();
+ vm_pager_bufferinit();
+ EVENTHANDLER_REGISTER(shutdown_final, powerpc_ofw_shutdown, 0,
+ SHUTDOWN_PRI_LAST);
+
+#ifdef SMP
+ /*
+ * OK, enough kmem_alloc/malloc state should be up, lets get on with it!
+ */
+ mp_start(); /* fire up the secondaries */
+ mp_announce();
+#endif /* SMP */
+}
+
+void
+identifycpu()
+{
+ int pvr, cpu;
+
+ /*
+ * Find cpu type (Do it by OpenFirmware?)
+ */
+ __asm ("mfpvr %0" : "=r"(pvr));
+ cpu = pvr >> 16;
+ switch (cpu) {
+ case 1:
+ sprintf(model, "601");
+ break;
+ case 3:
+ sprintf(model, "603");
+ break;
+ case 4:
+ sprintf(model, "604");
+ break;
+ case 5:
+ sprintf(model, "602");
+ break;
+ case 6:
+ sprintf(model, "603e");
+ break;
+ case 7:
+ sprintf(model, "603ev");
+ break;
+ case 8:
+ sprintf(model, "750 (G3)");
+ break;
+ case 9:
+ sprintf(model, "604ev");
+ break;
+ case 12:
+ sprintf(model, "7400 (G4)");
+ break;
+ case 20:
+ sprintf(model, "620");
+ break;
+ default:
+ sprintf(model, "Version %x", cpu);
+ break;
+ }
+ sprintf(model + strlen(model), " (Revision %x)", pvr & 0xffff);
+ printf("CPU: PowerPC %s\n", model);
+}
+
+extern char kernel_text[], _end[];
+
+extern void *trapcode, *trapsize;
+extern void *alitrap, *alisize;
+extern void *dsitrap, *dsisize;
+extern void *isitrap, *isisize;
+extern void *decrint, *decrsize;
+extern void *tlbimiss, *tlbimsize;
+extern void *tlbdlmiss, *tlbdlmsize;
+extern void *tlbdsmiss, *tlbdsmsize;
+
+#if 0 /* XXX: interrupt handler. We'll get to this later */
+extern void ext_intr(void);
+#endif
+
+#ifdef DDB
+extern ddblow, ddbsize;
+#endif
+#ifdef IPKDB
+extern ipkdblow, ipkdbsize;
+#endif
+
+static struct globaldata tmpglobal;
+
+void
+powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, char *args)
+{
+ int exc, scratch;
+ struct mem_region *allmem, *availmem, *mp;
+ struct globaldata *globalp;
+
+ /*
+ * Set up BAT0 to only map the lowest 256 MB area
+ */
+ battable[0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
+ battable[0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
+
+ /*
+ * Map PCI memory space.
+ */
+ battable[0x8].batl = BATL(0x80000000, BAT_I, BAT_PP_RW);
+ battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
+
+ battable[0x9].batl = BATL(0x90000000, BAT_I, BAT_PP_RW);
+ battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
+
+ battable[0xa].batl = BATL(0xa0000000, BAT_I, BAT_PP_RW);
+ battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);
+
+ /*
+ * Map obio devices.
+ */
+ battable[0xf].batl = BATL(0xf0000000, BAT_I, BAT_PP_RW);
+ battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);
+
+ /*
+ * Now setup fixed bat registers
+ *
+ * Note that we still run in real mode, and the BAT
+ * registers were cleared above.
+ */
+ /* BAT0 used for initial 256 MB segment */
+ __asm __volatile ("mtibatl 0,%0; mtibatu 0,%1;"
+ "mtdbatl 0,%0; mtdbatu 0,%1;"
+ :: "r"(battable[0].batl), "r"(battable[0].batu));
+ /*
+ * Set up battable to map all RAM regions.
+ * This is here because mem_regions() call needs bat0 set up.
+ */
+ mem_regions(&allmem, &availmem);
+ for (mp = allmem; mp->size; mp++) {
+ vm_offset_t pa = mp->start & 0xf0000000;
+ vm_offset_t end = mp->start + mp->size;
+
+ do {
+ u_int n = pa >> 28;
+
+ battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
+ battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
+ pa += 0x10000000;
+ } while (pa < end);
+ }
+
+ chosen = OF_finddevice("/chosen");
+ save_ofw_mapping();
+
+ proc0.p_addr = proc0paddr;
+ bzero(proc0.p_addr, sizeof *proc0.p_addr);
+
+ LIST_INIT(&proc0.p_contested);
+
+/* XXX: NetBSDism I _think_. Not sure yet. */
+#if 0
+ curpm = curpcb->pcb_pmreal = curpcb->pcb_pm = kernel_pmap;
+#endif
+
+ /*
+ * Initialise some mutexes.
+ */
+ mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
+ mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
+ mtx_init(&proc0.p_mtx, "process lock", MTX_DEF);
+ mtx_lock(&Giant);
+
+ /*
+ * Initialise console.
+ */
+ cninit();
+
+#ifdef __notyet__ /* Needs some rethinking regarding real/virtual OFW */
+ OF_set_callback(callback);
+#endif
+
+ /*
+ * Set up trap vectors
+ */
+ for (exc = EXC_RSVD; exc <= EXC_LAST; exc += 0x100) {
+ switch (exc) {
+ default:
+ bcopy(&trapcode, (void *)exc, (size_t)&trapsize);
+ break;
+ case EXC_EXI:
+ /*
+ * This one is (potentially) installed during autoconf
+ */
+ break;
+ case EXC_ALI:
+ bcopy(&alitrap, (void *)EXC_ALI, (size_t)&alisize);
+ break;
+ case EXC_DSI:
+ bcopy(&dsitrap, (void *)EXC_DSI, (size_t)&dsisize);
+ break;
+ case EXC_ISI:
+ bcopy(&isitrap, (void *)EXC_ISI, (size_t)&isisize);
+ break;
+ case EXC_DECR:
+ bcopy(&decrint, (void *)EXC_DECR, (size_t)&decrsize);
+ break;
+ case EXC_IMISS:
+ bcopy(&tlbimiss, (void *)EXC_IMISS, (size_t)&tlbimsize);
+ break;
+ case EXC_DLMISS:
+ bcopy(&tlbdlmiss, (void *)EXC_DLMISS, (size_t)&tlbdlmsize);
+ break;
+ case EXC_DSMISS:
+ bcopy(&tlbdsmiss, (void *)EXC_DSMISS, (size_t)&tlbdsmsize);
+ break;
+#if defined(DDB) || defined(IPKDB)
+ case EXC_TRC:
+ case EXC_PGM:
+ case EXC_BPT:
+#if defined(DDB)
+ bcopy(&ddblow, (void *)exc, (size_t)&ddbsize);
+#else
+ bcopy(&ipkdblow, (void *)exc, (size_t)&ipkdbsize);
+#endif
+ break;
+#endif /* DDB || IPKDB */
+ }
+ }
+
+#if 0 /* XXX: coming soon... */
+ /*
+ * external interrupt handler install
+ */
+ install_extint(ext_intr);
+
+ __syncicache((void *)EXC_RST, EXC_LAST - EXC_RST + 0x100);
+#endif
+
+ /*
+ * Now enable translation (and machine checks/recoverable interrupts).
+ */
+ __asm __volatile ("mfmsr %0; ori %0,%0,%1; mtmsr %0; isync"
+ : "=r"(scratch) : "K"(PSL_IR|PSL_DR|PSL_ME|PSL_RI));
+
+
+ ofmsr &= ~PSL_IP;
+
+ /*
+ * Parse arg string.
+ */
+#ifdef DDB
+ bcopy(args + strlen(args) + 1, &startsym, sizeof(startsym));
+ bcopy(args + strlen(args) + 5, &endsym, sizeof(endsym));
+ if (startsym == NULL || endsym == NULL)
+ startsym = endsym = NULL;
+#endif
+
+ strcpy(bootpath, args);
+ args = bootpath;
+ while (*++args && *args != ' ');
+ if (*args) {
+ *args++ = 0;
+ while (*args) {
+ switch (*args++) {
+ case 'a':
+ boothowto |= RB_ASKNAME;
+ break;
+ case 's':
+ boothowto |= RB_SINGLE;
+ break;
+ case 'd':
+ boothowto |= RB_KDB;
+ break;
+ case 'v':
+ boothowto |= RB_VERBOSE;
+ break;
+ }
+ }
+ }
+
+#ifdef DDB
+ ddb_init((int)((u_int)endsym - (u_int)startsym), startsym, endsym);
+#endif
+#ifdef IPKDB
+ /*
+ * Now trap to IPKDB
+ */
+ ipkdb_init();
+ if (boothowto & RB_KDB)
+ ipkdb_connect(0);
+#endif
+
+ /*
+ * Set the page size.
+ */
+#if 0
+ vm_set_page_size();
+#endif
+
+ /*
+ * Initialize pmap module.
+ */
+ pmap_bootstrap(startkernel, endkernel);
+
+ restore_ofw_mapping();
+
+ /*
+ * Setup the global data for the bootstrap cpu.
+ */
+ globalp = (struct globaldata *) &tmpglobal;
+
+ /*
+ * XXX: Pass 0 as CPU id. This is bad. We need to work out
+ * XXX: which CPU we are somehow.
+ */
+ globaldata_init(globalp, 0, sizeof(struct globaldata));
+ __asm("mtsprg 0,%0\n" :: "r" (globalp));
+
+ PCPU_GET(next_asn) = 1; /* 0 used for proc0 pmap */
+ PCPU_SET(curproc, &proc0);
+ PCPU_SET(spinlocks, NULL);
+}
+
+static int N_mapping;
+static struct {
+ vm_offset_t va;
+ int len;
+ vm_offset_t pa;
+ int mode;
+} ofw_mapping[256];
+
+int
+save_ofw_mapping()
+{
+ int mmui, mmu;
+
+ OF_getprop(chosen, "mmu", &mmui, 4);
+ mmu = OF_instance_to_package(mmui);
+
+ bzero(ofw_mapping, sizeof(ofw_mapping));
+
+ N_mapping =
+ OF_getprop(mmu, "translations", ofw_mapping, sizeof(ofw_mapping));
+ N_mapping /= sizeof(ofw_mapping[0]);
+
+ return 0;
+}
+
+int
+restore_ofw_mapping()
+{
+ int i;
+ struct vm_page pg;
+
+ pmap_pinit(&ofw_pmap);
+
+ ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
+
+ for (i = 0; i < N_mapping; i++) {
+ vm_offset_t pa = ofw_mapping[i].pa;
+ vm_offset_t va = ofw_mapping[i].va;
+ int size = ofw_mapping[i].len;
+
+ if (va < 0x90000000) /* XXX */
+ continue;
+
+ while (size > 0) {
+ pg.phys_addr = pa;
+ pmap_enter(&ofw_pmap, va, &pg, VM_PROT_ALL,
+ VM_PROT_ALL);
+ pa += PAGE_SIZE;
+ va += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+
+ return 0;
+}
+
+void
+bzero(void *buf, size_t len)
+{
+ caddr_t p;
+
+ p = buf;
+
+ while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
+ *p++ = 0;
+ len--;
+ }
+
+ while (len >= sizeof(u_long) * 8) {
+ *(u_long*) p = 0;
+ *((u_long*) p + 1) = 0;
+ *((u_long*) p + 2) = 0;
+ *((u_long*) p + 3) = 0;
+ len -= sizeof(u_long) * 8;
+ *((u_long*) p + 4) = 0;
+ *((u_long*) p + 5) = 0;
+ *((u_long*) p + 6) = 0;
+ *((u_long*) p + 7) = 0;
+ p += sizeof(u_long) * 8;
+ }
+
+ while (len >= sizeof(u_long)) {
+ *(u_long*) p = 0;
+ len -= sizeof(u_long);
+ p += sizeof(u_long);
+ }
+
+ while (len) {
+ *p++ = 0;
+ len--;
+ }
+}
+
+#if 0
+void
+delay(unsigned n)
+{
+ u_long tb;
+
+ do {
+ __asm __volatile("mftb %0" : "=r" (tb));
+ } while (n > (int)(tb & 0xffffffff));
+}
+#endif
+
+void
+osendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
+{
+
+ /* XXX: To be done */
+ return;
+}
+
+void
+sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
+{
+
+ /* XXX: To be done */
+ return;
+}
+
+int
+osigreturn(struct proc *p, struct osigreturn_args *uap)
+{
+
+ /* XXX: To be done */
+ return(ENOSYS);
+}
+
+int
+sigreturn(struct proc *p, struct sigreturn_args *uap)
+{
+
+ /* XXX: To be done */
+ return(ENOSYS);
+}
+
+void
+cpu_boot(int howto)
+{
+}
+
+/*
+ * Shut down the CPU as much as possible.
+ */
+void
+cpu_halt(void)
+{
+
+ OF_exit();
+}
+
+/*
+ * Set up registers on exec.
+ */
+void
+setregs(struct proc *p, u_long entry, u_long stack, u_long ps_strings)
+{
+ struct trapframe *tf;
+ struct ps_strings arginfo;
+
+ tf = trapframe(p);
+
+ bzero(tf, sizeof *tf);
+ tf->fixreg[1] = -roundup(-stack + 8, 16);
+
+ /*
+ * XXX Machine-independent code has already copied arguments and
+ * XXX environment to userland. Get them back here.
+ */
+ (void)copyin((char *)PS_STRINGS, &arginfo, sizeof(arginfo));
+
+ /*
+ * Set up arguments for _start():
+ * _start(argc, argv, envp, obj, cleanup, ps_strings);
+ *
+ * Notes:
+	 *	- obj and cleanup are the auxiliary and termination
+	 *	  vectors.  They are fixed up by ld.elf_so.
+	 *	- ps_strings is a NetBSD extension, and will be
+	 *	  ignored by executables which are strictly
+	 *	  compliant with the SVR4 ABI.
+ *
+ * XXX We have to set both regs and retval here due to different
+ * XXX calling convention in trap.c and init_main.c.
+ */
+ tf->fixreg[3] = arginfo.ps_nargvstr;
+ tf->fixreg[4] = (register_t)arginfo.ps_argvstr;
+ tf->fixreg[5] = (register_t)arginfo.ps_envstr;
+	tf->fixreg[6] = 0;			/* auxiliary vector */
+ tf->fixreg[7] = 0; /* termination vector */
+ tf->fixreg[8] = (register_t)PS_STRINGS; /* NetBSD extension */
+
+ tf->srr0 = entry;
+ tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
+ p->p_addr->u_pcb.pcb_flags = 0;
+}
+
+extern void *extint, *extsize;
+extern u_long extint_call;
+
+#if 0
+void
+install_extint(void (*handler)(void))
+{
+ u_long offset;
+ int omsr, msr;
+
+ offset = (u_long)handler - (u_long)&extint_call;
+
+#ifdef DIAGNOSTIC
+ if (offset > 0x1ffffff)
+ panic("install_extint: too far away");
+#endif
+ __asm __volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
+ : "=r"(omsr), "=r"(msr) : "K"((u_short)~PSL_EE));
+ extint_call = (extint_call & 0xfc000003) | offset;
+ bcopy(&extint, (void *)EXC_EXI, (size_t)&extsize);
+ __syncicache((void *)&extint_call, sizeof extint_call);
+ __syncicache((void *)EXC_EXI, (int)&extsize);
+ __asm __volatile ("mtmsr %0" :: "r"(omsr));
+}
+#endif
+
+#if !defined(DDB)
+void
+Debugger(const char *msg)
+{
+
+ printf("Debugger(\"%s\") called.\n", msg);
+}
+#endif /* !defined(DDB) */
+
+/* XXX: dummy {fill,set}_[fp]regs */
+int
+fill_regs(struct proc *p, struct reg *regs)
+{
+
+ return (ENOSYS);
+}
+
+int
+fill_fpregs(struct proc *p, struct fpreg *fpregs)
+{
+
+ return (ENOSYS);
+}
+
+int
+set_regs(struct proc *p, struct reg *regs)
+{
+
+ return (ENOSYS);
+}
+
+int
+set_fpregs(struct proc *p, struct fpreg *fpregs)
+{
+
+ return (ENOSYS);
+}
+
+int
+ptrace_set_pc(struct proc *p, unsigned long addr)
+{
+
+ /* XXX: coming soon... */
+ return (ENOSYS);
+}
+
+int
+ptrace_single_step(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return (ENOSYS);
+}
+
+int
+ptrace_write_u(struct proc *p, vm_offset_t off, long data)
+{
+
+ /* XXX: coming soon... */
+ return (ENOSYS);
+}
+
+int
+ptrace_read_u_check(struct proc *p, vm_offset_t addr, size_t len)
+{
+
+ /* XXX: coming soon... */
+ return (ENOSYS);
+}
+
+int
+ptrace_clear_single_step(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return (ENOSYS);
+}
+
+/*
+ * Initialise a struct globaldata.
+ */
+void
+globaldata_init(struct globaldata *globaldata, int cpuid, size_t sz)
+{
+
+ bzero(globaldata, sz);
+ globaldata->gd_cpuid = cpuid;
+ globaldata->gd_next_asn = 0;
+ globaldata->gd_current_asngen = 1;
+}
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
new file mode 100644
index 0000000..ae34d23
--- /dev/null
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -0,0 +1,1747 @@
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
+ */
+/*
+ * Copyright (C) 2001 Benno Rice.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/msgbuf.h>
+#include <sys/vmmeter.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/mutex.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <sys/lock.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
+
+#include <sys/user.h>
+
+#include <machine/pcb.h>
+#include <machine/powerpc.h>
+#include <machine/pte.h>
+
+pte_t *ptable;
+int ptab_cnt;
+u_int ptab_mask;
+#define HTABSIZE (ptab_cnt * 64)
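+/*
+ * Each PTEG holds eight 8-byte PTEs, hence the "* 64" above; pteidx()
+ * below selects a PTEG, and the "* 8" arithmetic in the lookup loops
+ * turns a PTEG index into a PTE index.
+ */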
+
+#define MINPV 2048
+
+struct pte_ovfl {
+ LIST_ENTRY(pte_ovfl) po_list; /* Linked list of overflow entries */
+ struct pte po_pte; /* PTE for this mapping */
+};
+
+LIST_HEAD(pte_ovtab, pte_ovfl) *potable; /* Overflow entries for ptable */
+
+static struct pmap kernel_pmap_store;
+pmap_t kernel_pmap;
+
+static int npgs;
+static u_int nextavail;
+
+#ifndef MSGBUFADDR
+extern vm_offset_t msgbuf_paddr;
+#endif
+
+static struct mem_region *mem, *avail;
+
+vm_offset_t avail_start;
+vm_offset_t avail_end;
+vm_offset_t virtual_avail;
+vm_offset_t virtual_end;
+
+vm_offset_t kernel_vm_end;
+
+static int pmap_pagedaemon_waken = 0;
+
+extern unsigned int Maxmem;
+
+#define ATTRSHFT 4
+
+struct pv_entry *pv_table;
+
+static vm_zone_t pvzone;
+static struct vm_zone pvzone_store;
+static struct vm_object pvzone_obj;
+static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
+static struct pv_entry *pvinit;
+
+#if !defined(PMAP_SHPGPERPROC)
+#define PMAP_SHPGPERPROC 200
+#endif
+
+struct pv_page;
+struct pv_page_info {
+ LIST_ENTRY(pv_page) pgi_list;
+ struct pv_entry *pgi_freelist;
+ int pgi_nfree;
+};
+#define NPVPPG ((PAGE_SIZE - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
+struct pv_page {
+ struct pv_page_info pvp_pgi;
+ struct pv_entry pvp_pv[NPVPPG];
+};
+LIST_HEAD(pv_page_list, pv_page) pv_page_freelist;
+int pv_nfree;
+int pv_pcnt;
+static struct pv_entry *pmap_alloc_pv(void);
+static void pmap_free_pv(struct pv_entry *);
+
+struct po_page;
+struct po_page_info {
+ LIST_ENTRY(po_page) pgi_list;
+ vm_page_t pgi_page;
+ LIST_HEAD(po_freelist, pte_ovfl) pgi_freelist;
+ int pgi_nfree;
+};
+#define NPOPPG ((PAGE_SIZE - sizeof(struct po_page_info)) / sizeof(struct pte_ovfl))
+struct po_page {
+ struct po_page_info pop_pgi;
+ struct pte_ovfl pop_po[NPOPPG];
+};
+LIST_HEAD(po_page_list, po_page) po_page_freelist;
+int po_nfree;
+int po_pcnt;
+static struct pte_ovfl *poalloc(void);
+static void pofree(struct pte_ovfl *, int);
+
+static u_int usedsr[NPMAPS / sizeof(u_int) / 8];
+
+static int pmap_initialized;
+
+int pte_spill(vm_offset_t);
+
+/*
+ * These small routines may have to be replaced,
+ * if/when we support processors other than the 604.
+ */
+static __inline void
+tlbie(vm_offset_t ea)
+{
+
+ __asm __volatile ("tlbie %0" :: "r"(ea));
+}
+
+static __inline void
+tlbsync(void)
+{
+
+ __asm __volatile ("sync; tlbsync; sync");
+}
+
+static __inline void
+tlbia(void)
+{
+ vm_offset_t i;
+
+ __asm __volatile ("sync");
+ for (i = 0; i < (vm_offset_t)0x00040000; i += 0x00001000) {
+ tlbie(i);
+ }
+ tlbsync();
+}
+
+static __inline int
+ptesr(sr_t *sr, vm_offset_t addr)
+{
+
+ return sr[(u_int)addr >> ADDR_SR_SHFT];
+}
+
+static __inline int
+pteidx(sr_t sr, vm_offset_t addr)
+{
+ int hash;
+
+ hash = (sr & SR_VSID) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
+ return hash & ptab_mask;
+}
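+
+/*
+ * Informational note: the primary PTEG index computed above is
+ * (VSID ^ page index) & ptab_mask.  Since ptab_cnt is a power of two,
+ * the secondary PTEG is reached with a single "idx ^ ptab_mask", which
+ * is how pte_insert() and the lookup loops below derive it.
+ */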
+
+static __inline int
+ptematch(pte_t *ptp, sr_t sr, vm_offset_t va, int which)
+{
+
+ return ptp->pte_hi == (((sr & SR_VSID) << PTE_VSID_SHFT) |
+ (((u_int)va >> ADDR_API_SHFT) & PTE_API) | which);
+}
+
+static __inline struct pv_entry *
+pa_to_pv(vm_offset_t pa)
+{
+#if 0 /* XXX */
+ int bank, pg;
+
+ bank = vm_physseg_find(atop(pa), &pg);
+ if (bank == -1)
+ return NULL;
+ return &vm_physmem[bank].pmseg.pvent[pg];
+#endif
+ return (NULL);
+}
+
+static __inline char *
+pa_to_attr(vm_offset_t pa)
+{
+#if 0 /* XXX */
+ int bank, pg;
+
+ bank = vm_physseg_find(atop(pa), &pg);
+ if (bank == -1)
+ return NULL;
+ return &vm_physmem[bank].pmseg.attrs[pg];
+#endif
+ return (NULL);
+}
+
+/*
+ * Try to insert page table entry *pt into the ptable at idx.
+ *
+ * Note: *pt mustn't have PTE_VALID set.
+ * This is done here as required by Book III, 4.12.
+ */
+static int
+pte_insert(int idx, pte_t *pt)
+{
+ pte_t *ptp;
+ int i;
+
+ /*
+ * First try primary hash.
+ */
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if (!(ptp->pte_hi & PTE_VALID)) {
+ *ptp = *pt;
+ ptp->pte_hi &= ~PTE_HID;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= PTE_VALID;
+ return 1;
+ }
+ }
+
+ /*
+ * Then try secondary hash.
+ */
+
+ idx ^= ptab_mask;
+
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if (!(ptp->pte_hi & PTE_VALID)) {
+ *ptp = *pt;
+ ptp->pte_hi |= PTE_HID;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= PTE_VALID;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Spill handler.
+ *
+ * Tries to spill a page table entry from the overflow area.
+ * Note that this routine runs in real mode on a separate stack,
+ * with interrupts disabled.
+ */
+int
+pte_spill(vm_offset_t addr)
+{
+ int idx, i;
+ sr_t sr;
+ struct pte_ovfl *po;
+ pte_t ps;
+ pte_t *pt;
+
+ __asm ("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
+ idx = pteidx(sr, addr);
+ for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
+ if (ptematch(&po->po_pte, sr, addr, 0)) {
+ /*
+ * Now found an entry to be spilled into the real
+ * ptable.
+ */
+ if (pte_insert(idx, &po->po_pte)) {
+ LIST_REMOVE(po, po_list);
+ pofree(po, 0);
+ return 1;
+ }
+ /*
+ * Have to substitute some entry. Use the primary
+ * hash for this.
+ *
+ * Use low bits of timebase as random generator
+ */
+ __asm ("mftb %0" : "=r"(i));
+ pt = ptable + idx * 8 + (i & 7);
+ pt->pte_hi &= ~PTE_VALID;
+ ps = *pt;
+ __asm __volatile ("sync");
+ tlbie(addr);
+ tlbsync();
+ *pt = po->po_pte;
+ __asm __volatile ("sync");
+ pt->pte_hi |= PTE_VALID;
+ po->po_pte = ps;
+ if (ps.pte_hi & PTE_HID) {
+ /*
+ * We took an entry that was on the alternate
+				 * hash chain, so move it to its original
+ * chain.
+ */
+ po->po_pte.pte_hi &= ~PTE_HID;
+ LIST_REMOVE(po, po_list);
+ LIST_INSERT_HEAD(potable + (idx ^ ptab_mask),
+ po, po_list);
+ }
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This is called during powerpc_init, before the system is really initialized.
+ */
+void
+pmap_bootstrap(u_int kernelstart, u_int kernelend)
+{
+ struct mem_region *mp, *mp1;
+ int cnt, i;
+ u_int s, e, sz;
+
+ /*
+ * Get memory.
+ */
+ mem_regions(&mem, &avail);
+ for (mp = mem; mp->size; mp++)
+ Maxmem += btoc(mp->size);
+
+ /*
+ * Count the number of available entries.
+ */
+ for (cnt = 0, mp = avail; mp->size; mp++) {
+ cnt++;
+ }
+
+ /*
+ * Page align all regions.
+ * Non-page aligned memory isn't very interesting to us.
+ * Also, sort the entries for ascending addresses.
+ */
+ kernelstart &= ~PAGE_MASK;
+ kernelend = (kernelend + PAGE_MASK) & ~PAGE_MASK;
+ for (mp = avail; mp->size; mp++) {
+ s = mp->start;
+ e = mp->start + mp->size;
+ /*
+ * Check whether this region holds all of the kernel.
+ */
+ if (s < kernelstart && e > kernelend) {
+ avail[cnt].start = kernelend;
+ avail[cnt++].size = e - kernelend;
+ e = kernelstart;
+ }
+ /*
+		 * Check whether this region starts within the kernel.
+ */
+ if (s >= kernelstart && s < kernelend) {
+ if (e <= kernelend)
+ goto empty;
+ s = kernelend;
+ }
+ /*
+ * Now look whether this region ends within the kernel.
+ */
+ if (e > kernelstart && e <= kernelend) {
+ if (s >= kernelstart)
+ goto empty;
+ e = kernelstart;
+ }
+ /*
+ * Now page align the start and size of the region.
+ */
+ s = round_page(s);
+ e = trunc_page(e);
+ if (e < s) {
+ e = s;
+ }
+ sz = e - s;
+ /*
+ * Check whether some memory is left here.
+ */
+ if (sz == 0) {
+ empty:
+ bcopy(mp + 1, mp,
+ (cnt - (mp - avail)) * sizeof *mp);
+ cnt--;
+ mp--;
+ continue;
+ }
+
+ /*
+ * Do an insertion sort.
+ */
+ npgs += btoc(sz);
+
+ for (mp1 = avail; mp1 < mp; mp1++) {
+ if (s < mp1->start) {
+ break;
+ }
+ }
+
+ if (mp1 < mp) {
+ bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
+ mp1->start = s;
+ mp1->size = sz;
+ } else {
+ mp->start = s;
+ mp->size = sz;
+ }
+ }
+
+#ifdef HTABENTS
+ ptab_cnt = HTABENTS;
+#else
+ ptab_cnt = (Maxmem + 1) / 2;
+
+ /* The minimum is 1024 PTEGs. */
+ if (ptab_cnt < 1024) {
+ ptab_cnt = 1024;
+ }
+
+ /* Round up to power of 2. */
+ __asm ("cntlzw %0,%1" : "=r"(i) : "r"(ptab_cnt - 1));
+ ptab_cnt = 1 << (32 - i);
+#endif
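+
+	/*
+	 * Worked example (illustrative): with 0x6000 pages of RAM the
+	 * heuristic above gives ptab_cnt = 0x3000, which cntlzw rounds
+	 * up to 0x4000 PTEGs, i.e. a 1MB hash table (0x4000 * 64 bytes).
+	 */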
+
+ /*
+ * Find suitably aligned memory for HTAB.
+ */
+ for (mp = avail; mp->size; mp++) {
+ s = roundup(mp->start, HTABSIZE) - mp->start;
+
+ if (mp->size < s + HTABSIZE) {
+ continue;
+ }
+
+ ptable = (pte_t *)(mp->start + s);
+
+ if (mp->size == s + HTABSIZE) {
+ if (s)
+ mp->size = s;
+ else {
+ bcopy(mp + 1, mp,
+ (cnt - (mp - avail)) * sizeof *mp);
+ mp = avail;
+ }
+ break;
+ }
+
+ if (s != 0) {
+ bcopy(mp, mp + 1,
+ (cnt - (mp - avail)) * sizeof *mp);
+ mp++->size = s;
+ cnt++;
+ }
+
+ mp->start += s + HTABSIZE;
+ mp->size -= s + HTABSIZE;
+ break;
+ }
+
+ if (!mp->size) {
+ panic("not enough memory?");
+ }
+
+ npgs -= btoc(HTABSIZE);
+ bzero((void *)ptable, HTABSIZE);
+ ptab_mask = ptab_cnt - 1;
+
+ /*
+ * We cannot do pmap_steal_memory here,
+ * since we don't run with translation enabled yet.
+ */
+ s = sizeof(struct pte_ovtab) * ptab_cnt;
+ sz = round_page(s);
+
+ for (mp = avail; mp->size; mp++) {
+ if (mp->size >= sz) {
+ break;
+ }
+ }
+
+ if (!mp->size) {
+ panic("not enough memory?");
+ }
+
+ npgs -= btoc(sz);
+ potable = (struct pte_ovtab *)mp->start;
+ mp->size -= sz;
+ mp->start += sz;
+
+ if (mp->size <= 0) {
+ bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
+ }
+
+ for (i = 0; i < ptab_cnt; i++) {
+ LIST_INIT(potable + i);
+ }
+
+#ifndef MSGBUFADDR
+ /*
+ * allow for msgbuf
+ */
+ sz = round_page(MSGBUFSIZE);
+ mp = NULL;
+
+ for (mp1 = avail; mp1->size; mp1++) {
+ if (mp1->size >= sz) {
+ mp = mp1;
+ }
+ }
+
+ if (mp == NULL) {
+ panic("not enough memory?");
+ }
+
+ npgs -= btoc(sz);
+ msgbuf_paddr = mp->start + mp->size - sz;
+ mp->size -= sz;
+
+ if (mp->size <= 0) {
+ bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
+ }
+#endif
+
+ /*
+ * Initialize kernel pmap and hardware.
+ */
+ kernel_pmap = &kernel_pmap_store;
+
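+	/*
+	 * Map 256MB at 0x80000000 1:1 through DBAT1.  Illustrative
+	 * decoding of the magic numbers below: BEPI/BRPN = 0x80000000,
+	 * BL = 0x7ff (256MB), Vs only, WIMG = M, PP = read/write.
+	 */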
+ {
+ int batu, batl;
+
+ batu = 0x80001ffe;
+ batl = 0x80000012;
+
+ __asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r" (batu), "r" (batl));
+ }
+
+
+#if NPMAPS >= KERNEL_SEGMENT / 16
+ usedsr[KERNEL_SEGMENT / 16 / (sizeof usedsr[0] * 8)]
+ |= 1 << ((KERNEL_SEGMENT / 16) % (sizeof usedsr[0] * 8));
+#endif
+
+#if 0 /* XXX */
+ for (i = 0; i < 16; i++) {
+ kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
+ __asm __volatile ("mtsrin %0,%1"
+ :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
+ }
+#endif
+
+ for (i = 0; i < 16; i++) {
+ int j;
+
+ __asm __volatile ("mfsrin %0,%1"
+ : "=r" (j)
+ : "r" (i << ADDR_SR_SHFT));
+
+ kernel_pmap->pm_sr[i] = j;
+ }
+
+ kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
+ __asm __volatile ("mtsr %0,%1"
+ :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
+
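+	/*
+	 * Point the MMU at the hash table.  SDR1 takes the physical base
+	 * of the size-aligned HTAB in its high-order bits and HTABMASK
+	 * in its low 9 bits; "ptab_mask >> 10" works because the minimum
+	 * table size is 1024 PTEGs, whose index bits need no masking.
+	 */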
+ __asm __volatile ("sync; mtsdr1 %0; isync"
+ :: "r"((u_int)ptable | (ptab_mask >> 10)));
+
+ tlbia();
+
+ nextavail = avail->start;
+ avail_start = avail->start;
+ for (mp = avail, i = 0; mp->size; mp++) {
+ avail_end = mp->start + mp->size;
+ phys_avail[i++] = mp->start;
+ phys_avail[i++] = mp->start + mp->size;
+ }
+
+ virtual_avail = VM_MIN_KERNEL_ADDRESS;
+ virtual_end = VM_MAX_KERNEL_ADDRESS;
+}
+
+/*
+ * Initialize anything else for pmap handling.
+ * Called during vm_init().
+ */
+void
+pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
+{
+ int initial_pvs;
+
+ /*
+ * init the pv free list
+ */
+ initial_pvs = vm_page_array_size;
+ if (initial_pvs < MINPV) {
+ initial_pvs = MINPV;
+ }
+ pvzone = &pvzone_store;
+ pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
+ initial_pvs * sizeof(struct pv_entry));
+ zbootinit(pvzone, "PV ENTRY", sizeof(struct pv_entry), pvinit,
+ vm_page_array_size);
+
+ pmap_initialized = TRUE;
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure.
+ */
+void
+pmap_pinit(struct pmap *pm)
+{
+ int i, j;
+
+ /*
+ * Allocate some segment registers for this pmap.
+ */
+ pm->pm_refs = 1;
+ for (i = 0; i < sizeof usedsr / sizeof usedsr[0]; i++) {
+ if (usedsr[i] != 0xffffffff) {
+ j = ffs(~usedsr[i]) - 1;
+ usedsr[i] |= 1 << j;
+ pm->pm_sr[0] = (i * sizeof usedsr[0] * 8 + j) * 16;
+ for (i = 1; i < 16; i++) {
+ pm->pm_sr[i] = pm->pm_sr[i - 1] + 1;
+ }
+ return;
+ }
+ }
+ panic("out of segments");
+}
+
+void
+pmap_pinit2(pmap_t pmap)
+{
+
+ /*
+ * Nothing to be done.
+ */
+ return;
+}
+
+/*
+ * Add a reference to the given pmap.
+ */
+void
+pmap_reference(struct pmap *pm)
+{
+
+ pm->pm_refs++;
+}
+
+/*
+ * Retire the given pmap from service.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_destroy(struct pmap *pm)
+{
+
+ if (--pm->pm_refs == 0) {
+ pmap_release(pm);
+ free((caddr_t)pm, M_VMPGDATA);
+ }
+}
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ */
+void
+pmap_release(struct pmap *pm)
+{
+ int i, j;
+
+ if (!pm->pm_sr[0]) {
+ panic("pmap_release");
+ }
+ i = pm->pm_sr[0] / 16;
+ j = i % (sizeof usedsr[0] * 8);
+ i /= sizeof usedsr[0] * 8;
+ usedsr[i] &= ~(1 << j);
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+void
+pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vm_offset_t dst_addr,
+ vm_size_t len, vm_offset_t src_addr)
+{
+
+ return;
+}
+
+/*
+ * Garbage collects the physical map system for
+ * pages which are no longer used.
+ * Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but
+ * others may be collected.
+ * Called by the pageout daemon when pages are scarce.
+ */
+void
+pmap_collect(void)
+{
+
+ return;
+}
+
+/*
+ * Fill the given physical page with zeroes.
+ */
+void
+pmap_zero_page(vm_offset_t pa)
+{
+#if 0
+ bzero((caddr_t)pa, PAGE_SIZE);
+#else
+ int i;
+
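+	/*
+	 * dcbz establishes and zeroes a full cache line without fetching
+	 * it from memory first, so this is preferred over a word-by-word
+	 * bzero() (the page is assumed to be mapped cacheable).
+	 */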
+ for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
+ __asm __volatile ("dcbz 0,%0" :: "r"(pa));
+ pa += CACHELINESIZE;
+ }
+#endif
+}
+
+void
+pmap_zero_page_area(vm_offset_t pa, int off, int size)
+{
+
+ bzero((caddr_t)pa + off, size);
+}
+
+/*
+ * Copy the given physical source page to its destination.
+ */
+void
+pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+{
+
+ bcopy((caddr_t)src, (caddr_t)dst, PAGE_SIZE);
+}
+
+static struct pv_entry *
+pmap_alloc_pv()
+{
+ pv_entry_count++;
+
+ if (pv_entry_high_water &&
+ (pv_entry_count > pv_entry_high_water) &&
+ (pmap_pagedaemon_waken == 0)) {
+ pmap_pagedaemon_waken = 1;
+ wakeup(&vm_pages_needed);
+ }
+
+ return zalloc(pvzone);
+}
+
+static void
+pmap_free_pv(struct pv_entry *pv)
+{
+
+ pv_entry_count--;
+ zfree(pvzone, pv);
+}
+
+/*
+ * We really hope that we don't need overflow entries
+ * before the VM system is initialized!
+ *
+ * XXX: Should really be switched over to the zone allocator.
+ */
+static struct pte_ovfl *
+poalloc()
+{
+ struct po_page *pop;
+ struct pte_ovfl *po;
+ vm_page_t mem;
+ int i;
+
+ if (!pmap_initialized) {
+ panic("poalloc");
+ }
+
+ if (po_nfree == 0) {
+ /*
+ * Since we cannot use maps for potable allocation,
+ * we have to steal some memory from the VM system. XXX
+ */
+ mem = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM);
+ po_pcnt++;
+ pop = (struct po_page *)VM_PAGE_TO_PHYS(mem);
+ pop->pop_pgi.pgi_page = mem;
+ LIST_INIT(&pop->pop_pgi.pgi_freelist);
+ for (i = NPOPPG - 1, po = pop->pop_po + 1; --i >= 0; po++) {
+ LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po,
+ po_list);
+ }
+ po_nfree += pop->pop_pgi.pgi_nfree = NPOPPG - 1;
+ LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
+ po = pop->pop_po;
+ } else {
+ po_nfree--;
+ pop = po_page_freelist.lh_first;
+ if (--pop->pop_pgi.pgi_nfree <= 0) {
+ LIST_REMOVE(pop, pop_pgi.pgi_list);
+ }
+ po = pop->pop_pgi.pgi_freelist.lh_first;
+ LIST_REMOVE(po, po_list);
+ }
+
+ return po;
+}
+
+static void
+pofree(struct pte_ovfl *po, int freepage)
+{
+ struct po_page *pop;
+
+ pop = (struct po_page *)trunc_page((vm_offset_t)po);
+ switch (++pop->pop_pgi.pgi_nfree) {
+ case NPOPPG:
+ if (!freepage) {
+ break;
+ }
+ po_nfree -= NPOPPG - 1;
+ po_pcnt--;
+ LIST_REMOVE(pop, pop_pgi.pgi_list);
+ vm_page_free(pop->pop_pgi.pgi_page);
+ return;
+ case 1:
+ LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
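+		/* FALLTHROUGH */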
+ default:
+ break;
+ }
+ LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po, po_list);
+ po_nfree++;
+}
+
+/*
+ * This returns whether this is the first mapping of a page.
+ */
+static int
+pmap_enter_pv(int pteidx, vm_offset_t va, vm_offset_t pa)
+{
+ struct pv_entry *pv, *npv;
+ int s, first;
+
+ if (!pmap_initialized) {
+ return 0;
+ }
+
+ s = splimp();
+
+ pv = pa_to_pv(pa);
+ first = pv->pv_idx;
+ if (pv->pv_idx == -1) {
+ /*
+ * No entries yet, use header as the first entry.
+ */
+ pv->pv_va = va;
+ pv->pv_idx = pteidx;
+ pv->pv_next = NULL;
+ } else {
+ /*
+ * There is at least one other VA mapping this page.
+ * Place this entry after the header.
+ */
+ npv = pmap_alloc_pv();
+ npv->pv_va = va;
+ npv->pv_idx = pteidx;
+ npv->pv_next = pv->pv_next;
+ pv->pv_next = npv;
+ }
+ splx(s);
+ return first;
+}
+
+static void
+pmap_remove_pv(int pteidx, vm_offset_t va, vm_offset_t pa, struct pte *pte)
+{
+ struct pv_entry *pv, *npv;
+ char *attr;
+
+ /*
+ * First transfer reference/change bits to cache.
+ */
+ attr = pa_to_attr(pa);
+ if (attr == NULL) {
+ return;
+ }
+ *attr |= (pte->pte_lo & (PTE_REF | PTE_CHG)) >> ATTRSHFT;
+
+ /*
+ * Remove from the PV table.
+ */
+ pv = pa_to_pv(pa);
+
+ /*
+ * If it is the first entry on the list, it is actually
+ * in the header and we must copy the following entry up
+ * to the header. Otherwise we must search the list for
+ * the entry. In either case we free the now unused entry.
+ */
+ if (pteidx == pv->pv_idx && va == pv->pv_va) {
+ npv = pv->pv_next;
+ if (npv) {
+ *pv = *npv;
+ pmap_free_pv(npv);
+ } else {
+ pv->pv_idx = -1;
+ }
+ } else {
+ for (; (npv = pv->pv_next); pv = npv) {
+ if (pteidx == npv->pv_idx && va == npv->pv_va) {
+ break;
+ }
+ }
+ if (npv) {
+ pv->pv_next = npv->pv_next;
+ pmap_free_pv(npv);
+ }
+#ifdef DIAGNOSTIC
+ else {
+ panic("pmap_remove_pv: not on list\n");
+ }
+#endif
+ }
+}
+
+/*
+ * Insert physical page at pa into the given pmap at virtual address va.
+ */
+void
+pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t pg, vm_prot_t prot,
+ boolean_t wired)
+{
+ sr_t sr;
+ int idx, s;
+ pte_t pte;
+ struct pte_ovfl *po;
+ struct mem_region *mp;
+ vm_offset_t pa;
+
+ pa = VM_PAGE_TO_PHYS(pg) & ~PAGE_MASK;
+
+ /*
+ * Have to remove any existing mapping first.
+ */
+ pmap_remove(pm, va, va + PAGE_SIZE);
+
+ /*
+ * Compute the HTAB index.
+ */
+ idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
+ /*
+ * Construct the PTE.
+ *
+ * Note: Don't set the valid bit for correct operation of tlb update.
+ */
+ pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
+ | ((va & ADDR_PIDX) >> ADDR_API_SHFT);
+ pte.pte_lo = (pa & PTE_RPGN) | PTE_M | PTE_I | PTE_G;
+
+ for (mp = mem; mp->size; mp++) {
+ if (pa >= mp->start && pa < mp->start + mp->size) {
+ pte.pte_lo &= ~(PTE_I | PTE_G);
+ break;
+ }
+ }
+ if (prot & VM_PROT_WRITE) {
+ pte.pte_lo |= PTE_RW;
+ } else {
+ pte.pte_lo |= PTE_RO;
+ }
+
+ /*
+ * Now record mapping for later back-translation.
+ */
+ if (pmap_initialized && (pg->flags & PG_FICTITIOUS) == 0) {
+ if (pmap_enter_pv(idx, va, pa)) {
+ /*
+ * Flush the real memory from the cache.
+ */
+ __syncicache((void *)pa, PAGE_SIZE);
+ }
+ }
+
+ s = splimp();
+ pm->pm_stats.resident_count++;
+ /*
+ * Try to insert directly into HTAB.
+ */
+ if (pte_insert(idx, &pte)) {
+ splx(s);
+ return;
+ }
+
+ /*
+ * Have to allocate overflow entry.
+ *
+	 * Note that we must use real addresses for these.
+ */
+ po = poalloc();
+ po->po_pte = pte;
+ LIST_INSERT_HEAD(potable + idx, po, po_list);
+ splx(s);
+}
+
+void
+pmap_kenter(vm_offset_t va, vm_offset_t pa)
+{
+ struct vm_page pg;
+
+ pg.phys_addr = pa;
+ pmap_enter(kernel_pmap, va, &pg, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+}
+
+void
+pmap_kremove(vm_offset_t va)
+{
+ pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
+}
+
+/*
+ * Remove the given range of mapping entries.
+ */
+void
+pmap_remove(struct pmap *pm, vm_offset_t va, vm_offset_t endva)
+{
+ int idx, i, s;
+ sr_t sr;
+ pte_t *ptp;
+ struct pte_ovfl *po, *npo;
+
+ s = splimp();
+ while (va < endva) {
+ idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if (ptematch(ptp, sr, va, PTE_VALID)) {
+ pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(va);
+ tlbsync();
+ pm->pm_stats.resident_count--;
+ }
+ }
+ for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
+ ptp++) {
+ if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
+ pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(va);
+ tlbsync();
+ pm->pm_stats.resident_count--;
+ }
+ }
+ for (po = potable[idx].lh_first; po; po = npo) {
+ npo = po->po_list.le_next;
+ if (ptematch(&po->po_pte, sr, va, 0)) {
+ pmap_remove_pv(idx, va, po->po_pte.pte_lo,
+ &po->po_pte);
+ LIST_REMOVE(po, po_list);
+ pofree(po, 1);
+ pm->pm_stats.resident_count--;
+ }
+ }
+ va += PAGE_SIZE;
+ }
+ splx(s);
+}
+
+static pte_t *
+pte_find(struct pmap *pm, vm_offset_t va)
+{
+ int idx, i;
+ sr_t sr;
+ pte_t *ptp;
+ struct pte_ovfl *po;
+
+ idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if (ptematch(ptp, sr, va, PTE_VALID)) {
+ return ptp;
+ }
+ }
+ for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++) {
+ if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
+ return ptp;
+ }
+ }
+ for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
+ if (ptematch(&po->po_pte, sr, va, 0)) {
+ return &po->po_pte;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Get the physical page address for the given pmap/virtual address.
+ */
+vm_offset_t
+pmap_extract(pmap_t pm, vm_offset_t va)
+{
+ pte_t *ptp;
+ int s;
+
+ s = splimp();
+
+ if (!(ptp = pte_find(pm, va))) {
+ splx(s);
+ return (0);
+ }
+ splx(s);
+ return ((ptp->pte_lo & PTE_RPGN) | (va & ADDR_POFF));
+}
+
+/*
+ * Lower the protection on the specified range of this pmap.
+ *
+ * There are only two cases: either the protection is going to 0,
+ * or it is going to read-only.
+ */
+void
+pmap_protect(struct pmap *pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+{
+ pte_t *ptp;
+ int valid, s;
+
+ if (prot & VM_PROT_READ) {
+ s = splimp();
+ while (sva < eva) {
+ ptp = pte_find(pm, sva);
+ if (ptp) {
+ valid = ptp->pte_hi & PTE_VALID;
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(sva);
+ tlbsync();
+ ptp->pte_lo &= ~PTE_PP;
+ ptp->pte_lo |= PTE_RO;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= valid;
+ }
+ sva += PAGE_SIZE;
+ }
+ splx(s);
+ return;
+ }
+ pmap_remove(pm, sva, eva);
+}
+
+boolean_t
+ptemodify(vm_page_t pg, u_int mask, u_int val)
+{
+ vm_offset_t pa;
+ struct pv_entry *pv;
+ pte_t *ptp;
+ struct pte_ovfl *po;
+ int i, s;
+ char *attr;
+ int rv;
+
+ pa = VM_PAGE_TO_PHYS(pg);
+
+ /*
+ * First modify bits in cache.
+ */
+ attr = pa_to_attr(pa);
+ if (attr == NULL) {
+ return FALSE;
+ }
+
+ *attr &= ~mask >> ATTRSHFT;
+ *attr |= val >> ATTRSHFT;
+
+ pv = pa_to_pv(pa);
+ if (pv->pv_idx < 0) {
+ return FALSE;
+ }
+
+ rv = FALSE;
+ s = splimp();
+ for (; pv; pv = pv->pv_next) {
+ for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(pv->pv_va);
+ tlbsync();
+ rv |= ptp->pte_lo & mask;
+ ptp->pte_lo &= ~mask;
+ ptp->pte_lo |= val;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= PTE_VALID;
+ }
+ }
+ for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
+ --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(pv->pv_va);
+ tlbsync();
+ rv |= ptp->pte_lo & mask;
+ ptp->pte_lo &= ~mask;
+ ptp->pte_lo |= val;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= PTE_VALID;
+ }
+ }
+ for (po = potable[pv->pv_idx].lh_first; po;
+ po = po->po_list.le_next) {
+ if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
+				rv |= po->po_pte.pte_lo & mask;
+ po->po_pte.pte_lo &= ~mask;
+ po->po_pte.pte_lo |= val;
+ }
+ }
+ }
+ splx(s);
+ return rv != 0;
+}
+
+int
+ptebits(vm_page_t pg, int bit)
+{
+ struct pv_entry *pv;
+ pte_t *ptp;
+ struct pte_ovfl *po;
+ int i, s, bits;
+ char *attr;
+ vm_offset_t pa;
+
+ bits = 0;
+ pa = VM_PAGE_TO_PHYS(pg);
+
+ /*
+ * First try the cache.
+ */
+ attr = pa_to_attr(pa);
+ if (attr == NULL) {
+ return 0;
+ }
+ bits |= (*attr << ATTRSHFT) & bit;
+ if (bits == bit) {
+ return bits;
+ }
+
+ pv = pa_to_pv(pa);
+ if (pv->pv_idx < 0) {
+ return 0;
+ }
+
+ s = splimp();
+ for (; pv; pv = pv->pv_next) {
+ for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ bits |= ptp->pte_lo & bit;
+ if (bits == bit) {
+ splx(s);
+ return bits;
+ }
+ }
+ }
+ for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
+ --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ bits |= ptp->pte_lo & bit;
+ if (bits == bit) {
+ splx(s);
+ return bits;
+ }
+ }
+ }
+ for (po = potable[pv->pv_idx].lh_first; po;
+ po = po->po_list.le_next) {
+ if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
+ bits |= po->po_pte.pte_lo & bit;
+ if (bits == bit) {
+ splx(s);
+ return bits;
+ }
+ }
+ }
+ }
+ splx(s);
+ return bits;
+}
+
+/*
+ * Lower the protection on the specified physical page.
+ *
+ * There are only two cases: either the protection is going to 0,
+ * or it is going to read-only.
+ */
+void
+pmap_page_protect(vm_page_t m, vm_prot_t prot)
+{
+ vm_offset_t pa;
+ vm_offset_t va;
+ pte_t *ptp;
+ struct pte_ovfl *po, *npo;
+ int i, s, idx;
+ struct pv_entry *pv;
+
+ pa = VM_PAGE_TO_PHYS(m);
+
+ pa &= ~ADDR_POFF;
+ if (prot & VM_PROT_READ) {
+ ptemodify(m, PTE_PP, PTE_RO);
+ return;
+ }
+
+ pv = pa_to_pv(pa);
+ if (pv == NULL) {
+ return;
+ }
+
+ s = splimp();
+ while (pv->pv_idx >= 0) {
+ idx = pv->pv_idx;
+ va = pv->pv_va;
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ pmap_remove_pv(idx, va, pa, ptp);
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(va);
+ tlbsync();
+ goto next;
+ }
+ }
+ for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
+ ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ pmap_remove_pv(idx, va, pa, ptp);
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(va);
+ tlbsync();
+ goto next;
+ }
+ }
+ for (po = potable[idx].lh_first; po; po = npo) {
+ npo = po->po_list.le_next;
+ if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
+ pmap_remove_pv(idx, va, pa, &po->po_pte);
+ LIST_REMOVE(po, po_list);
+ pofree(po, 1);
+ goto next;
+ }
+ }
+next:
+		;
+ }
+ splx(s);
+}
+
+/*
+ * Activate the address space for the specified process. If the process
+ * is the current process, load the new MMU context.
+ */
+void
+pmap_activate(struct proc *p)
+{
+ struct pcb *pcb;
+ pmap_t pmap;
+ pmap_t rpm;
+ int psl, i, ksr, seg;
+
+ pcb = &p->p_addr->u_pcb;
+ pmap = p->p_vmspace->vm_map.pmap;
+
+ /*
+ * XXX Normally performed in cpu_fork().
+ */
+ if (pcb->pcb_pm != pmap) {
+ pcb->pcb_pm = pmap;
+		pcb->pcb_pmreal = (pmap_t)pmap_extract(kernel_pmap,
+		    (vm_offset_t)pcb->pcb_pm);
+ }
+
+ if (p == curproc) {
+ /* Disable interrupts while switching. */
+ __asm __volatile("mfmsr %0" : "=r"(psl) :);
+ psl &= ~PSL_EE;
+ __asm __volatile("mtmsr %0" :: "r"(psl));
+
+#if 0 /* XXX */
+ /* Store pointer to new current pmap. */
+ curpm = pcb->pcb_pmreal;
+#endif
+
+ /* Save kernel SR. */
+ __asm __volatile("mfsr %0,14" : "=r"(ksr) :);
+
+ /*
+ * Set new segment registers. We use the pmap's real
+ * address to avoid accessibility problems.
+ */
+ rpm = pcb->pcb_pmreal;
+ for (i = 0; i < 16; i++) {
+ seg = rpm->pm_sr[i];
+ __asm __volatile("mtsrin %0,%1"
+ :: "r"(seg), "r"(i << ADDR_SR_SHFT));
+ }
+
+ /* Restore kernel SR. */
+ __asm __volatile("mtsr 14,%0" :: "r"(ksr));
+
+ /* Interrupts are OK again. */
+ psl |= PSL_EE;
+ __asm __volatile("mtmsr %0" :: "r"(psl));
+ }
+}
+
+/*
+ * Add a list of wired pages to the kva.  This routine is only used
+ * for temporary kernel mappings that do not need to have page
+ * modification or references recorded.  Note that old mappings are
+ * simply written over.  The page *must* be wired.
+ */
+void
+pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vm_offset_t tva = va + i * PAGE_SIZE;
+ pmap_kenter(tva, VM_PAGE_TO_PHYS(m[i]));
+ }
+}
+
+/*
+ * This routine removes page mappings from the kernel -- it is meant
+ * only for temporary mappings.
+ */
+void
+pmap_qremove(vm_offset_t va, int count)
+{
+	int i;
+
+	/*
+	 * There is no linear page table with the hashed MMU, so back
+	 * these mappings out through pmap_kremove() instead of poking
+	 * at PTEs directly.
+	 */
+	for (i = 0; i < count; i++) {
+		pmap_kremove(va);
+		va += PAGE_SIZE;
+	}
+}
+
+/*
+ * pmap_ts_referenced:
+ *
+ * Return the count of reference bits for a page, clearing all of them.
+ */
+int
+pmap_ts_referenced(vm_page_t m)
+{
+
+ /* XXX: coming soon... */
+ return (0);
+}
+
+/*
+ * this routine returns true if a physical page resides
+ * in the given pmap.
+ */
+boolean_t
+pmap_page_exists(pmap_t pmap, vm_page_t m)
+{
+#if 0 /* XXX: This must go! */
+ register pv_entry_t pv;
+ int s;
+
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ return FALSE;
+
+ s = splvm();
+
+ /*
+ * Not found, check current mappings returning immediately if found.
+ */
+ for (pv = pv_table; pv; pv = pv->pv_next) {
+ if (pv->pv_pmap == pmap) {
+ splx(s);
+ return TRUE;
+ }
+ }
+ splx(s);
+#endif
+ return (FALSE);
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * For now, VM is already on, we only need to map the
+ * specified memory.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
+{
+ vm_offset_t sva, va;
+
+ sva = *virt;
+ va = sva;
+
+ while (start < end) {
+ pmap_kenter(va, start);
+ va += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+
+ *virt = va;
+ return (sva);
+}
+
+vm_offset_t
+pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
+{
+
+ return (addr);
+}
+
+int
+pmap_mincore(pmap_t pmap, vm_offset_t addr)
+{
+
+ /* XXX: coming soon... */
+ return (0);
+}
+
+void
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+ vm_pindex_t pindex, vm_size_t size, int limit)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_growkernel(vm_offset_t addr)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+/*
+ * Initialize the address space (zone) for the pv_entries. Set a
+ * high water mark so that the system can recover from excessive
+ * numbers of pv entries.
+ */
+void
+pmap_init2()
+{
+ pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size;
+ pv_entry_high_water = 9 * (pv_entry_max / 10);
+ zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
+}
+
+void
+pmap_swapin_proc(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_swapout_proc(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_new_proc(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable)
+{
+
+ return;
+}
+
+void
+pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_pinit0(pmap_t pmap)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_dispose_proc(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+vm_offset_t
+pmap_steal_memory(vm_size_t size)
+{
+ vm_size_t bank_size;
+ vm_offset_t pa;
+
+ size = round_page(size);
+
+ bank_size = phys_avail[1] - phys_avail[0];
+ while (size > bank_size) {
+ int i;
+ for (i = 0; phys_avail[i+2]; i+= 2) {
+ phys_avail[i] = phys_avail[i+2];
+ phys_avail[i+1] = phys_avail[i+3];
+ }
+ phys_avail[i] = 0;
+ phys_avail[i+1] = 0;
+ if (!phys_avail[0])
+ panic("pmap_steal_memory: out of memory");
+ bank_size = phys_avail[1] - phys_avail[0];
+ }
+
+ pa = phys_avail[0];
+ phys_avail[0] += size;
+
+ bzero((caddr_t) pa, size);
+ return pa;
+}
diff --git a/sys/powerpc/aim/ofw_machdep.c b/sys/powerpc/aim/ofw_machdep.c
new file mode 100644
index 0000000..dcdc2dd
--- /dev/null
+++ b/sys/powerpc/aim/ofw_machdep.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 1996 Wolfgang Solfrank.
+ * Copyright (C) 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: ofw_machdep.c,v 1.5 2000/05/23 13:25:43 tsubai Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/disk.h>
+#include <sys/disklabel.h>
+#include <sys/fcntl.h>
+#include <sys/malloc.h>
+#include <sys/stat.h>
+
+#include <dev/ofw/openfirm.h>
+
+#include <machine/powerpc.h>
+
+#define OFMEM_REGIONS 32
+static struct mem_region OFmem[OFMEM_REGIONS + 1], OFavail[OFMEM_REGIONS + 3];
+
+extern long ofmsr;
+static int (*ofwcall)(void *);
+
+/*
+ * This is called during powerpc_init, before the system is really initialized.
+ * It shall provide the total and the available regions of RAM.
+ * Both lists must have a zero-size entry as terminator.
+ * The available regions need not take the kernel into account, but must
+ * provide space for two additional entries beyond the terminating one.
+ */
+void
+mem_regions(struct mem_region **memp, struct mem_region **availp)
+{
+ int phandle /*, i, j, cnt*/;
+
+ /*
+ * Get memory.
+ */
+ if ((phandle = OF_finddevice("/memory")) == -1
+ || OF_getprop(phandle, "reg",
+ OFmem, sizeof OFmem[0] * OFMEM_REGIONS)
+ <= 0
+ || OF_getprop(phandle, "available",
+ OFavail, sizeof OFavail[0] * OFMEM_REGIONS)
+ <= 0)
+ panic("no memory?");
+ *memp = OFmem;
+ *availp = OFavail;
+}
+
+void
+set_openfirm_callback(int (*openfirm)(void *))
+{
+
+ ofwcall = openfirm;
+}
+
+int
+openfirmware(void *args)
+{
+ long oldmsr;
+ int result;
+
+ __asm( "\t"
+ "mfmsr %0\n\t"
+ "mtmsr %1\n\t"
+ "isync\n"
+ : "=r" (oldmsr)
+ : "r" (ofmsr)
+ );
+
+ result = ofwcall(args);
+
+ __asm( "\t"
+ "mtmsr %0\n\t"
+ "isync\n"
+ : : "r" (oldmsr)
+ );
+
+ return (result);
+}
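+
+/*
+ * Usage sketch (hypothetical caller -- the real argument blocks are
+ * built by the dev/ofw code): requests use the IEEE 1275 client
+ * interface layout of service name, cell counts, then argument cells:
+ *
+ *	static struct {
+ *		char	*name;		-- service, e.g. "exit"
+ *		int	nargs;		-- number of input cells
+ *		int	nreturns;	-- number of output cells
+ *	} args = { "exit", 0, 0 };
+ *
+ *	openfirmware(&args);
+ */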
+
+void
+ppc_exit()
+{
+
+ OF_exit();
+}
+
+void
+ppc_boot(str)
+ char *str;
+{
+
+ OF_boot(str);
+}
diff --git a/sys/powerpc/aim/ofwmagic.S b/sys/powerpc/aim/ofwmagic.S
new file mode 100644
index 0000000..890e86e
--- /dev/null
+++ b/sys/powerpc/aim/ofwmagic.S
@@ -0,0 +1,75 @@
+/* $FreeBSD$ */
+/* $NetBSD: ofwmagic.S,v 1.2 1997/10/09 08:38:18 jtc Exp $ */
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Magic note section used by OpenFirmware.
+ */
+
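+/*
+ * The fields below follow the ELF note layout: name length, descriptor
+ * length, note type, NUL-padded name, then the descriptor itself.  A
+ * value of -1 leaves the corresponding choice to the firmware.
+ */
+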
+ .section ".note"
+
+ # note header
+
+ # length of name
+ .long 8
+
+ # note descriptor size
+ .long 20
+
+ # note type (IEEE 1275)
+ .long 0x1275
+
+ # name of owner
+ .asciz "PowerPC"
+ .balign 4
+
+
+ # note descriptor
+
+ # real mode (-1) or virtual mode (0)
+ .long 0
+
+ # real-base
+ .long -1
+ # real-size
+ .long -1
+
+ # virt-base
+ .long -1
+ # virt-size
+ .long -1
diff --git a/sys/powerpc/aim/swtch.S b/sys/powerpc/aim/swtch.S
new file mode 100644
index 0000000..34bdb85
--- /dev/null
+++ b/sys/powerpc/aim/swtch.S
@@ -0,0 +1,249 @@
+/* $FreeBSD$ */
+/* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */
+
+/*
+ * Copyright (C) 2001 Benno Rice
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "assym.s"
+
+#include <sys/syscall.h>
+
+#include <machine/trap.h>
+#include <machine/param.h>
+#include <machine/pmap.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+
+/*
+ * Some instructions gas doesn't understand (yet?)
+ */
+#define bdneq bdnzf 2,
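+/* i.e. decrement CTR, then branch if CTR != 0 and the CR0 EQ bit is clear */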
+
+/*
+ * No processes are runnable, so loop waiting for one.
+ * Separate label here for accounting purposes.
+ */
+#if 0 /* XXX: I think this is now unneeded. Leaving it in just in case. */
+ASENTRY(Idle)
+ mfmsr 3
+ andi. 3,3,~PSL_EE@l /* disable interrupts while
+ manipulating runque */
+ mtmsr 3
+
+ lis 8,sched_whichqs@ha
+ lwz 9,sched_whichqs@l(8)
+
+ or. 9,9,9
+ bne- .Lsw1 /* at least one queue non-empty */
+
+ ori 3,3,PSL_EE@l /* reenable ints again */
+ mtmsr 3
+ isync
+
+/* Check if we can use power saving mode */
+ lis 8,powersave@ha
+ lwz 9,powersave@l(8)
+
+ or. 9,9,9
+ beq 1f
+
+ sync
+ oris 3,3,PSL_POW@h /* enter power saving mode */
+ mtmsr 3
+ isync
+1:
+ b _ASM_LABEL(Idle)
+#endif /* XXX */
+
+/*
+ * switchexit gets called from cpu_exit to complete the exit procedure.
+ */
+ENTRY(switchexit)
+/* First switch to the idle pcb/kernel stack */
+#if 0 /* XXX */
+ lis 6,idle_u@ha
+ lwz 6,idle_u@l(6)
+ mfsprg 7,0
+ stw 6,GD_CURPCB(7)
+#endif
+ addi 1,6,USPACE-16 /* 16 bytes are reserved at stack top */
+ /*
+ * Schedule the vmspace and stack to be freed (the proc arg is
+ * already in r3).
+ */
+ bl sys_exit
+
+/* Fall through to cpu_switch to actually select another proc */
+ li 3,0 /* indicate exited process */
+
+/*
+ * void cpu_switch(struct proc *p)
+ * Find a runnable process and switch to it.
+ */
+/* XXX noprofile? --thorpej@netbsd.org */
+ENTRY(cpu_switch)
+ mflr 0 /* save lr */
+ stw 0,4(1)
+ stwu 1,-16(1)
+ stw 31,12(1)
+ stw 30,8(1)
+
+ mr 30,3
+ mfsprg 3,0
+ xor 31,31,31
+ stw 31,GD_CURPROC(3) /* Zero to not accumulate cpu time */
+ mfsprg 3,0
+ lwz 31,GD_CURPCB(3)
+
+ xor 3,3,3
+#if 0 /* XXX */
+ bl lcsplx
+#endif
+ stw 3,PCB_SPL(31) /* save spl */
+
+/* Find a new process */
+ bl chooseproc
+
+1:
+ /* just did this resched thing */
+ xor 3,3,3
+ lis 4,want_resched@ha
+ stw 3,want_resched@l(4)
+
+ /* record new process */
+ mfsprg 4,0
+ stw 3,GD_CURPROC(4)
+
+ cmpl 0,31,30 /* is it the same process? */
+ beq switch_return
+
+ or. 30,30,30 /* old process was exiting? */
+ beq switch_exited
+
+ mfsr 10,USER_SR /* save USER_SR for copyin/copyout */
+ mfcr 11 /* save cr */
+ mr 12,2 /* save r2 */
+ stwu 1,-SFRAMELEN(1) /* still running on old stack */
+ stmw 10,8(1)
+ lwz 3,P_ADDR(30)
+ stw 1,PCB_SP(3) /* save SP */
+
+switch_exited:
+ mfmsr 3
+ andi. 3,3,~PSL_EE@l /* disable interrupts while
+ actually switching */
+ mtmsr 3
+
+ /* indicate new pcb */
+ lwz 4,P_ADDR(31)
+ mfsprg 5,0
+ stw 4,GD_CURPCB(5)
+
+#if 0 /* XXX */
+ /* save real pmap pointer for spill fill */
+ lwz 5,PCB_PMR(4)
+ lis 6,curpm@ha
+ stwu 5,curpm@l(6)
+ stwcx. 5,0,6 /* clear possible reservation */
+#endif
+
+ addic. 5,5,64
+ li 6,0
+ mfsr 8,KERNEL_SR /* save kernel SR */
+1:
+ addis 6,6,-0x10000000@ha /* set new procs segment registers */
+ or. 6,6,6 /* This is done from the real
+ address pmap */
+ lwzu 7,-4(5) /* so we don't have to worry */
+ mtsrin 7,6 /* about accessibility */
+ bne 1b
+ mtsr KERNEL_SR,8 /* restore kernel SR */
+ isync
+
+ lwz 1,PCB_SP(4) /* get new procs SP */
+
+ ori 3,3,PSL_EE@l /* interrupts are okay again */
+ mtmsr 3
+
+ lmw 10,8(1) /* get other regs */
+ lwz 1,0(1) /* get saved SP */
+ mr 2,12 /* get saved r2 */
+ mtcr 11 /* get saved cr */
+ isync
+ mtsr USER_SR,10 /* get saved USER_SR */
+ isync
+
+switch_return:
+ mr 30,7 /* save proc pointer */
+ lwz 3,PCB_SPL(4)
+#if 0 /* XXX */
+ bl lcsplx
+#endif
+
+ mr 3,30 /* get curproc for special fork
+ returns */
+
+ lwz 31,12(1)
+ lwz 30,8(1)
+ addi 1,1,16
+ lwz 0,4(1)
+ mtlr 0
+ blr
+
+/*
+ * Fake savectx for the time being.
+ */
+ENTRY(savectx)
+ blr
diff --git a/sys/powerpc/aim/trap.c b/sys/powerpc/aim/trap.c
new file mode 100644
index 0000000..408ed44
--- /dev/null
+++ b/sys/powerpc/aim/trap.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: trap.c,v 1.26 2000/05/27 00:40:40 sommerfeld Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include "opt_ddb.h"
+#include "opt_ktrace.h"
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/reboot.h>
+#include <sys/syscall.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/user.h>
+#include <sys/ktrace.h>
+
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/pmap.h>
+
+#include <machine/cpu.h>
+#include <machine/frame.h>
+#include <machine/pcb.h>
+#include <machine/psl.h>
+#include <machine/trap.h>
+
+/* These definitions should probably be somewhere else XXX */
+#define FIRSTARG 3 /* first argument is in reg 3 */
+#define NARGREG 8 /* 8 args are in registers */
+#define MOREARGS(sp) ((caddr_t)((int)(sp) + 8)) /* more args go here */
+
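+/*
+ * Illustrative (hypothetical) worked example, not part of the original
+ * import: in the SYS___syscall case of trap() below, the quad syscall
+ * code consumes two argument words, leaving n = NARGREG - 2 = 6
+ * register arguments.  A syscall with argsize == 10 * sizeof(register_t)
+ * therefore copies its last four words from the user stack at
+ * MOREARGS(sp), i.e. sp + 8.
+ */
+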
+volatile int astpending;
+volatile int want_resched;
+
+#if 0 /* XXX: not used yet */
+static int fix_unaligned __P((struct proc *p, struct trapframe *frame));
+#endif
+
+void
+trap(struct trapframe *frame)
+{
+#if 0 /* XXX: This code hasn't been reworked yet. */
+ struct proc *p;
+ int type;
+ u_quad_t sticks;
+
+ p = curproc;
+ type = frame->exc;
+
+ if (frame->srr1 & PSL_PR) {
+ type |= EXC_USER;
+ sticks = p->p_sticks;
+ }
+
+ switch (type) {
+ case EXC_TRC|EXC_USER:
+ frame->srr1 &= ~PSL_SE;
+ trapsignal(p, SIGTRAP, EXC_TRC);
+ break;
+ case EXC_DSI:
+ {
+ vm_map_t map;
+ vaddr_t va;
+ int ftype;
+ faultbuf *fb;
+
+ map = kernel_map;
+ va = frame->dar;
+ if ((va >> ADDR_SR_SHFT) == USER_SR) {
+ sr_t user_sr;
+
+ __asm ("mfsr %0, %1"
+ : "=r"(user_sr) : "K"(USER_SR));
+ va &= ADDR_PIDX | ADDR_POFF;
+ va |= user_sr << ADDR_SR_SHFT;
+ map = &p->p_vmspace->vm_map;
+ }
+ if (frame->dsisr & DSISR_STORE)
+ ftype = VM_PROT_READ | VM_PROT_WRITE;
+ else
+ ftype = VM_PROT_READ;
+ if (uvm_fault(map, trunc_page(va), 0, ftype)
+ == KERN_SUCCESS)
+ return;
+ if (fb = p->p_addr->u_pcb.pcb_onfault) {
+ frame->srr0 = (*fb)[0];
+ frame->fixreg[1] = (*fb)[1];
+ frame->fixreg[2] = (*fb)[2];
+ frame->cr = (*fb)[3];
+ bcopy(&(*fb)[4], &frame->fixreg[13],
+ 19 * sizeof(register_t));
+ return;
+ }
+ map = kernel_map;
+ }
+ goto brain_damage;
+ case EXC_DSI|EXC_USER:
+ {
+ int ftype, rv;
+
+ if (frame->dsisr & DSISR_STORE)
+ ftype = VM_PROT_READ | VM_PROT_WRITE;
+ else
+ ftype = VM_PROT_READ;
+ if ((rv = uvm_fault(&p->p_vmspace->vm_map,
+ trunc_page(frame->dar), 0, ftype))
+ == KERN_SUCCESS)
+ break;
+ if (rv == KERN_RESOURCE_SHORTAGE) {
+ printf("UVM: pid %d (%s), uid %d killed: "
+ "out of swap\n",
+ p->p_pid, p->p_comm,
+ p->p_cred && p->p_ucred ?
+ p->p_ucred->cr_uid : -1);
+ trapsignal(p, SIGKILL, EXC_DSI);
+ } else {
+ trapsignal(p, SIGSEGV, EXC_DSI);
+ }
+ }
+ break;
+ case EXC_ISI|EXC_USER:
+ {
+ int ftype;
+
+ ftype = VM_PROT_READ | VM_PROT_EXECUTE;
+ if (uvm_fault(&p->p_vmspace->vm_map,
+ trunc_page(frame->srr0), 0, ftype)
+ == KERN_SUCCESS)
+ break;
+ }
+ trapsignal(p, SIGSEGV, EXC_ISI);
+ break;
+ case EXC_SC|EXC_USER:
+ {
+ struct sysent *callp;
+ size_t argsize;
+ register_t code, error;
+ register_t *params, rval[2];
+ int nsys, n;
+ register_t args[10];
+
+ uvmexp.syscalls++;
+
+ nsys = p->p_emul->e_nsysent;
+ callp = p->p_emul->e_sysent;
+
+ code = frame->fixreg[0];
+ params = frame->fixreg + FIRSTARG;
+
+ switch (code) {
+ case SYS_syscall:
+ /*
+ * code is first argument,
+ * followed by actual args.
+ */
+ code = *params++;
+ break;
+ case SYS___syscall:
+ /*
+ * Like syscall, but code is a quad,
+ * so as to maintain quad alignment
+ * for the rest of the args.
+ */
+ if (callp != sysent)
+ break;
+ params++;
+ code = *params++;
+ break;
+ default:
+ break;
+ }
+ if (code < 0 || code >= nsys)
+ callp += p->p_emul->e_nosys;
+ else
+ callp += code;
+ argsize = callp->sy_argsize;
+ n = NARGREG - (params - (frame->fixreg + FIRSTARG));
+ if (argsize > n * sizeof(register_t)) {
+ bcopy(params, args, n * sizeof(register_t));
+ if (error = copyin(MOREARGS(frame->fixreg[1]),
+ args + n,
+ argsize - n * sizeof(register_t))) {
+#ifdef KTRACE
+ /* Can't get all the arguments! */
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p, code, argsize,
+ args);
+#endif
+ goto syscall_bad;
+ }
+ params = args;
+ }
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p, code, argsize, params);
+#endif
+ rval[0] = 0;
+ rval[1] = frame->fixreg[FIRSTARG + 1];
+
+ switch (error = (*callp->sy_call)(p, params, rval)) {
+ case 0:
+ frame->fixreg[FIRSTARG] = rval[0];
+ frame->fixreg[FIRSTARG + 1] = rval[1];
+ frame->cr &= ~0x10000000;
+ break;
+ case ERESTART:
+ /*
+ * Set user's pc back to redo the system call.
+ */
+ frame->srr0 -= 4;
+ break;
+ case EJUSTRETURN:
+ /* nothing to do */
+ break;
+ default:
+syscall_bad:
+ if (p->p_emul->e_errno)
+ error = p->p_emul->e_errno[error];
+ frame->fixreg[FIRSTARG] = error;
+ frame->cr |= 0x10000000;
+ break;
+ }
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p, code, error, rval[0]);
+#endif
+ }
+ break;
+
+ case EXC_FPU|EXC_USER:
+ if (fpuproc)
+ save_fpu(fpuproc);
+ fpuproc = p;
+ enable_fpu(p);
+ break;
+
+ case EXC_AST|EXC_USER:
+		/* This is just here so that we trap. */
+ break;
+
+ case EXC_ALI|EXC_USER:
+ if (fix_unaligned(p, frame) != 0)
+ trapsignal(p, SIGBUS, EXC_ALI);
+ else
+ frame->srr0 += 4;
+ break;
+
+ case EXC_PGM|EXC_USER:
+/* XXX temporarily */
+		if (frame->srr1 & 0x00020000)	/* SRR1 trap bit set? */
+ trapsignal(p, SIGTRAP, EXC_PGM);
+ else
+ trapsignal(p, SIGILL, EXC_PGM);
+ break;
+
+ case EXC_MCHK:
+ {
+ faultbuf *fb;
+
+ if (fb = p->p_addr->u_pcb.pcb_onfault) {
+ frame->srr0 = (*fb)[0];
+ frame->fixreg[1] = (*fb)[1];
+ frame->fixreg[2] = (*fb)[2];
+ frame->cr = (*fb)[3];
+ bcopy(&(*fb)[4], &frame->fixreg[13],
+ 19 * sizeof(register_t));
+ return;
+ }
+ }
+ goto brain_damage;
+
+ default:
+brain_damage:
+ printf("trap type %x at %x\n", type, frame->srr0);
+#ifdef DDB
+ Debugger(); /* XXX temporarily */
+#endif
+#ifdef TRAP_PANICWAIT
+ printf("Press a key to panic.\n");
+ cngetc();
+#endif
+ panic("trap");
+ }
+
+ astpending = 0; /* we are about to do it */
+
+ uvmexp.softs++;
+
+ if (p->p_flag & P_OWEUPC) {
+ p->p_flag &= ~P_OWEUPC;
+ ADDUPROF(p);
+ }
+
+ /* take pending signals */
+ {
+ int sig;
+
+ while (sig = CURSIG(p))
+ postsig(sig);
+ }
+
+ p->p_priority = p->p_usrpri;
+ if (want_resched) {
+ int sig;
+ /*
+ * We are being preempted.
+ */
+ preempt(NULL);
+ while (sig = CURSIG(p))
+ postsig(sig);
+ }
+
+ /*
+ * If profiling, charge recent system time to the trapped pc.
+ */
+ if (p->p_flag & P_PROFIL) {
+ extern int psratio;
+
+ addupc_task(p, frame->srr0,
+ (int)(p->p_sticks - sticks) * psratio);
+ }
+ /*
+ * If someone stole the fpu while we were away, disable it
+ */
+ if (p != fpuproc)
+ frame->srr1 &= ~PSL_FP;
+ curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
+#endif
+}
+
+#if 0 /* XXX: child_return not used */
+void
+child_return(void *arg)
+{
+ struct proc *p;
+ struct trapframe *tf;
+
+ p = arg;
+ tf = trapframe(p);
+
+ tf->fixreg[FIRSTARG] = 0;
+ tf->fixreg[FIRSTARG + 1] = 1;
+ tf->cr &= ~0x10000000;
+ tf->srr1 &= ~PSL_FP; /* Disable FPU, as we can't be fpuproc */
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p, SYS_fork, 0, 0);
+#endif
+ /* Profiling? XXX */
+ curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
+}
+#endif
+
+static __inline void
+setusr(int content)
+{
+
+ __asm __volatile ("isync; mtsr %0,%1; isync"
+ :: "n"(USER_SR), "r"(content));
+}
+
+int
+copyin(udaddr, kaddr, len)
+ const void *udaddr;
+ void *kaddr;
+ size_t len;
+{
+ const char *up;
+ char *kp;
+ char *p;
+ size_t l;
+ faultbuf env;
+
+ up = udaddr;
+ kp = kaddr;
+
+#if 0
+ if (setfault(env)) {
+ curpcb->pcb_onfault = 0;
+ return EFAULT;
+ }
+#endif
+ while (len > 0) {
+ p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
+ l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
+ if (l > len)
+ l = len;
+ setusr(curpcb->pcb_pm->pm_sr[(u_int)up >> ADDR_SR_SHFT]);
+ bcopy(p, kp, l);
+ up += l;
+ kp += l;
+ len -= l;
+ }
+ curpcb->pcb_onfault = 0;
+ return 0;
+}
+
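+/*
+ * Illustrative (hypothetical) worked example, not part of the original
+ * import, assuming SEGMENT_MASK == 0xf0000000 and SEGMENT_LENGTH ==
+ * 0x10000000: for up == 0x2000f000 the offset within its 256MB segment
+ * is 0x0000f000, so the first pass of copyin() above copies at most
+ * 0x10000000 - 0x0000f000 bytes through the USER_SR window before
+ * setusr() installs the segment register for the next user segment.
+ */
+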
+int
+copyout(kaddr, udaddr, len)
+ const void *kaddr;
+ void *udaddr;
+ size_t len;
+{
+ const char *kp;
+ char *up;
+ char *p;
+ size_t l;
+ faultbuf env;
+
+ kp = kaddr;
+ up = udaddr;
+
+#if 0
+ if (setfault(env)) {
+ curpcb->pcb_onfault = 0;
+ return EFAULT;
+ }
+#endif
+ while (len > 0) {
+ p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
+ l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
+ if (l > len)
+ l = len;
+ setusr(curpcb->pcb_pm->pm_sr[(u_int)up >> ADDR_SR_SHFT]);
+ bcopy(kp, p, l);
+ up += l;
+ kp += l;
+ len -= l;
+ }
+ curpcb->pcb_onfault = 0;
+ return 0;
+}
+
+#if 0 /* XXX: not used yet */
+/*
+ * kcopy(const void *src, void *dst, size_t len);
+ *
+ * Copy len bytes from src to dst, aborting if we encounter a fatal
+ * page fault.
+ *
+ * kcopy() _must_ save and restore the old fault handler since it is
+ * called by uiomove(), which may be in the path of servicing a non-fatal
+ * page fault.
+ */
+int
+kcopy(const void *src, void *dst, size_t len)
+{
+ faultbuf env, *oldfault;
+
+ oldfault = curpcb->pcb_onfault;
+ if (setfault(env)) {
+ curpcb->pcb_onfault = oldfault;
+ return EFAULT;
+ }
+
+ bcopy(src, dst, len);
+
+ curpcb->pcb_onfault = oldfault;
+ return 0;
+}
+
+int
+badaddr(void *addr, size_t size)
+{
+
+ return badaddr_read(addr, size, NULL);
+}
+
+int
+badaddr_read(void *addr, size_t size, int *rptr)
+{
+ faultbuf env;
+ int x;
+
+ /* Get rid of any stale machine checks that have been waiting. */
+ __asm __volatile ("sync; isync");
+
+ if (setfault(env)) {
+ curpcb->pcb_onfault = 0;
+ __asm __volatile ("sync");
+ return 1;
+ }
+
+ __asm __volatile ("sync");
+
+ switch (size) {
+ case 1:
+ x = *(volatile int8_t *)addr;
+ break;
+ case 2:
+ x = *(volatile int16_t *)addr;
+ break;
+ case 4:
+ x = *(volatile int32_t *)addr;
+ break;
+ default:
+ panic("badaddr: invalid size (%d)", size);
+ }
+
+ /* Make sure we took the machine check, if we caused one. */
+ __asm __volatile ("sync; isync");
+
+ curpcb->pcb_onfault = 0;
+ __asm __volatile ("sync"); /* To be sure. */
+
+ /* Use the value to avoid reorder. */
+ if (rptr)
+ *rptr = x;
+
+ return 0;
+}
+#endif
+
+/*
+ * For now, this only deals with the particular unaligned access case
+ * that gcc tends to generate. Eventually it should handle all of the
+ * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
+ */
+
+#if 0 /* XXX: Not used yet */
+static int
+fix_unaligned(p, frame)
+ struct proc *p;
+ struct trapframe *frame;
+{
+ int indicator;
+
+ indicator = EXC_ALI_OPCODE_INDICATOR(frame->dsisr);
+
+ switch (indicator) {
+ case EXC_ALI_LFD:
+ case EXC_ALI_STFD:
+ {
+ int reg = EXC_ALI_RST(frame->dsisr);
+ double *fpr = &p->p_addr->u_pcb.pcb_fpu.fpr[reg];
+
+ /* Juggle the FPU to ensure that we've initialized
+ * the FPRs, and that their current state is in
+ * the PCB.
+ */
+ if (fpuproc != p) {
+ if (fpuproc)
+ save_fpu(fpuproc);
+ enable_fpu(p);
+ }
+ save_fpu(p);
+
+ if (indicator == EXC_ALI_LFD) {
+ if (copyin((void *)frame->dar, fpr,
+ sizeof(double)) != 0)
+ return -1;
+ enable_fpu(p);
+ } else {
+ if (copyout(fpr, (void *)frame->dar,
+ sizeof(double)) != 0)
+ return -1;
+ }
+ return 0;
+ }
+ break;
+ }
+
+ return -1;
+}
+#endif
+
+void
+userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)
+{
+
+ /* XXX: Coming soon */
+ return;
+}
diff --git a/sys/powerpc/include/bat.h b/sys/powerpc/include/bat.h
new file mode 100644
index 0000000..a76b383
--- /dev/null
+++ b/sys/powerpc/include/bat.h
@@ -0,0 +1,172 @@
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: bat.h,v 1.2 1999/12/18 01:36:06 thorpej Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_BAT_H_
+#define _MACHINE_BAT_H_
+
+struct bat {
+ u_int32_t batu;
+ u_int32_t batl;
+};
+
+/* Lower BAT bits (all but PowerPC 601): */
+#define BAT_PBS 0xfffe0000 /* physical block start */
+#define BAT_W 0x00000040 /* 1 = write-through, 0 = write-back */
+#define BAT_I 0x00000020 /* cache inhibit */
+#define BAT_M 0x00000010 /* memory coherency enable */
+#define BAT_G 0x00000008 /* guarded region */
+
+#define BAT_PP_NONE 0x00000000 /* no access permission */
+#define BAT_PP_RO_S 0x00000001 /* read-only (soft) */
+#define BAT_PP_RW 0x00000002 /* read/write */
+#define BAT_PP_RO 0x00000003 /* read-only */
+
+/* Upper BAT bits (all but PowerPC 601): */
+#define BAT_EBS 0xfffe0000 /* effective block start */
+#define BAT_BL 0x00001ffc /* block length */
+#define BAT_Vs 0x00000002 /* valid in supervisor mode */
+#define BAT_Vu 0x00000001 /* valid in user mode */
+
+#define BAT_V (BAT_Vs|BAT_Vu)
+
+/* Block Length encoding (all but PowerPC 601): */
+#define BAT_BL_128K 0x00000000
+#define BAT_BL_256K 0x00000004
+#define BAT_BL_512K 0x0000000c
+#define BAT_BL_1M 0x0000001c
+#define BAT_BL_2M 0x0000003c
+#define BAT_BL_4M 0x0000007c
+#define BAT_BL_8M 0x000000fc
+#define BAT_BL_16M 0x000001fc
+#define BAT_BL_32M 0x000003fc
+#define BAT_BL_64M 0x000007fc
+#define BAT_BL_128M 0x00000ffc
+#define BAT_BL_256M 0x00001ffc
+
+#define BATU(va, len, v) \
+ (((va) & BAT_EBS) | ((len) & BAT_BL) | ((v) & BAT_V))
+
+#define BATL(pa, wimg, pp) \
+ (((pa) & BAT_PBS) | (wimg) | (pp))
+
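+/*
+ * Illustrative (hypothetical) use, not part of the original import:
+ * map the first 256MB of physical memory 1:1, coherent and write-back
+ * cached, kernel read/write:
+ *
+ *	battable[0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
+ *	battable[0].batu = BATU(0x00000000, BAT_BL_256M, BAT_V);
+ */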
+
+/* Lower BAT bits (PowerPC 601): */
+#define BAT601_PBN 0xfffe0000 /* physical block number */
+#define BAT601_V 0x00000040 /* valid */
+#define BAT601_BSM 0x0000003f /* block size mask */
+
+/* Upper BAT bits (PowerPC 601): */
+#define BAT601_BLPI 0xfffe0000 /* block logical page index */
+#define BAT601_W 0x00000040 /* 1 = write-through, 0 = write-back */
+#define BAT601_I 0x00000020 /* cache inhibit */
+#define BAT601_M 0x00000010 /* memory coherency enable */
+#define BAT601_Ks 0x00000008 /* key-supervisor */
+#define BAT601_Ku 0x00000004 /* key-user */
+
+/*
+ * Permission bits on the PowerPC 601 are modified by the appropriate
+ * Key bit:
+ *
+ * Key PP Access
+ * 0 NONE read/write
+ * 0 RO_S read/write
+ * 0 RW read/write
+ * 0 RO read-only
+ *
+ * 1 NONE none
+ * 1 RO_S read-only
+ * 1 RW read/write
+ * 1 RO read-only
+ */
+#define BAT601_PP_NONE 0x00000000 /* no access permission */
+#define BAT601_PP_RO_S 0x00000001 /* read-only (soft) */
+#define BAT601_PP_RW 0x00000002 /* read/write */
+#define BAT601_PP_RO 0x00000003 /* read-only */
+
+/* Block Size Mask encoding (PowerPC 601): */
+#define BAT601_BSM_128K 0x00000000
+#define BAT601_BSM_256K 0x00000001
+#define BAT601_BSM_512K 0x00000003
+#define BAT601_BSM_1M 0x00000007
+#define BAT601_BSM_2M 0x0000000f
+#define BAT601_BSM_4M 0x0000001f
+#define BAT601_BSM_8M 0x0000003f
+
+#define BATU601(va, wim, key, pp) \
+ (((va) & BAT601_BLPI) | (wim) | (key) | (pp))
+
+#define BATL601(pa, size, v) \
+ (((pa) & BAT601_PBN) | (v) | (size))
+
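+/*
+ * Illustrative (hypothetical) 601 use, not part of the original import:
+ * an 8MB block at 0 with user accesses evaluated under key 1, which per
+ * the table above leaves BAT601_PP_RW as read/write:
+ *
+ *	batl = BATL601(0x00000000, BAT601_BSM_8M, BAT601_V);
+ *	batu = BATU601(0x00000000, 0, BAT601_Ku, BAT601_PP_RW);
+ */
+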
+#ifdef _KERNEL
+extern struct bat battable[16];
+#endif
+
+#endif /* _MACHINE_BAT_H_ */
diff --git a/sys/powerpc/include/bus.h b/sys/powerpc/include/bus.h
new file mode 100644
index 0000000..a906e35
--- /dev/null
+++ b/sys/powerpc/include/bus.h
@@ -0,0 +1,802 @@
+/*-
+ * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: bus.h,v 1.9.4.1 2000/06/30 16:27:30 simonb Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACPPC_BUS_H_
+#define _MACPPC_BUS_H_
+
+#include <machine/pio.h>
+
+/*
+ * Values for the macppc bus space tag, not to be used directly by MI code.
+ */
+
+#define __BUS_SPACE_HAS_STREAM_METHODS
+
+#define MACPPC_BUS_ADDR_MASK 0xfffff000
+#define MACPPC_BUS_STRIDE_MASK 0x0000000f
+
+#define macppc_make_bus_space_tag(addr, stride) \
+ (((addr) & MACPPC_BUS_ADDR_MASK) | (stride))
+#define __BA(t, h, o) ((void *)((h) + ((o) << ((t) & MACPPC_BUS_STRIDE_MASK))))
+
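+/*
+ * Illustrative (hypothetical) example, not part of the original import:
+ * a tag built as macppc_make_bus_space_tag(0xf3000000, 4) describes
+ * registers spaced 16 bytes apart, so __BA maps offset 2 within a
+ * handle h to (void *)((h) + (2 << 4)).
+ */
+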
+/*
+ * Bus address and size types
+ */
+typedef u_int32_t bus_addr_t;
+typedef u_int32_t bus_size_t;
+
+/*
+ * Access methods for bus resources and address space.
+ */
+typedef u_int32_t bus_space_tag_t;
+typedef u_int32_t bus_space_handle_t;
+
+/*
+ * int bus_space_map __P((bus_space_tag_t t, bus_addr_t addr,
+ * bus_size_t size, int flags, bus_space_handle_t *bshp));
+ *
+ * Map a region of bus space.
+ */
+
+#define BUS_SPACE_MAP_CACHEABLE 0x01
+#define BUS_SPACE_MAP_LINEAR 0x02
+#define BUS_SPACE_MAP_PREFETCHABLE 0x04
+
+extern void *mapiodev(vm_offset_t, vm_size_t);
+
+static __inline int
+bus_space_map(bus_space_tag_t t, bus_addr_t addr, bus_size_t size, int flags,
+ bus_space_handle_t *bshp)
+{
+ vm_offset_t base = t & MACPPC_BUS_ADDR_MASK;
+ int stride = t & MACPPC_BUS_STRIDE_MASK;
+
+ *bshp = (bus_space_handle_t)
+ mapiodev(base + (addr << stride), size << stride);
+ return 0;
+}
+
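+/*
+ * Illustrative (hypothetical) use, not part of the original import
+ * (bus_space_read_1 is defined further down in this file):
+ *
+ *	bus_space_handle_t bsh;
+ *
+ *	if (bus_space_map(tag, 0x1000, 0x100, 0, &bsh) == 0)
+ *		(void)bus_space_read_1(tag, bsh, 0);
+ */
+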
+/*
+ * int bus_space_unmap __P((bus_space_tag_t t,
+ * bus_space_handle_t bsh, bus_size_t size));
+ *
+ * Unmap a region of bus space.
+ */
+
+#define bus_space_unmap(t, bsh, size)
+
+/*
+ * int bus_space_subregion __P((bus_space_tag_t t,
+ * bus_space_handle_t bsh, bus_size_t offset, bus_size_t size,
+ * bus_space_handle_t *nbshp));
+ *
+ * Get a new handle for a subregion of an already-mapped area of bus space.
+ */
+
+#define bus_space_subregion(t, bsh, offset, size, bshp) \
+ ((*(bshp) = (bus_space_handle_t)__BA(t, bsh, offset)), 0)
+
+/*
+ * int bus_space_alloc __P((bus_space_tag_t t, bus_addr_t rstart,
+ * bus_addr_t rend, bus_size_t size, bus_size_t align,
+ * bus_size_t boundary, int flags, bus_addr_t *addrp,
+ * bus_space_handle_t *bshp));
+ *
+ * Allocate a region of bus space.
+ */
+
+#if 0
+#define bus_space_alloc(t, rs, re, s, a, b, f, ap, hp) !!! unimplemented !!!
+#endif
+
+/*
+ * int bus_space_free __P((bus_space_tag_t t,
+ * bus_space_handle_t bsh, bus_size_t size));
+ *
+ * Free a region of bus space.
+ */
+#if 0
+#define bus_space_free(t, h, s) !!! unimplemented !!!
+#endif
+
+/*
+ * u_intN_t bus_space_read_N __P((bus_space_tag_t tag,
+ * bus_space_handle_t bsh, bus_size_t offset));
+ *
+ * Read a 1, 2, 4, or 8 byte quantity from bus space
+ * described by tag/handle/offset.
+ */
+
+#define bus_space_read_1(t, h, o) (in8(__BA(t, h, o)))
+#define bus_space_read_2(t, h, o) (in16rb(__BA(t, h, o)))
+#define bus_space_read_4(t, h, o) (in32rb(__BA(t, h, o)))
+#if 0 /* Cause a link error for bus_space_read_8 */
+#define bus_space_read_8(t, h, o) !!! unimplemented !!!
+#endif
+
+#define bus_space_read_stream_1(t, h, o) (in8(__BA(t, h, o)))
+#define bus_space_read_stream_2(t, h, o) (in16(__BA(t, h, o)))
+#define bus_space_read_stream_4(t, h, o) (in32(__BA(t, h, o)))
+#if 0 /* Cause a link error for bus_space_read_stream_8 */
+#define	bus_space_read_stream_8(t, h, o)	!!! unimplemented !!!
+#endif
+
+/*
+ * void bus_space_read_multi_N __P((bus_space_tag_t tag,
+ * bus_space_handle_t bsh, bus_size_t offset,
+ * u_intN_t *addr, size_t count));
+ *
+ * Read `count' 1, 2, 4, or 8 byte quantities from bus space
+ * described by tag/handle/offset and copy into buffer provided.
+ */
+
+#define bus_space_read_multi_1(t, h, o, a, c) do { \
+ ins8(__BA(t, h, o), (a), (c)); \
+ } while (0)
+
+#define bus_space_read_multi_2(t, h, o, a, c) do { \
+ ins16rb(__BA(t, h, o), (a), (c)); \
+ } while (0)
+
+#define bus_space_read_multi_4(t, h, o, a, c) do { \
+ ins32rb(__BA(t, h, o), (a), (c)); \
+ } while (0)
+
+#if 0 /* Cause a link error for bus_space_read_multi_8 */
+#define bus_space_read_multi_8 !!! unimplemented !!!
+#endif
+
+#define bus_space_read_multi_stream_1(t, h, o, a, c) do { \
+ ins8(__BA(t, h, o), (a), (c)); \
+ } while (0)
+
+#define bus_space_read_multi_stream_2(t, h, o, a, c) do { \
+ ins16(__BA(t, h, o), (a), (c)); \
+ } while (0)
+
+#define bus_space_read_multi_stream_4(t, h, o, a, c) do { \
+ ins32(__BA(t, h, o), (a), (c)); \
+ } while (0)
+
+#if 0 /* Cause a link error for bus_space_read_multi_stream_8 */
+#define bus_space_read_multi_stream_8 !!! unimplemented !!!
+#endif
+
+/*
+ * void bus_space_read_region_N __P((bus_space_tag_t tag,
+ * bus_space_handle_t bsh, bus_size_t offset,
+ * u_intN_t *addr, size_t count));
+ *
+ * Read `count' 1, 2, 4, or 8 byte quantities from bus space
+ * described by tag/handle and starting at `offset' and copy into
+ * buffer provided.
+ */
+
+static __inline void
+bus_space_read_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr, size_t count)
+{
+ volatile u_int8_t *s = __BA(tag, bsh, offset);
+
+ while (count--)
+ *addr++ = *s++;
+ __asm __volatile("eieio; sync");
+}
+
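+/*
+ * Editorial note (not part of the original import): the lhbrx/lwbrx
+ * forms below byte-swap on load, giving little-endian device access;
+ * the _stream_ variants further down preserve bus byte order.
+ */
+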
+static __inline void
+bus_space_read_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr, size_t count)
+{
+ volatile u_int16_t *s = __BA(tag, bsh, offset);
+
+ while (count--)
+ __asm __volatile("lhbrx %0, 0, %1" :
+ "=r"(*addr++) : "r"(s++));
+ __asm __volatile("eieio; sync");
+}
+
+static __inline void
+bus_space_read_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr, size_t count)
+{
+ volatile u_int32_t *s = __BA(tag, bsh, offset);
+
+ while (count--)
+ __asm __volatile("lwbrx %0, 0, %1" :
+ "=r"(*addr++) : "r"(s++));
+ __asm __volatile("eieio; sync");
+}
+
+#if 0 /* Cause a link error for bus_space_read_region_8 */
+#define bus_space_read_region_8 !!! unimplemented !!!
+#endif
+
+static __inline void
+bus_space_read_region_stream_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr, size_t count)
+{
+ volatile u_int16_t *s = __BA(tag, bsh, offset);
+
+ while (count--)
+ *addr++ = *s++;
+ __asm __volatile("eieio; sync");
+}
+
+static __inline void
+bus_space_read_region_stream_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr, size_t count)
+{
+ volatile u_int32_t *s = __BA(tag, bsh, offset);
+
+ while (count--)
+ *addr++ = *s++;
+ __asm __volatile("eieio; sync");
+}
+
+#if 0 /* Cause a link error */
+#define bus_space_read_region_stream_8 !!! unimplemented !!!
+#endif
+
+/*
+ * void bus_space_write_N __P((bus_space_tag_t tag,
+ * bus_space_handle_t bsh, bus_size_t offset,
+ * u_intN_t value));
+ *
+ * Write the 1, 2, 4, or 8 byte value `value' to bus space
+ * described by tag/handle/offset.
+ */
+
+#define bus_space_write_1(t, h, o, v) out8(__BA(t, h, o), (v))
+#define bus_space_write_2(t, h, o, v) out16rb(__BA(t, h, o), (v))
+#define bus_space_write_4(t, h, o, v) out32rb(__BA(t, h, o), (v))
+
+#define bus_space_write_stream_1(t, h, o, v) out8(__BA(t, h, o), (v))
+#define bus_space_write_stream_2(t, h, o, v) out16(__BA(t, h, o), (v))
+#define bus_space_write_stream_4(t, h, o, v) out32(__BA(t, h, o), (v))
+
+#if 0 /* Cause a link error for bus_space_write_8 */
+#define bus_space_write_8 !!! unimplemented !!!
+#endif
+
+/*
+ * void bus_space_write_multi_N __P((bus_space_tag_t tag,
+ * bus_space_handle_t bsh, bus_size_t offset,
+ * const u_intN_t *addr, size_t count));
+ *
+ * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
+ * provided to bus space described by tag/handle/offset.
+ */
+
+#define bus_space_write_multi_1(t, h, o, a, c) do { \
+ outsb(__BA(t, h, o), (a), (c)); \
+ } while (0)
+
+#define bus_space_write_multi_2(t, h, o, a, c) do { \
+ outsw(__BA(t, h, o), (a), (c)); \
+ } while (0)
+
+#define bus_space_write_multi_4(t, h, o, a, c) do { \
+ outsl(__BA(t, h, o), (a), (c)); \
+ } while (0)
+
+#if 0
+#define bus_space_write_multi_8 !!! unimplemented !!!
+#endif
+
+#define bus_space_write_multi_stream_2(t, h, o, a, c) do { \
+ outsw(__BA(t, h, o), (a), (c)); \
+ } while (0)
+
+#define bus_space_write_multi_stream_4(t, h, o, a, c) do { \
+ outsl(__BA(t, h, o), (a), (c)); \
+ } while (0)
+
+#if 0
+#define bus_space_write_multi_stream_8 !!! unimplemented !!!
+#endif
+
+/*
+ * void bus_space_write_region_N __P((bus_space_tag_t tag,
+ * bus_space_handle_t bsh, bus_size_t offset,
+ * const u_intN_t *addr, size_t count));
+ *
+ * Write `count' 1, 2, 4, or 8 byte quantities from the buffer provided
+ * to bus space described by tag/handle starting at `offset'.
+ */
+
+static __inline void
+bus_space_write_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int8_t *addr, size_t count)
+{
+ volatile u_int8_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ *d++ = *addr++;
+ __asm __volatile("eieio; sync");
+}
+
+static __inline void
+bus_space_write_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int16_t *addr, size_t count)
+{
+ volatile u_int16_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ __asm __volatile("sthbrx %0, 0, %1" ::
+ "r"(*addr++), "r"(d++));
+ __asm __volatile("eieio; sync");
+}
+
+static __inline void
+bus_space_write_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int32_t *addr, size_t count)
+{
+ volatile u_int32_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ __asm __volatile("stwbrx %0, 0, %1" ::
+ "r"(*addr++), "r"(d++));
+ __asm __volatile("eieio; sync");
+}
+
+#if 0
+#define bus_space_write_region_8 !!! bus_space_write_region_8 unimplemented !!!
+#endif
+
+static __inline void
+bus_space_write_region_stream_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int16_t *addr, size_t count)
+{
+ volatile u_int16_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ *d++ = *addr++;
+ __asm __volatile("eieio; sync");
+}
+
+static __inline void
+bus_space_write_region_stream_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int32_t *addr, size_t count)
+{
+ volatile u_int32_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ *d++ = *addr++;
+ __asm __volatile("eieio; sync");
+}
+
+#if 0
+#define bus_space_write_region_stream_8 !!! unimplemented !!!
+#endif
+
+/*
+ * void bus_space_set_multi_N __P((bus_space_tag_t tag,
+ * bus_space_handle_t bsh, bus_size_t offset, u_intN_t val,
+ * size_t count));
+ *
+ * Write the 1, 2, 4, or 8 byte value `val' to bus space described
+ * by tag/handle/offset `count' times.
+ */
+
+static __inline void
+bus_space_set_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t val, size_t count)
+{
+ volatile u_int8_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ *d = val;
+ __asm __volatile("eieio; sync");
+}
+
+static __inline void
+bus_space_set_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t val, size_t count)
+{
+ volatile u_int16_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ __asm __volatile("sthbrx %0, 0, %1" ::
+ "r"(val), "r"(d));
+ __asm __volatile("eieio; sync");
+}
+
+static __inline void
+bus_space_set_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t val, size_t count)
+{
+ volatile u_int32_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ __asm __volatile("stwbrx %0, 0, %1" ::
+ "r"(val), "r"(d));
+ __asm __volatile("eieio; sync");
+}
+
+#if 0
+#define bus_space_set_multi_8 !!! bus_space_set_multi_8 unimplemented !!!
+#endif
+
+static __inline void
+bus_space_set_multi_stream_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t val, size_t count)
+{
+ volatile u_int16_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ *d = val;
+ __asm __volatile("eieio; sync");
+}
+
+static __inline void
+bus_space_set_multi_stream_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t val, size_t count)
+{
+ volatile u_int32_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ *d = val;
+ __asm __volatile("eieio; sync");
+}
+
+#if 0
+#define bus_space_set_multi_stream_8 !!! unimplemented !!!
+#endif
+
+/*
+ * void bus_space_set_region_N __P((bus_space_tag_t tag,
+ * bus_space_handle_t bsh, bus_size_t offset, u_intN_t val,
+ * size_t count));
+ *
+ * Write `count' 1, 2, 4, or 8 byte value `val' to bus space described
+ * by tag/handle starting at `offset'.
+ */
+
+static __inline void
+bus_space_set_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t val, size_t count)
+{
+ volatile u_int8_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ *d++ = val;
+ __asm __volatile("eieio; sync");
+}
+
+static __inline void
+bus_space_set_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t val, size_t count)
+{
+ volatile u_int16_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ __asm __volatile("sthbrx %0, 0, %1" ::
+ "r"(val), "r"(d++));
+ __asm __volatile("eieio; sync");
+}
+
+static __inline void
+bus_space_set_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t val, size_t count)
+{
+ volatile u_int32_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ __asm __volatile("stwbrx %0, 0, %1" ::
+ "r"(val), "r"(d++));
+ __asm __volatile("eieio; sync");
+}
+
+#if 0
+#define bus_space_set_region_8 !!! bus_space_set_region_8 unimplemented !!!
+#endif
+
+static __inline void
+bus_space_set_region_stream_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t val, size_t count)
+{
+ volatile u_int16_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ *d++ = val;
+ __asm __volatile("eieio; sync");
+}
+
+static __inline void
+bus_space_set_region_stream_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t val, size_t count)
+{
+ volatile u_int32_t *d = __BA(tag, bsh, offset);
+
+ while (count--)
+ *d++ = val;
+ __asm __volatile("eieio; sync");
+}
+
+#if 0
+#define bus_space_set_region_stream_8 !!! unimplemented !!!
+#endif
+
+/*
+ * void bus_space_copy_region_N __P((bus_space_tag_t tag,
+ * bus_space_handle_t bsh1, bus_size_t off1,
+ * bus_space_handle_t bsh2, bus_size_t off2,
+ * size_t count));
+ *
+ * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
+ * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
+ */
+
+ /* XXX IMPLEMENT bus_space_copy_N() XXX */
+
+/*
+ * Bus read/write barrier methods.
+ *
+ * void bus_space_barrier __P((bus_space_tag_t tag,
+ * bus_space_handle_t bsh, bus_size_t offset,
+ * bus_size_t len, int flags));
+ *
+ * Note: the macppc does not currently require barriers, but we must
+ * provide the flags to MI code.
+ */
+
+#define bus_space_barrier(t, h, o, l, f) \
+ ((void)((void)(t), (void)(h), (void)(o), (void)(l), (void)(f)))
+#define BUS_SPACE_BARRIER_READ 0x01 /* force read barrier */
+#define BUS_SPACE_BARRIER_WRITE 0x02 /* force write barrier */
+
+#define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t)
+
+/*
+ * Bus DMA methods.
+ */
+
+/*
+ * Flags used in various bus DMA methods.
+ */
+#define BUS_DMA_WAITOK 0x00 /* safe to sleep (pseudo-flag) */
+#define BUS_DMA_NOWAIT 0x01 /* not safe to sleep */
+#define BUS_DMA_ALLOCNOW 0x02 /* perform resource allocation now */
+#define BUS_DMA_COHERENT 0x04 /* hint: map memory DMA coherent */
+#define BUS_DMA_BUS1 0x10 /* placeholders for bus functions... */
+#define BUS_DMA_BUS2 0x20
+#define BUS_DMA_BUS3 0x40
+#define BUS_DMA_BUS4 0x80
+
+/* Forwards needed by prototypes below. */
+struct mbuf;
+struct uio;
+
+/*
+ * Operations performed by bus_dmamap_sync().
+ */
+#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */
+#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */
+#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */
+#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */
+
+typedef struct macppc_bus_dma_tag *bus_dma_tag_t;
+typedef struct macppc_bus_dmamap *bus_dmamap_t;
+
+/*
+ * bus_dma_segment_t
+ *
+ * Describes a single contiguous DMA transaction. Values
+ * are suitable for programming into DMA registers.
+ */
+struct macppc_bus_dma_segment {
+ bus_addr_t ds_addr; /* DMA address */
+ bus_size_t ds_len; /* length of transfer */
+};
+typedef struct macppc_bus_dma_segment bus_dma_segment_t;
+
+/*
+ * bus_dma_tag_t
+ *
+ * A machine-dependent opaque type describing the implementation of
+ * DMA for a given bus.
+ */
+
+struct macppc_bus_dma_tag {
+ /*
+ * The `bounce threshold' is checked while we are loading
+ * the DMA map. If the physical address of the segment
+ * exceeds the threshold, an error will be returned. The
+ * caller can then take whatever action is necessary to
+ * bounce the transfer. If this value is 0, it will be
+ * ignored.
+ */
+ bus_addr_t _bounce_thresh;
+
+ /*
+ * DMA mapping methods.
+ */
+ int (*_dmamap_create) __P((bus_dma_tag_t, bus_size_t, int,
+ bus_size_t, bus_size_t, int, bus_dmamap_t *));
+ void (*_dmamap_destroy) __P((bus_dma_tag_t, bus_dmamap_t));
+ int (*_dmamap_load) __P((bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int));
+ int (*_dmamap_load_mbuf) __P((bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int));
+ int (*_dmamap_load_uio) __P((bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int));
+ int (*_dmamap_load_raw) __P((bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int));
+ void (*_dmamap_unload) __P((bus_dma_tag_t, bus_dmamap_t));
+ void (*_dmamap_sync) __P((bus_dma_tag_t, bus_dmamap_t,
+ bus_addr_t, bus_size_t, int));
+
+ /*
+ * DMA memory utility functions.
+ */
+ int (*_dmamem_alloc) __P((bus_dma_tag_t, bus_size_t, bus_size_t,
+ bus_size_t, bus_dma_segment_t *, int, int *, int));
+ void (*_dmamem_free) __P((bus_dma_tag_t,
+ bus_dma_segment_t *, int));
+ int (*_dmamem_map) __P((bus_dma_tag_t, bus_dma_segment_t *,
+ int, size_t, caddr_t *, int));
+ void (*_dmamem_unmap) __P((bus_dma_tag_t, caddr_t, size_t));
+ vm_offset_t (*_dmamem_mmap) __P((bus_dma_tag_t, bus_dma_segment_t *,
+ int, off_t, int, int));
+};
+
+#define bus_dmamap_create(t, s, n, m, b, f, p) \
+ (*(t)->_dmamap_create)((t), (s), (n), (m), (b), (f), (p))
+#define bus_dmamap_destroy(t, p) \
+ (*(t)->_dmamap_destroy)((t), (p))
+#define bus_dmamap_load(t, m, b, s, p, f) \
+ (*(t)->_dmamap_load)((t), (m), (b), (s), (p), (f))
+#define bus_dmamap_load_mbuf(t, m, b, f) \
+ (*(t)->_dmamap_load_mbuf)((t), (m), (b), (f))
+#define bus_dmamap_load_uio(t, m, u, f) \
+ (*(t)->_dmamap_load_uio)((t), (m), (u), (f))
+#define bus_dmamap_load_raw(t, m, sg, n, s, f) \
+ (*(t)->_dmamap_load_raw)((t), (m), (sg), (n), (s), (f))
+#define bus_dmamap_unload(t, p) \
+ (*(t)->_dmamap_unload)((t), (p))
+#define bus_dmamap_sync(t, p, o, l, ops) \
+ (void)((t)->_dmamap_sync ? \
+ (*(t)->_dmamap_sync)((t), (p), (o), (l), (ops)) : (void)0)
+
+#define bus_dmamem_alloc(t, s, a, b, sg, n, r, f) \
+ (*(t)->_dmamem_alloc)((t), (s), (a), (b), (sg), (n), (r), (f))
+#define bus_dmamem_free(t, sg, n) \
+ (*(t)->_dmamem_free)((t), (sg), (n))
+#define bus_dmamem_map(t, sg, n, s, k, f) \
+ (*(t)->_dmamem_map)((t), (sg), (n), (s), (k), (f))
+#define bus_dmamem_unmap(t, k, s) \
+ (*(t)->_dmamem_unmap)((t), (k), (s))
+#define bus_dmamem_mmap(t, sg, n, o, p, f) \
+ (*(t)->_dmamem_mmap)((t), (sg), (n), (o), (p), (f))
+
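+/*
+ * Illustrative (hypothetical) call sequence for one device-write
+ * transfer using the methods above, not part of the original import:
+ *
+ *	bus_dmamap_t map;
+ *
+ *	if (bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &map))
+ *		return;
+ *	if (bus_dmamap_load(t, map, buf, size, NULL, BUS_DMA_NOWAIT) == 0) {
+ *		bus_dmamap_sync(t, map, 0, size, BUS_DMASYNC_PREWRITE);
+ *		... program the DMA engine, wait for completion ...
+ *		bus_dmamap_sync(t, map, 0, size, BUS_DMASYNC_POSTWRITE);
+ *		bus_dmamap_unload(t, map);
+ *	}
+ *	bus_dmamap_destroy(t, map);
+ */
+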
+/*
+ * bus_dmamap_t
+ *
+ * Describes a DMA mapping.
+ */
+struct macppc_bus_dmamap {
+ /*
+	 * PRIVATE MEMBERS: not for use by machine-independent code.
+ */
+ bus_size_t _dm_size; /* largest DMA transfer mappable */
+ int _dm_segcnt; /* number of segs this map can map */
+ bus_size_t _dm_maxsegsz; /* largest possible segment */
+ bus_size_t _dm_boundary; /* don't cross this */
+ bus_addr_t _dm_bounce_thresh; /* bounce threshold; see tag */
+ int _dm_flags; /* misc. flags */
+
+ void *_dm_cookie; /* cookie for bus-specific functions */
+
+ /*
+ * PUBLIC MEMBERS: these are used by machine-independent code.
+ */
+ bus_size_t dm_mapsize; /* size of the mapping */
+ int dm_nsegs; /* # valid segments in mapping */
+ bus_dma_segment_t dm_segs[1]; /* segments; variable length */
+};
+
+#ifdef _MACPPC_BUS_DMA_PRIVATE
+int _bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int, bus_size_t,
+ bus_size_t, int, bus_dmamap_t *));
+void _bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
+int _bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int));
+int _bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int));
+int _bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int));
+int _bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int));
+void _bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
+void _bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
+ bus_size_t, int));
+
+int _bus_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
+void _bus_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs));
+int _bus_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs, size_t size, caddr_t *kvap, int flags));
+void _bus_dmamem_unmap __P((bus_dma_tag_t tag, caddr_t kva,
+ size_t size));
+vm_offset_t _bus_dmamem_mmap __P((bus_dma_tag_t tag,
+ bus_dma_segment_t *segs,
+ int nsegs, off_t off, int prot, int flags));
+
+int _bus_dmamem_alloc_range __P((bus_dma_tag_t tag, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
+ vm_offset_t low, vm_offset_t high));
+
+#endif /* _MACPPC_BUS_DMA_PRIVATE */
+
+#endif /* _MACPPC_BUS_H_ */
diff --git a/sys/powerpc/include/cpu.h b/sys/powerpc/include/cpu.h
new file mode 100644
index 0000000..3050b59
--- /dev/null
+++ b/sys/powerpc/include/cpu.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 1995-1997 Wolfgang Solfrank.
+ * Copyright (C) 1995-1997 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: cpu.h,v 1.11 2000/05/26 21:19:53 thorpej Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPU_H_
+#define _MACHINE_CPU_H_
+
+#include <machine/frame.h>
+#include <machine/pcb.h>
+#include <machine/psl.h>
+
+#define CLKF_USERMODE(frame) (((frame)->srr1 & PSL_PR) != 0)
+#define CLKF_BASEPRI(frame) ((frame)->pri == 0)
+#define CLKF_PC(frame) ((frame)->srr0)
+#define CLKF_INTR(frame) ((frame)->depth > 0)
+
+#define cpu_swapout(p)
+#define cpu_number() 0
+
+extern void delay __P((unsigned));
+#define DELAY(n) delay(n)
+
+extern int want_resched;
+extern int astpending;
+
+#define need_proftick(p) ((p)->p_flag |= PS_OWEUPC, astpending = 1)
+
+extern char bootpath[];
+
+#if defined(_KERNEL) || defined(_STANDALONE)
+#define CACHELINESIZE 32
+#endif
+
+extern void __syncicache __P((void *, int));
+
+/*
+ * CTL_MACHDEP definitions.
+ */
+#define CPU_CACHELINE 1
+#define CPU_MAXID 2
+#define CPU_CONSDEV 1
+
+#define CTL_MACHDEP_NAMES { \
+ { 0, 0 }, \
+ { "cachelinesize", CTLTYPE_INT }, \
+}
+
+static __inline u_int64_t
+get_cyclecount(void)
+{
+ u_int32_t upper, lower;
+ u_int64_t time;
+
+ __asm __volatile(
+ "mftb %0\n"
+ "mftbu %1"
+ : "=r" (lower), "=r" (upper));
+
+ time = (u_int64_t)upper;
+ time = (time << 32) + lower;
+ return (time);
+}
+
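+/*
+ * Illustrative sketch (hypothetical, not part of the original import):
+ * get_cyclecount() above reads the two timebase halves without checking
+ * for a carry between the reads, so a wrap of the lower half can yield
+ * a torn value.  The carry-safe idiom re-reads the upper half until it
+ * is stable:
+ */
+static __inline u_int64_t
+get_cyclecount_safe(void)
+{
+	u_int32_t upper, upper2, lower;
+
+	do {
+		__asm __volatile(
+		    "mftbu %0\n"
+		    "mftb %1\n"
+		    "mftbu %2"
+		    : "=r" (upper), "=r" (lower), "=r" (upper2));
+	} while (upper != upper2);
+
+	return (((u_int64_t)upper << 32) | lower);
+}
+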
+#define cpu_getstack(p) ((p)->p_md.md_regs[1])
+
+void savectx __P((struct pcb *));
+
+#endif /* _MACHINE_CPU_H_ */
diff --git a/sys/powerpc/include/db_machdep.h b/sys/powerpc/include/db_machdep.h
new file mode 100644
index 0000000..c444852
--- /dev/null
+++ b/sys/powerpc/include/db_machdep.h
@@ -0,0 +1,99 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ *
+ * $OpenBSD: db_machdep.h,v 1.2 1997/03/21 00:48:48 niklas Exp $
+ * $NetBSD: db_machdep.h,v 1.4.22.1 2000/08/05 11:10:43 wiz Exp $
+ * $FreeBSD$
+ */
+
+/*
+ * Machine-dependent defines for new kernel debugger.
+ */
+#ifndef _PPC_DB_MACHDEP_H_
+#define _PPC_DB_MACHDEP_H_
+
+#include <vm/vm_param.h>
+
+#define DB_ELF_SYMBOLS
+#define DB_ELFSIZE 32
+
+typedef vm_offset_t db_addr_t; /* address - unsigned */
+typedef long db_expr_t; /* expression - signed */
+struct powerpc_saved_state {
+ u_int32_t r[32]; /* data registers */
+ u_int32_t iar;
+ u_int32_t msr;
+};
+typedef struct powerpc_saved_state db_regs_t;
+db_regs_t ddb_regs; /* register state */
+#define DDB_REGS (&ddb_regs)
+
+#define PC_REGS(regs) ((db_addr_t)(regs)->iar)
+
+#define BKPT_INST 0x7C810808 /* breakpoint instruction */
+
+#define BKPT_SIZE (4) /* size of breakpoint inst */
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define FIXUP_PC_AFTER_BREAK(regs) ((regs)->iar -= 4)
+
+#define SR_SINGLESTEP 0x400
+#define db_clear_single_step(regs) ((regs)->msr &= ~SR_SINGLESTEP)
+#define db_set_single_step(regs) ((regs)->msr |= SR_SINGLESTEP)
+
+#define T_BREAKPOINT 0xffff
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_BREAKPOINT)
+
+#define T_WATCHPOINT 0xeeee
+#ifdef T_WATCHPOINT
+#define IS_WATCHPOINT_TRAP(type, code) ((type) == T_WATCHPOINT)
+#else
+#define IS_WATCHPOINT_TRAP(type, code) 0
+#endif
+
+#define M_RTS 0xfc0007fe
+#define I_RTS 0x4c000020
+#define M_BC 0xfc000000
+#define I_BC 0x40000000
+#define M_B 0xfc000000
+#define	I_B		0x48000000
+#define M_RFI 0xfc0007fe
+#define I_RFI 0x4c000064
+
+#define inst_trap_return(ins) (((ins)&M_RFI) == I_RFI)
+#define inst_return(ins) (((ins)&M_RTS) == I_RTS)
+#define inst_call(ins) (((ins)&M_BC ) == I_BC || \
+ ((ins)&M_B ) == I_B )
+#define inst_load(ins) 0
+#define inst_store(ins) 0
+
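+/*
+ * Illustrative (hypothetical) example, not part of the original import:
+ * a `bl' instruction encodes as 0x48000001 (major opcode 18 with LK
+ * set), so (0x48000001 & M_B) == I_B and inst_call() reports a call.
+ */
+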
+#ifdef _KERNEL
+
+void kdb_kintr __P((void *));
+int kdb_trap __P((int, void *));
+
+#endif /* _KERNEL */
+
+#endif /* _PPC_DB_MACHDEP_H_ */
diff --git a/sys/powerpc/include/fpu.h b/sys/powerpc/include/fpu.h
new file mode 100644
index 0000000..1f23caa
--- /dev/null
+++ b/sys/powerpc/include/fpu.h
@@ -0,0 +1,71 @@
+/*-
+ * Copyright (C) 1996 Wolfgang Solfrank.
+ * Copyright (C) 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: fpu.h,v 1.2 1999/12/07 15:14:56 danw Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_FPU_H_
+#define _MACHINE_FPU_H_
+
+#define FPSCR_FX 0x80000000
+#define FPSCR_FEX 0x40000000
+#define FPSCR_VX 0x20000000
+#define FPSCR_OX 0x10000000
+#define FPSCR_UX 0x08000000
+#define FPSCR_ZX 0x04000000
+#define FPSCR_XX 0x02000000
+#define FPSCR_VXSNAN 0x01000000
+#define FPSCR_VXISI 0x00800000
+#define FPSCR_VXIDI 0x00400000
+#define FPSCR_VXZDZ 0x00200000
+#define FPSCR_VXIMZ 0x00100000
+#define FPSCR_VXVC 0x00080000
+#define FPSCR_FR 0x00040000
+#define FPSCR_FI 0x00020000
+#define FPSCR_FPRF 0x0001f000
+#define FPSCR_C 0x00010000
+#define FPSCR_FPCC 0x0000f000
+#define FPSCR_FL 0x00008000
+#define FPSCR_FG 0x00004000
+#define FPSCR_FE 0x00002000
+#define FPSCR_FU 0x00001000
+#define FPSCR_VXSOFT 0x00000400
+#define FPSCR_VXSQRT 0x00000200
+#define FPSCR_VXCVI 0x00000100
+#define FPSCR_VE 0x00000080
+#define FPSCR_OE 0x00000040
+#define FPSCR_UE 0x00000020
+#define FPSCR_ZE 0x00000010
+#define FPSCR_XE 0x00000008
+#define FPSCR_NI 0x00000004
+#define FPSCR_RN 0x00000003
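+
+/*
+ * Illustrative (hypothetical) note, not part of the original import:
+ * the rounding mode is the low two-bit field, so (fpscr & FPSCR_RN)
+ * == 0 selects round-to-nearest and 3 selects round toward minus
+ * infinity, per the PowerPC architecture.
+ */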
+
+#endif /* _MACHINE_FPU_H_ */
diff --git a/sys/powerpc/include/intr.h b/sys/powerpc/include/intr.h
new file mode 100644
index 0000000..dac84ec
--- /dev/null
+++ b/sys/powerpc/include/intr.h
@@ -0,0 +1,163 @@
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Charles M. Hannum.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: intr.h,v 1.6 2000/02/11 13:15:44 tsubai Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACPPC_INTR_H_
+#define _MACPPC_INTR_H_
+
+/* Interrupt priority `levels'. */
+#define IPL_NONE 9 /* nothing */
+#define IPL_SOFTCLOCK 8 /* timeouts */
+#define IPL_SOFTNET 7 /* protocol stacks */
+#define IPL_BIO 6 /* block I/O */
+#define IPL_NET 5 /* network */
+#define IPL_SOFTSERIAL 4 /* serial */
+#define IPL_TTY 3 /* terminal */
+#define IPL_IMP 3 /* memory allocation */
+#define IPL_AUDIO 2 /* audio */
+#define IPL_CLOCK 1 /* clock */
+#define IPL_HIGH 1 /* everything */
+#define IPL_SERIAL 0 /* serial */
+#define NIPL 10
+
+/* Interrupt sharing types. */
+#define IST_NONE 0 /* none */
+#define IST_PULSE 1 /* pulsed */
+#define IST_EDGE 2 /* edge-triggered */
+#define IST_LEVEL 3 /* level-triggered */
+
+#ifndef LOCORE
+
+#if 0
+/*
+ * Interrupt handler chains. intr_establish() inserts a handler into
+ * the list. The handler is called with its (single) argument.
+ */
+struct intrhand {
+ int (*ih_fun) __P((void *));
+ void *ih_arg;
+ u_long ih_count;
+ struct intrhand *ih_next;
+ int ih_level;
+ int ih_irq;
+};
+#endif
+
+void setsoftclock __P((void));
+void clearsoftclock __P((void));
+void setsoftnet __P((void));
+void clearsoftnet __P((void));
+
+void do_pending_int __P((void));
+
+static __inline void softintr __P((int));
+
+extern u_int cpl, ipending, tickspending;
+extern int imask[];
+
+/* The following code should use lwarx/stwcx. so the update is atomic
+ * without the MSR disable/enable round trip (a sketch follows the
+ * function below).  I need to read the manual once more.... */
+static __inline void
+softintr(ipl)
+ int ipl;
+{
+ int msrsave;
+
+ __asm__ volatile("mfmsr %0" : "=r"(msrsave));
+ __asm__ volatile("mtmsr %0" :: "r"(msrsave & ~PSL_EE));
+ ipending |= 1 << ipl;
+ __asm__ volatile("mtmsr %0" :: "r"(msrsave));
+}
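+
+/*
+ * Sketch (not compiled in) of the lwarx/stwcx. variant suggested
+ * above: atomically OR the pending bit into ipending without touching
+ * PSL_EE.  The constraints and clobbers here are assumptions and the
+ * code is untested.
+ */
+#if 0
+static __inline void
+softintr_atomic(int ipl)
+{
+	u_int old;
+
+	__asm__ volatile(
+	    "1:	lwarx %0, 0, %2\n\t"	/* load ipending with reservation */
+	    "or %0, %0, %1\n\t"		/* set this level's pending bit */
+	    "stwcx. %0, 0, %2\n\t"	/* store iff reservation still held */
+	    "bne- 1b"			/* reservation lost: retry */
+	    : "=&r"(old)
+	    : "r"(1 << ipl), "r"(&ipending)
+	    : "cr0", "memory");
+}
+#endif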
+
+#define ICU_LEN 64
+
+/* Soft interrupt masks. */
+#define SIR_CLOCK 28
+#define SIR_NET 29
+#define SIR_SERIAL 30
+#define SPL_CLOCK 31
+
+#if 0
+
+/*
+ * Hardware interrupt masks
+ */
+
+#define splbio() splraise(imask[IPL_BIO])
+#define splnet() splraise(imask[IPL_NET])
+#define spltty() splraise(imask[IPL_TTY])
+#define splaudio() splraise(imask[IPL_AUDIO])
+#define splclock() splraise(imask[IPL_CLOCK])
+#define splstatclock() splclock()
+#define splserial() splraise(imask[IPL_SERIAL])
+
+#define spllpt() spltty()
+
+/*
+ * Software interrupt masks
+ *
+ * NOTE: splsoftclock() is used by hardclock() to lower the priority from
+ * clock to softclock before it calls softclock().
+ */
+#define spllowersoftclock() spllower(imask[IPL_SOFTCLOCK])
+#define splsoftclock() splraise(imask[IPL_SOFTCLOCK])
+#define splsoftnet() splraise(imask[IPL_SOFTNET])
+#define splsoftserial() splraise(imask[IPL_SOFTSERIAL])
+
+/*
+ * Miscellaneous
+ */
+#define splimp() splraise(imask[IPL_IMP])
+#define splhigh() splraise(imask[IPL_HIGH])
+#define spl0() spllower(0)
+
+#endif /* 0 */
+
+#define setsoftclock() softintr(SIR_CLOCK)
+#define setsoftnet() softintr(SIR_NET)
+#define setsoftserial() softintr(SIR_SERIAL)
+
+#define CNT_IRQ0 0
+#define CNT_CLOCK 64
+#define CNT_SOFTCLOCK 65
+#define CNT_SOFTNET 66
+#define CNT_SOFTSERIAL 67
+
+#endif /* !LOCORE */
+
+#endif /* !_MACPPC_INTR_H_ */
diff --git a/sys/powerpc/include/pcb.h b/sys/powerpc/include/pcb.h
new file mode 100644
index 0000000..6bc7219
--- /dev/null
+++ b/sys/powerpc/include/pcb.h
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: pcb.h,v 1.4 2000/06/04 11:57:17 tsubai Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PCB_H_
+#define _MACHINE_PCB_H_
+
+typedef int faultbuf[23];
+
+struct pcb {
+ struct pmap *pcb_pm; /* pmap of our vmspace */
+ struct pmap *pcb_pmreal; /* real address of above */
+ register_t pcb_sp; /* saved SP */
+ int pcb_spl; /* saved SPL */
+ faultbuf *pcb_onfault; /* For use during copyin/copyout */
+ int pcb_flags;
+#define PCB_FPU 1 /* Process had FPU initialized */
+ struct fpu {
+ double fpr[32];
+ double fpscr; /* FPSCR stored as double for easier access */
+ } pcb_fpu; /* Floating point processor */
+};
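+
+/*
+ * Sketch (added note) of the pcb_onfault protocol, assuming the
+ * setfault() helper from locore: setfault() records state in a
+ * faultbuf and returns 0; if a fault is taken while pcb_onfault is
+ * set, the trap handler resumes there with a nonzero return value.
+ *
+ *	faultbuf env;
+ *
+ *	if (setfault(env)) {
+ *		curpcb->pcb_onfault = NULL;
+ *		return (EFAULT);
+ *	}
+ *	... touch user memory ...
+ *	curpcb->pcb_onfault = NULL;
+ */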
+
+struct md_coredump {
+ struct trapframe frame;
+ struct fpu fpstate;
+};
+
+#ifdef _KERNEL
+
+#ifndef curpcb
+extern struct pcb *curpcb;
+#endif
+
+extern struct pmap *curpm;
+extern struct proc *fpuproc;
+
+#endif
+#endif /* _MACHINE_PCB_H_ */
diff --git a/sys/powerpc/include/pio.h b/sys/powerpc/include/pio.h
new file mode 100644
index 0000000..761eb17
--- /dev/null
+++ b/sys/powerpc/include/pio.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 1997 Per Fogelstrom, Opsycon AB and RTMX Inc, USA.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed under OpenBSD by
+ * Per Fogelstrom Opsycon AB for RTMX Inc, North Carolina, USA.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $NetBSD: pio.h,v 1.1 1998/05/15 10:15:54 tsubai Exp $
+ * $OpenBSD: pio.h,v 1.1 1997/10/13 10:53:47 pefo Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PIO_H_
+#define _MACHINE_PIO_H_
+/*
+ * I/O macros.
+ */
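+
+/*
+ * Added note: each access below is followed by ``eieio; sync''.
+ * eieio orders the access against other loads/stores to
+ * caching-inhibited space, and sync waits for it to complete, so
+ * device accesses are issued strictly in program order.
+ */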
+
+static __inline void
+__outb(volatile u_int8_t *a, u_int8_t v)
+{
+ *a = v;
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__outw(volatile u_int16_t *a, u_int16_t v)
+{
+ *a = v;
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__outl(volatile u_int32_t *a, u_int32_t v)
+{
+ *a = v;
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__outwrb(volatile u_int16_t *a, u_int16_t v)
+{
+ __asm__ volatile("sthbrx %0, 0, %1" :: "r"(v), "r"(a));
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__outlrb(volatile u_int32_t *a, u_int32_t v)
+{
+ __asm__ volatile("stwbrx %0, 0, %1" :: "r"(v), "r"(a));
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline u_int8_t
+__inb(volatile u_int8_t *a)
+{
+ u_int8_t _v_;
+
+ _v_ = *a;
+ __asm__ volatile("eieio; sync");
+ return _v_;
+}
+
+static __inline u_int16_t
+__inw(volatile u_int16_t *a)
+{
+ u_int16_t _v_;
+
+ _v_ = *a;
+ __asm__ volatile("eieio; sync");
+ return _v_;
+}
+
+static __inline u_int32_t
+__inl(volatile u_int32_t *a)
+{
+ u_int32_t _v_;
+
+ _v_ = *a;
+ __asm__ volatile("eieio; sync");
+ return _v_;
+}
+
+static __inline u_int16_t
+__inwrb(volatile u_int16_t *a)
+{
+ u_int16_t _v_;
+
+ __asm__ volatile("lhbrx %0, 0, %1" : "=r"(_v_) : "r"(a));
+ __asm__ volatile("eieio; sync");
+ return _v_;
+}
+
+static __inline u_int32_t
+__inlrb(volatile u_int32_t *a)
+{
+ u_int32_t _v_;
+
+ __asm__ volatile("lwbrx %0, 0, %1" : "=r"(_v_) : "r"(a));
+ __asm__ volatile("eieio; sync");
+ return _v_;
+}
+
+#define outb(a,v) (__outb((volatile u_int8_t *)(a), v))
+#define out8(a,v) outb(a,v)
+#define outw(a,v) (__outw((volatile u_int16_t *)(a), v))
+#define out16(a,v) outw(a,v)
+#define outl(a,v) (__outl((volatile u_int32_t *)(a), v))
+#define out32(a,v) outl(a,v)
+#define inb(a) (__inb((volatile u_int8_t *)(a)))
+#define in8(a) inb(a)
+#define inw(a) (__inw((volatile u_int16_t *)(a)))
+#define in16(a) inw(a)
+#define inl(a) (__inl((volatile u_int32_t *)(a)))
+#define in32(a) inl(a)
+
+#define out8rb(a,v) outb(a,v)
+#define outwrb(a,v) (__outwrb((volatile u_int16_t *)(a), v))
+#define out16rb(a,v) outwrb(a,v)
+#define outlrb(a,v) (__outlrb((volatile u_int32_t *)(a), v))
+#define out32rb(a,v) outlrb(a,v)
+#define in8rb(a) inb(a)
+#define inwrb(a) (__inwrb((volatile u_int16_t *)(a)))
+#define in16rb(a) inwrb(a)
+#define inlrb(a) (__inlrb((volatile u_int32_t *)(a)))
+#define in32rb(a) inlrb(a)
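+
+/*
+ * Example (illustrative; ``va'' stands for some mapped, hypothetical
+ * device address): reading a little-endian 16-bit register on this
+ * big-endian CPU with the byte-reversing variants:
+ *
+ *	u_int16_t v = in16rb(va);	(byte-swapped load via lhbrx)
+ */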
+
+static __inline void
+__outsb(volatile u_int8_t *a, const u_int8_t *s, size_t c)
+{
+ while (c--)
+ *a = *s++;
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__outsw(volatile u_int16_t *a, const u_int16_t *s, size_t c)
+{
+ while (c--)
+ *a = *s++;
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__outsl(volatile u_int32_t *a, const u_int32_t *s, size_t c)
+{
+ while (c--)
+ *a = *s++;
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__outswrb(volatile u_int16_t *a, const u_int16_t *s, size_t c)
+{
+ while (c--)
+ __asm__ volatile("sthbrx %0, 0, %1" :: "r"(*s++), "r"(a));
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__outslrb(volatile u_int32_t *a, const u_int32_t *s, size_t c)
+{
+ while (c--)
+ __asm__ volatile("stwbrx %0, 0, %1" :: "r"(*s++), "r"(a));
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__insb(volatile u_int8_t *a, u_int8_t *d, size_t c)
+{
+ while (c--)
+ *d++ = *a;
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__insw(volatile u_int16_t *a, u_int16_t *d, size_t c)
+{
+ while (c--)
+ *d++ = *a;
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__insl(volatile u_int32_t *a, u_int32_t *d, size_t c)
+{
+ while (c--)
+ *d++ = *a;
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__inswrb(volatile u_int16_t *a, u_int16_t *d, size_t c)
+{
+ while (c--)
+ __asm__ volatile("lhbrx %0, 0, %1" : "=r"(*d++) : "r"(a));
+ __asm__ volatile("eieio; sync");
+}
+
+static __inline void
+__inslrb(volatile u_int32_t *a, u_int32_t *d, size_t c)
+{
+ while (c--)
+ __asm__ volatile("lwbrx %0, 0, %1" : "=r"(*d++) : "r"(a));
+ __asm__ volatile("eieio; sync");
+}
+
+#define outsb(a,s,c) (__outsb((volatile u_int8_t *)(a), s, c))
+#define outs8(a,s,c) outsb(a,s,c)
+#define outsw(a,s,c) (__outsw((volatile u_int16_t *)(a), s, c))
+#define outs16(a,s,c) outsw(a,s,c)
+#define outsl(a,s,c) (__outsl((volatile u_int32_t *)(a), s, c))
+#define outs32(a,s,c) outsl(a,s,c)
+#define insb(a,d,c) (__insb((volatile u_int8_t *)(a), d, c))
+#define ins8(a,d,c) insb(a,d,c)
+#define insw(a,d,c) (__insw((volatile u_int16_t *)(a), d, c))
+#define ins16(a,d,c) insw(a,d,c)
+#define insl(a,d,c) (__insl((volatile u_int32_t *)(a), d, c))
+#define ins32(a,d,c) insl(a,d,c)
+
+#define outs8rb(a,s,c) outsb(a,s,c)
+#define outswrb(a,s,c) (__outswrb((volatile u_int16_t *)(a), s, c))
+#define outs16rb(a,s,c) outswrb(a,s,c)
+#define outslrb(a,s,c) (__outslrb((volatile u_int32_t *)(a), s, c))
+#define outs32rb(a,s,c) outslrb(a,s,c)
+#define ins8rb(a,d,c) insb(a,d,c)
+#define inswrb(a,d,c) (__inswrb((volatile u_int16_t *)(a), d, c))
+#define ins16rb(a,d,c) inswrb(a,d,c)
+#define inslrb(a,d,c) (__inslrb((volatile u_int32_t *)(a), d, c))
+#define ins32rb(a,d,c) inslrb(a,d,c)
+
+#endif /*_MACHINE_PIO_H_*/
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
new file mode 100644
index 0000000..83adf0d
--- /dev/null
+++ b/sys/powerpc/include/pmap.h
@@ -0,0 +1,134 @@
+/*-
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: pmap.h,v 1.17 2000/03/30 16:18:24 jdolecek Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PMAP_H_
+#define _MACHINE_PMAP_H_
+
+#include <machine/pte.h>
+
+/*
+ * Segment registers
+ */
+#ifndef LOCORE
+typedef u_int sr_t;
+#endif /* LOCORE */
+#define SR_TYPE 0x80000000
+#define SR_SUKEY 0x40000000
+#define SR_PRKEY 0x20000000
+#define SR_VSID 0x00ffffff
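+
+/*
+ * Example (added note): the 24-bit virtual segment id backing a
+ * virtual address va under pmap pm is
+ *
+ *	pm->pm_sr[(u_int)va >> ADDR_SR_SHFT] & SR_VSID
+ *
+ * with ADDR_SR_SHFT from <machine/pte.h> above.
+ */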
+
+#ifndef LOCORE
+
+struct pv_entry {
+ struct pv_entry *pv_next; /* Linked list of mappings */
+ int pv_idx; /* Index into ptable */
+ vm_offset_t pv_va; /* virtual address of mapping */
+};
+
+struct md_page {
+ int pv_list_count;
+ int pv_flags;
+ TAILQ_HEAD(,pv_entry) pv_list;
+};
+
+/*
+ * Pmap stuff
+ */
+struct pmap {
+ sr_t pm_sr[16]; /* segments used in this pmap */
+ int pm_refs; /* ref count */
+ struct pmap_statistics pm_stats; /* pmap statistics */
+};
+
+typedef struct pmap *pmap_t;
+
+typedef struct pv_entry *pv_entry_t;
+
+#ifdef _KERNEL
+
+#define pmap_clear_modify(pg) (ptemodify((pg), PTE_CHG, 0))
+#define pmap_clear_reference(pg) (ptemodify((pg), PTE_REF, 0))
+#define pmap_is_modified(pg) (ptebits((pg), PTE_CHG))
+#define pmap_is_referenced(pg) (ptebits((pg), PTE_REF))
+#define pmap_unwire(pm, va)
+
+#define pmap_phys_address(x) (x)
+
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+
+extern pmap_t kernel_pmap;
+
+extern vm_offset_t avail_end;
+extern vm_offset_t avail_start;
+extern vm_offset_t phys_avail[];
+extern vm_offset_t virtual_avail;
+extern vm_offset_t virtual_end;
+
+void pmap_bootstrap __P((u_int kernelstart, u_int kernelend));
+vm_offset_t pmap_steal_memory __P((vm_size_t));
+boolean_t ptemodify __P((struct vm_page *, u_int, u_int));
+int ptebits __P((struct vm_page *, int));
+
+#if 0
+#define PMAP_NEED_PROCWR
+void pmap_procwr __P((struct proc *, vaddr_t, size_t));
+#endif
+
+/*
+ * Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
+ *
+ * Note: This won't work if we have more memory than can be direct-mapped
+ * VA==PA all at once. But pmap_copy_page() and pmap_zero_page() will have
+ * this problem, too.
+ */
+#define PMAP_MAP_POOLPAGE(pa) (pa)
+#define PMAP_UNMAP_POOLPAGE(pa) (pa)
+
+#define vtophys(va) pmap_kextract(((vm_offset_t) (va)))
+
+extern pte_t PTmap[];
+
+#define vtopte(x) (PTmap + powerpc_btop(x))
+
+static __inline vm_offset_t
+pmap_kextract(vm_offset_t va)
+{
+ /* XXX: coming soon... */
+ return (0);
+}
+
+#endif /* _KERNEL */
+#endif /* LOCORE */
+
+#endif /* _MACHINE_PMAP_H_ */
diff --git a/sys/powerpc/include/powerpc.h b/sys/powerpc/include/powerpc.h
new file mode 100644
index 0000000..ceeda2f
--- /dev/null
+++ b/sys/powerpc/include/powerpc.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 1996 Wolfgang Solfrank.
+ * Copyright (C) 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: powerpc.h,v 1.3 2000/06/01 00:49:59 matt Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_POWERPC_H_
+#define _MACHINE_POWERPC_H_
+
+struct mem_region {
+ vm_offset_t start;
+ vm_size_t size;
+};
+
+void mem_regions __P((struct mem_region **, struct mem_region **));
+
+/*
+ * These two functions are used solely by boot() in machdep.c.
+ *
+ * It is not clear whether boot() itself should be implementation
+ * dependent instead. XXX
+ */
+void ppc_exit __P((void));
+void ppc_boot __P((char *bootspec));
+
+int dk_match __P((char *name));
+
+void ofrootfound __P((void));
+
+extern int booted_partition;
+
+#endif /* _MACHINE_POWERPC_H_ */
diff --git a/sys/powerpc/include/proc.h b/sys/powerpc/include/proc.h
new file mode 100644
index 0000000..370a9b2
--- /dev/null
+++ b/sys/powerpc/include/proc.h
@@ -0,0 +1,42 @@
+/*-
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: proc.h,v 1.2 1997/04/16 22:57:48 thorpej Exp $
+ * $FreeBSD$
+ */
+
+#include <machine/globals.h>
+
+/*
+ * Machine-dependent part of the proc structure
+ */
+struct mdproc {
+ int md_regs[32];
+};
diff --git a/sys/powerpc/include/psl.h b/sys/powerpc/include/psl.h
new file mode 100644
index 0000000..acdd00d
--- /dev/null
+++ b/sys/powerpc/include/psl.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: psl.h,v 1.4 2000/02/13 10:25:07 tsubai Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PSL_H_
+#define _MACHINE_PSL_H_
+
+/*
+ * Machine State Register (MSR)
+ *
+ * The PowerPC 601 does not implement the following bits:
+ *
+ * POW, ILE, BE, RI, LE[*]
+ *
+ * [*] Little-endian mode on the 601 is implemented in the HID0 register.
+ */
+#define PSL_POW 0x00040000 /* power management */
+#define PSL_ILE 0x00010000 /* interrupt endian mode (1 == le) */
+#define PSL_EE 0x00008000 /* external interrupt enable */
+#define PSL_PR 0x00004000 /* privilege mode (1 == user) */
+#define PSL_FP 0x00002000 /* floating point enable */
+#define PSL_ME 0x00001000 /* machine check enable */
+#define PSL_FE0 0x00000800 /* floating point interrupt mode 0 */
+#define PSL_SE 0x00000400 /* single-step trace enable */
+#define PSL_BE 0x00000200 /* branch trace enable */
+#define PSL_FE1 0x00000100 /* floating point interrupt mode 1 */
+#define PSL_IP 0x00000040 /* interrupt prefix */
+#define PSL_IR 0x00000020 /* instruction address relocation */
+#define PSL_DR 0x00000010 /* data address relocation */
+#define PSL_RI 0x00000002 /* recoverable interrupt */
+#define PSL_LE 0x00000001 /* endian mode (1 == le) */
+
+#define PSL_601_MASK ~(PSL_POW|PSL_ILE|PSL_BE|PSL_RI|PSL_LE)
+
+/*
+ * Floating-point exception modes:
+ */
+#define PSL_FE_DIS 0 /* none */
+#define PSL_FE_NONREC PSL_FE1 /* imprecise non-recoverable */
+#define PSL_FE_REC PSL_FE0 /* imprecise recoverable */
+#define PSL_FE_PREC (PSL_FE0 | PSL_FE1) /* precise */
+#define PSL_FE_DFLT PSL_FE_DIS /* default == none */
+
+/*
+ * Note that PSL_POW and PSL_ILE are not in the saved copy of the MSR
+ */
+#define PSL_MBO 0
+#define PSL_MBZ 0
+
+#define PSL_USERSET (PSL_EE | PSL_PR | PSL_ME | PSL_IR | PSL_DR | PSL_RI)
+
+#define PSL_USERSTATIC (PSL_USERSET | PSL_IP | 0x87c0008c)
+
+#endif /* _MACHINE_PSL_H_ */
diff --git a/sys/powerpc/include/pte.h b/sys/powerpc/include/pte.h
new file mode 100644
index 0000000..3d1ddcf
--- /dev/null
+++ b/sys/powerpc/include/pte.h
@@ -0,0 +1,112 @@
+/*-
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: pte.h,v 1.2 1998/08/31 14:43:40 tsubai Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PTE_H_
+#define _MACHINE_PTE_H_
+
+/*
+ * Page Table Entries
+ */
+#ifndef LOCORE
+#include <sys/queue.h>
+
+struct pte {
+ u_int pte_hi;
+ u_int pte_lo;
+};
+#endif /* LOCORE */
+/* High word: */
+#define PTE_VALID 0x80000000
+#define PTE_VSID_SHFT 7
+#define PTE_HID 0x00000040
+#define PTE_API 0x0000003f
+/* Low word: */
+#define PTE_RPGN 0xfffff000
+#define PTE_REF 0x00000100
+#define PTE_CHG 0x00000080
+#define PTE_WIMG 0x00000078
+#define PTE_W 0x00000040
+#define PTE_I 0x00000020
+#define PTE_M 0x00000010
+#define PTE_G 0x00000008
+#define PTE_PP 0x00000003
+#define PTE_RO 0x00000003
+#define PTE_RW 0x00000002
+
+#ifndef LOCORE
+typedef struct pte pte_t;
+#endif /* LOCORE */
+
+/*
+ * Extract bits from address
+ */
+#define ADDR_SR_SHFT 28
+#define ADDR_PIDX 0x0ffff000
+#define ADDR_PIDX_SHFT 12
+#define ADDR_API_SHFT 22
+#define ADDR_POFF 0x00000fff
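+
+/*
+ * Example (added note): decomposing a virtual address va with the
+ * masks and shifts above:
+ *
+ *	sr   = va >> ADDR_SR_SHFT;			(segment number, 0-15)
+ *	pidx = (va & ADDR_PIDX) >> ADDR_PIDX_SHFT;	(16-bit page index)
+ *	api  = (va >> ADDR_API_SHFT) & PTE_API;		(abbreviated page index)
+ *	poff = va & ADDR_POFF;				(byte offset within page)
+ */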
+
+#ifndef LOCORE
+#ifdef _KERNEL
+extern pte_t *ptable;
+extern int ptab_cnt;
+#endif /* _KERNEL */
+#endif /* LOCORE */
+
+/*
+ * Bits in DSISR:
+ */
+#define DSISR_DIRECT 0x80000000
+#define DSISR_NOTFOUND 0x40000000
+#define DSISR_PROTECT 0x08000000
+#define DSISR_INVRX 0x04000000
+#define DSISR_STORE 0x02000000
+#define DSISR_DABR 0x00400000
+#define DSISR_SEGMENT 0x00200000
+#define DSISR_EAR 0x00100000
+
+/*
+ * Bits in SRR1 on ISI:
+ */
+#define ISSRR1_NOTFOUND 0x40000000
+#define ISSRR1_DIRECT 0x10000000
+#define ISSRR1_PROTECT 0x08000000
+#define ISSRR1_SEGMENT 0x00200000
+
+#ifdef _KERNEL
+#ifndef LOCORE
+extern u_int dsisr __P((void));
+#endif /* LOCORE */
+#endif /* _KERNEL */
+#endif /* _MACHINE_PTE_H_ */
diff --git a/sys/powerpc/include/reg.h b/sys/powerpc/include/reg.h
new file mode 100644
index 0000000..dfe9d96
--- /dev/null
+++ b/sys/powerpc/include/reg.h
@@ -0,0 +1,27 @@
+/* $NetBSD: reg.h,v 1.4 2000/06/04 09:30:44 tsubai Exp $ */
+/* $FreeBSD$ */
+
+#ifndef _POWERPC_REG_H_
+#define _POWERPC_REG_H_
+
+struct reg {
+ register_t fixreg[32];
+ register_t lr;
+ int cr;
+ int xer;
+ register_t ctr;
+ register_t pc;
+};
+
+struct fpreg {
+ double fpreg[32];
+ double fpscr;
+};
+
+struct dbreg {
+ unsigned long junk;
+};
+
+void setregs(struct proc *, u_long, u_long, u_long);
+
+#endif /* _POWERPC_REG_H_ */
diff --git a/sys/powerpc/include/signal.h b/sys/powerpc/include/signal.h
new file mode 100644
index 0000000..ea9c9be
--- /dev/null
+++ b/sys/powerpc/include/signal.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: signal.h,v 1.4 1998/09/14 02:48:34 thorpej Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SIGNAL_H_
+#define _MACHINE_SIGNAL_H_
+
+#define MINSIGSTKSZ (512 * 4)
+
+typedef int sig_atomic_t;
+
+typedef unsigned int osigset_t;
+
+#if !defined(_ANSI_SOURCE) && !defined(_POSIX_C_SOURCE) && \
+ !defined(_XOPEN_SOURCE)
+#include <machine/frame.h>
+
+#if defined(__LIBC12_SOURCE__) || defined(_KERNEL)
+struct sigcontext13 {
+ int sc_onstack; /* saved onstack flag */
+ int sc_mask; /* saved signal mask (old style) */
+ struct trapframe sc_frame; /* saved registers */
+};
+#endif /* __LIBC12_SOURCE__ || _KERNEL */
+
+struct osigcontext {
+ int sc_onstack; /* saved onstack flag */
+ int __sc_mask13; /* saved signal mask (old style) */
+ struct trapframe sc_frame; /* saved registers */
+ sigset_t sc_mask; /* saved signal mask (new style) */
+};
+
+struct sigcontext {
+ int sc_onstack; /* saved onstack flag */
+ int __sc_mask13; /* saved signal mask (old style) */
+ struct trapframe sc_frame; /* saved registers */
+ sigset_t sc_mask; /* saved signal mask (new style) */
+};
+
+#endif /* !_ANSI_SOURCE && !_POSIX_C_SOURCE && !_XOPEN_SOURCE */
+#endif /* !_MACHINE_SIGNAL_H_ */
diff --git a/sys/powerpc/include/stdarg.h b/sys/powerpc/include/stdarg.h
new file mode 100644
index 0000000..4cabdaf
--- /dev/null
+++ b/sys/powerpc/include/stdarg.h
@@ -0,0 +1,123 @@
+/*-
+ * Copyright (c) 2000 Tsubai Masanari. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: stdarg.h,v 1.5 2000/02/27 17:50:21 tsubai Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _POWERPC_STDARG_H_
+#define _POWERPC_STDARG_H_
+
+#include <machine/ansi.h>
+
+#if 0
+typedef struct {
+ char __gpr; /* GPR offset */
+ char __fpr; /* FPR offset */
+/* char __pad[2]; */
+ char *__stack; /* args passed on stack */
+ char *__base; /* args passed by registers (r3-r10, f1-f8) */
+} va_list;
+#endif
+
+typedef _BSD_VA_LIST_ va_list;
+
+#ifdef __lint__
+
+#define va_start(ap, last) ((ap) = *(va_list *)0)
+#define va_arg(ap, type) (*(type *)(void *)&(ap))
+
+#else
+
+#define va_start(ap, last) \
+ (__builtin_next_arg(last), \
+ (ap).__stack = __va_stack_args, \
+ (ap).__base = __va_reg_args, \
+ (ap).__gpr = __va_first_gpr, \
+ (ap).__fpr = __va_first_fpr)
+
+#define __va_first_gpr (__builtin_args_info(0))
+#define __va_first_fpr (__builtin_args_info(1) - 32 - 1)
+#define __va_stack_args \
+ ((char *)__builtin_saveregs() + \
+ (__va_first_gpr >= 8 ? __va_first_gpr - 8 : 0) * sizeof(int))
+#define __va_reg_args \
+ ((char *)__builtin_frame_address(0) + __builtin_args_info(4))
+
+#define __INTEGER_TYPE_CLASS 1
+#define __REAL_TYPE_CLASS 8
+#define __RECORD_TYPE_CLASS 12
+
+#define __va_longlong(type) \
+ (__builtin_classify_type(*(type *)0) == __INTEGER_TYPE_CLASS && \
+ sizeof(type) == 8)
+
+#define __va_double(type) \
+ (__builtin_classify_type(*(type *)0) == __REAL_TYPE_CLASS)
+
+#define __va_struct(type) \
+ (__builtin_classify_type(*(type *)0) >= __RECORD_TYPE_CLASS)
+
+#define __va_size(type) \
+ ((sizeof(type) + sizeof(int) - 1) / sizeof(int) * sizeof(int))
+
+#define __va_savedgpr(ap, type) \
+ ((ap).__base + (ap).__gpr * sizeof(int) - sizeof(type))
+
+#define __va_savedfpr(ap, type) \
+ ((ap).__base + 8 * sizeof(int) + (ap).__fpr * sizeof(double) - \
+ sizeof(type))
+
+#define __va_stack(ap, type) \
+ ((ap).__stack += __va_size(type) + \
+ (__va_longlong(type) ? (int)(ap).__stack & 4 : 0), \
+ (ap).__stack - sizeof(type))
+
+#define __va_gpr(ap, type) \
+ ((ap).__gpr += __va_size(type) / sizeof(int) + \
+ (__va_longlong(type) ? (ap).__gpr & 1 : 0), \
+ (ap).__gpr <= 8 ? __va_savedgpr(ap, type) : __va_stack(ap, type))
+
+#define __va_fpr(ap, type) \
+ ((ap).__fpr++, \
+ (ap).__fpr <= 8 ? __va_savedfpr(ap, type) : __va_stack(ap, type))
+
+#define va_arg(ap, type) \
+ (*(type *)(__va_struct(type) ? (*(void **)__va_gpr(ap, void *)) : \
+ __va_double(type) ? __va_fpr(ap, type) : \
+ __va_gpr(ap, type)))
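+
+/*
+ * Example (added note): for ``double d = va_arg(ap, double);'',
+ * __va_double() is true, so the value is fetched by __va_fpr(): the
+ * first eight double arguments come from the saved FPR area (f1-f8),
+ * later ones from the stack via __va_stack().
+ */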
+
+#endif /* __lint__ */
+
+#define va_end(ap)
+
+#if !defined(_ANSI_SOURCE) && \
+ (!defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE) || \
+ defined(_ISOC99_SOURCE) || (__STDC_VERSION__ - 0) >= 199901L)
+#define va_copy(dest, src) \
+ ((dest) = (src))
+#endif
+
+#endif /* _POWERPC_STDARG_H_ */
diff --git a/sys/powerpc/include/trap.h b/sys/powerpc/include/trap.h
new file mode 100644
index 0000000..dd359d2
--- /dev/null
+++ b/sys/powerpc/include/trap.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: trap.h,v 1.3 2000/05/25 21:10:14 is Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _POWERPC_TRAP_H_
+#define _POWERPC_TRAP_H_
+
+#define EXC_RSVD 0x0000 /* Reserved */
+#define EXC_RST 0x0100 /* Reset */
+#define EXC_MCHK 0x0200 /* Machine Check */
+#define EXC_DSI 0x0300 /* Data Storage Interrupt */
+#define EXC_ISI 0x0400 /* Instruction Storage Interrupt */
+#define EXC_EXI 0x0500 /* External Interrupt */
+#define EXC_ALI 0x0600 /* Alignment Interrupt */
+#define EXC_PGM 0x0700 /* Program Interrupt */
+#define EXC_FPU 0x0800 /* Floating-point Unavailable */
+#define EXC_DECR 0x0900 /* Decrementer Interrupt */
+#define EXC_SC 0x0c00 /* System Call */
+#define EXC_TRC 0x0d00 /* Trace */
+#define EXC_FPA 0x0e00 /* Floating-point Assist */
+
+/* The following are only available on 604: */
+#define EXC_PERF 0x0f00 /* Performance Monitoring */
+#define EXC_BPT 0x1300 /* Instruction Breakpoint */
+#define EXC_SMI 0x1400 /* System Management Interrupt */
+
+/* And these are only on the 603: */
+#define EXC_IMISS 0x1000 /* Instruction translation miss */
+#define EXC_DLMISS 0x1100 /* Data load translation miss */
+#define EXC_DSMISS 0x1200 /* Data store translation miss */
+
+#define EXC_LAST 0x2f00 /* Last possible exception vector */
+
+#define EXC_AST 0x3000 /* Fake AST vector */
+
+/* Trap was in user mode */
+#define EXC_USER 0x10000
+
+
+/*
+ * EXC_ALI sets bits in the DSISR and DAR to provide enough
+ * information to recover from the unaligned access without needing to
+ * parse the offending instruction. This includes certain bits of the
+ * opcode, and information about what registers are used. The opcode
+ * indicator values below come from Appendix F of Book III of "The
+ * PowerPC Architecture".
+ */
+
+#define EXC_ALI_OPCODE_INDICATOR(dsisr) ((dsisr >> 10) & 0x7f)
+#define EXC_ALI_LFD 0x09
+#define EXC_ALI_STFD 0x0b
+
+/* Macros to extract register information */
+#define EXC_ALI_RST(dsisr) ((dsisr >> 5) & 0x1f) /* source or target */
+#define EXC_ALI_RA(dsisr) (dsisr & 0x1f)
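+
+/*
+ * Example (sketch; ``dsisr'' and ``dar'' are assumed to be taken from
+ * the trap frame): recognizing an unaligned lfd in the alignment
+ * handler:
+ *
+ *	if (EXC_ALI_OPCODE_INDICATOR(dsisr) == EXC_ALI_LFD) {
+ *		int rt = EXC_ALI_RST(dsisr);	(target FPR)
+ *		int ra = EXC_ALI_RA(dsisr);	(base GPR)
+ *		... load 8 bytes from dar into FPR rt ...
+ *	}
+ */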
+
+#ifndef LOCORE
+
+void trap(struct trapframe *);
+
+#endif /* !LOCORE */
+
+#endif /* _POWERPC_TRAP_H_ */
diff --git a/sys/powerpc/include/varargs.h b/sys/powerpc/include/varargs.h
new file mode 100644
index 0000000..3e1c0a9
--- /dev/null
+++ b/sys/powerpc/include/varargs.h
@@ -0,0 +1,50 @@
+/*-
+ * Copyright (c) 2000 Tsubai Masanari. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: varargs.h,v 1.5 2000/02/27 17:50:22 tsubai Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _POWERPC_VARARGS_H_
+#define _POWERPC_VARARGS_H_
+
+#include <machine/stdarg.h>
+
+#define va_alist __builtin_va_alist
+#define va_dcl int __builtin_va_alist; ...
+
+#undef va_start
+
+#ifdef __lint__
+#define va_start(ap) ((ap) = *(va_list *)0)
+#else
+#define va_start(ap) \
+ ((ap).__stack = __va_stack_args, \
+ (ap).__base = __va_reg_args, \
+ (ap).__gpr = __va_first_gpr, \
+ (ap).__fpr = __va_first_fpr)
+#endif
+
+#endif /* _POWERPC_VARARGS_H_ */
diff --git a/sys/powerpc/include/vmparam.h b/sys/powerpc/include/vmparam.h
new file mode 100644
index 0000000..0434e97
--- /dev/null
+++ b/sys/powerpc/include/vmparam.h
@@ -0,0 +1,124 @@
+/*-
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: vmparam.h,v 1.11 2000/02/11 19:25:16 thorpej Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_VMPARAM_H_
+#define _MACHINE_VMPARAM_H_
+
+#define USRTEXT NBPG
+#define USRSTACK VM_MAXUSER_ADDRESS
+
+#ifndef MAXTSIZ
+#define MAXTSIZ (16*1024*1024) /* max text size */
+#endif
+
+#ifndef DFLDSIZ
+#define DFLDSIZ (32*1024*1024) /* default data size */
+#endif
+
+#ifndef MAXDSIZ
+#define MAXDSIZ (512*1024*1024) /* max data size */
+#endif
+
+#ifndef DFLSSIZ
+#define DFLSSIZ (1*1024*1024) /* default stack size */
+#endif
+
+#ifndef MAXSSIZ
+#define MAXSSIZ (32*1024*1024) /* max stack size */
+#endif
+
+/*
+ * Size of shared memory map
+ */
+#ifndef SHMMAXPGS
+#define SHMMAXPGS 1024
+#endif
+
+/*
+ * Size of User Raw I/O map
+ */
+#define USRIOSIZE 1024
+
+/*
+ * The time for a process to be blocked before being very swappable.
+ * This is the number of seconds the system treats as a non-trivial
+ * amount of real time.  You probably shouldn't change it: it is used
+ * in subtle ways, as fractions and multiples of itself (half of a
+ * ``long time'', almost a long time, and so on).
+ * It is related to human patience and other factors which don't really
+ * change over time.
+ */
+#define MAXSLP 20
+
+/*
+ * Would like to have MAX addresses = 0, but this doesn't (currently) work
+ */
+#define VM_MIN_ADDRESS ((vm_offset_t)0)
+#define VM_MAXUSER_ADDRESS ((vm_offset_t)0x7ffff000)
+#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS
+#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)(KERNEL_SR << ADDR_SR_SHFT))
+#define VM_MAX_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + SEGMENT_LENGTH - 1)
+
+/* XXX max. amount of KVM to be used by buffers. */
+#ifndef VM_MAX_KERNEL_BUF
+#define VM_MAX_KERNEL_BUF (SEGMENT_LENGTH * 7 / 10)
+#endif
+
+#define VM_PHYS_SIZE (USRIOSIZE * NBPG)
+
+struct pmap_physseg {
+ struct pv_entry *pvent;
+ char *attrs;
+};
+
+#define VM_PHYSSEG_MAX 16 /* 1? */
+#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
+#define VM_PHYSSEG_NOADD /* can't add RAM after vm_mem_init */
+
+#define VM_NFREELIST 1
+#define VM_FREELIST_DEFAULT 0
+
+#ifndef VM_INITIAL_PAGEIN
+#define VM_INITIAL_PAGEIN 16
+#endif
+
+#ifndef SGROWSIZ
+#define SGROWSIZ (128UL*1024) /* amount to grow stack */
+#endif
+
+#ifndef VM_KMEM_SIZE
+#define VM_KMEM_SIZE (12 * 1024 * 1024)
+#endif
+
+#endif /* _MACHINE_VMPARAM_H_ */
diff --git a/sys/powerpc/powerpc/bcopy.c b/sys/powerpc/powerpc/bcopy.c
new file mode 100644
index 0000000..1938af3
--- /dev/null
+++ b/sys/powerpc/powerpc/bcopy.c
@@ -0,0 +1,154 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char *sccsid = "from: @(#)bcopy.c 5.11 (Berkeley) 6/21/91";
+#endif
+#if 0
+static char *rcsid = "$NetBSD: bcopy.c,v 1.2 1997/04/16 22:09:41 thorpej Exp $";
+#endif
+#endif /* LIBC_SCCS and not lint */
+#ifndef lint
+static char *rcsid =
+ "$FreeBSD$";
+#endif
+
+#include <sys/param.h>
+#ifdef _KERNEL
+#include <sys/systm.h>
+#else
+#include <string.h>
+#endif
+
+/*
+ * sizeof(word) MUST BE A POWER OF TWO
+ * SO THAT wmask BELOW IS ALL ONES
+ */
+typedef int word; /* "word" used for optimal copy speed */
+
+#define wsize sizeof(word)
+#define wmask (wsize - 1)
+
+/*
+ * Copy a block of memory, handling overlap.
+ * This is the routine that actually implements
+ * (the portable versions of) bcopy, memcpy, and memmove.
+ */
+void *
+memcpy(void *dst0, const void *src0, size_t length)
+{
+ char *dst;
+ const char *src;
+ size_t t;
+
+ dst = dst0;
+ src = src0;
+
+ if (length == 0 || dst == src) { /* nothing to do */
+ goto done;
+ }
+
+ /*
+ * Macros: loop-t-times; and loop-t-times, t>0
+ */
+#define TLOOP(s) if (t) TLOOP1(s)
+#define TLOOP1(s) do { s; } while (--t)
+
+ if ((unsigned long)dst < (unsigned long)src) {
+ /*
+ * Copy forward.
+ */
+ t = (int)src; /* only need low bits */
+
+ if ((t | (int)dst) & wmask) {
+ /*
+ * Try to align operands. This cannot be done
+ * unless the low bits match.
+ */
+ if ((t ^ (int)dst) & wmask || length < wsize) {
+ t = length;
+ } else {
+ t = wsize - (t & wmask);
+ }
+
+ length -= t;
+ TLOOP1(*dst++ = *src++);
+ }
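+		/*
+		 * Worked example (added note): copying length = 10 from
+		 * src = 0x1003 to dst = 0x2003 with 4-byte words moves
+		 * 1 byte above to reach alignment, then 2 whole words
+		 * below, then the 1 trailing byte.
+		 */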
+ /*
+ * Copy whole words, then mop up any trailing bytes.
+ */
+ t = length / wsize;
+ TLOOP(*(word *)dst = *(const word *)src; src += wsize;
+ dst += wsize);
+ t = length & wmask;
+ TLOOP(*dst++ = *src++);
+ } else {
+ /*
+ * Copy backwards. Otherwise essentially the same.
+ * Alignment works as before, except that it takes
+ * (t&wmask) bytes to align, not wsize-(t&wmask).
+ */
+ src += length;
+ dst += length;
+ t = (int)src;
+
+ if ((t | (int)dst) & wmask) {
+ if ((t ^ (int)dst) & wmask || length <= wsize) {
+ t = length;
+ } else {
+ t &= wmask;
+ }
+
+ length -= t;
+ TLOOP1(*--dst = *--src);
+ }
+ t = length / wsize;
+ TLOOP(src -= wsize; dst -= wsize;
+ *(word *)dst = *(const word *)src);
+ t = length & wmask;
+ TLOOP(*--dst = *--src);
+ }
+done:
+ return (dst0);
+}
+
+void
+bcopy(const void *src0, void *dst0, size_t length)
+{
+
+ memcpy(dst0, src0, length);
+}
diff --git a/sys/powerpc/powerpc/clock.c b/sys/powerpc/powerpc/clock.c
new file mode 100644
index 0000000..c2a9944
--- /dev/null
+++ b/sys/powerpc/powerpc/clock.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: clock.c,v 1.9 2000/01/19 02:52:19 msaitoh Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/timetc.h>
+#include <sys/interrupt.h>
+
+#include <vm/vm.h>
+
+#include <dev/ofw/openfirm.h>
+
+#include <machine/clock.h>
+#include <machine/cpu.h>
+
+#if 0 /* XXX */
+#include "adb.h"
+#else
+#define NADB 0
+#endif
+
+/*
+ * Initially we assume a processor with a bus frequency of 12.5 MHz.
+ */
+static u_long ns_per_tick = 80;
+static long ticks_per_intr;
+static volatile u_long lasttb;
+
+#define SECDAY 86400
+#define DIFF19041970 2082844800
+
+#if NADB > 0
+extern int adb_read_date_time __P((int *));
+extern int adb_set_date_time __P((int));
+#endif
+
+static int clockinitted = 0;
+
+void
+inittodr(time_t base)
+{
+ time_t deltat;
+ u_int rtc_time;
+ struct timespec ts;
+
+ /*
+	 * If we can't read the RTC, fall back on the file system time
+	 * handed to us in `base'.
+ */
+#if NADB > 0
+ if (adb_read_date_time(&rtc_time) < 0)
+#endif
+ {
+ ts.tv_sec = base;
+ ts.tv_nsec = 0;
+ tc_setclock(&ts);
+ return;
+ }
+ clockinitted = 1;
+ ts.tv_sec = rtc_time - DIFF19041970;
+
+ deltat = ts.tv_sec - base;
+ if (deltat < 0) {
+ deltat = -deltat;
+ }
+ if (deltat < 2 * SECDAY) {
+ tc_setclock(&ts);
+ return;
+ }
+
+ printf("WARNING: clock %s %d days",
+ ts.tv_sec < base ? "lost" : "gained", (int)(deltat / SECDAY));
+
+ printf(" -- CHECK AND RESET THE DATE!\n");
+}
+
+/*
+ * The inverse of inittodr(): write the current time back to the RTC.
+ */
+void
+resettodr()
+{
+#if NADB > 0
+ u_int rtc_time;
+
+ if (clockinitted) {
+ rtc_time = time.tv_sec + DIFF19041970;
+ adb_set_date_time(rtc_time);
+ }
+#endif
+}
+
+void
+decr_intr(struct clockframe *frame)
+{
+ u_long tb;
+ long tick;
+ int nticks;
+
+ /*
+ * Check whether we are initialized.
+ */
+ if (!ticks_per_intr)
+ return;
+
+ /*
+ * Based on the actual time delay since the last decrementer reload,
+ * we arrange for earlier interrupt next time.
+ */
+ __asm ("mftb %0; mfdec %1" : "=r"(tb), "=r"(tick));
+ for (nticks = 0; tick < 0; nticks++)
+ tick += ticks_per_intr;
+ __asm __volatile ("mtdec %0" :: "r"(tick));
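+	/*
+	 * Worked example (added note): with ticks_per_intr = 1000 and
+	 * the decrementer read back as -150 (150 ticks of interrupt
+	 * latency), one loop pass yields nticks = 1 and tick = 850, so
+	 * the next interrupt fires 150 ticks early to stay on schedule.
+	 */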
+ /*
+ * lasttb is used during microtime. Set it to the virtual
+ * start of this tick interval.
+ */
+ lasttb = tb + tick - ticks_per_intr;
+
+#if 0 /* XXX */
+ intrcnt[CNT_CLOCK]++;
+ {
+ int pri;
+ int msr;
+
+ pri = splclock();
+ if (pri & (1 << SPL_CLOCK)) {
+ tickspending += nticks;
+ }
+ else {
+ nticks += tickspending;
+ tickspending = 0;
+
+ /*
+ * Reenable interrupts
+ */
+ __asm __volatile ("mfmsr %0; ori %0, %0, %1; mtmsr %0"
+ : "=r"(msr) : "K"(PSL_EE));
+
+ /*
+ * Do standard timer interrupt stuff.
+ * Do softclock stuff only on the last iteration.
+ */
+ frame->pri = pri | (1 << SIR_CLOCK);
+ while (--nticks > 0)
+ hardclock(frame);
+ frame->pri = pri;
+ hardclock(frame);
+ }
+ splx(pri);
+ }
+#endif
+}
+
+void
+cpu_initclocks(void)
+{
+
+ /* Do nothing */
+}
+
+static __inline u_quad_t
+mftb(void)
+{
+ u_long scratch;
+ u_quad_t tb;
+
+	__asm ("1: mftbu %0; mftb %0+1; mftbu %1; cmpw 0,%0,%1; bne 1b"
+	    : "=r"(tb), "=r"(scratch) : : "cr0");
+ return tb;
+}
+
+/*
+ * Wait for about n microseconds (at least!).
+ */
+void
+delay(unsigned n)
+{
+ u_quad_t tb;
+ u_long tbh, tbl, scratch;
+
+ tb = mftb();
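+	/*
+	 * Worked example (added note): with the assumed ns_per_tick of
+	 * 80 (a 12.5 MHz time base), delay(100) adds
+	 * (100 * 1000 + 79) / 80 = 1250 timebase ticks below.
+	 */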
+ tb += (n * 1000 + ns_per_tick - 1) / ns_per_tick;
+ tbh = tb >> 32;
+ tbl = tb;
+	__asm __volatile ("1: mftbu %0; cmplw %0,%1; blt 1b; bgt 2f;"
+	    "mftb %0; cmplw %0,%2; blt 1b; 2:"
+	    : "=&r"(scratch) : "r"(tbh), "r"(tbl) : "cr0");
+}
+
+/*
+ * Nothing to do.
+ */
+void
+setstatclockrate(int arg)
+{
+
+ /* Do nothing */
+}
diff --git a/sys/powerpc/powerpc/copyinstr.c b/sys/powerpc/powerpc/copyinstr.c
new file mode 100644
index 0000000..a0a975f
--- /dev/null
+++ b/sys/powerpc/powerpc/copyinstr.c
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (C) 1995 Wolfgang Solfrank.
+ * Copyright (C) 1995 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: copyinstr.c,v 1.3 2000/02/19 23:29:16 chs Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/systm.h>
+
+/*
+ * Emulate copyinstr.
+ */
+int
+copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done)
+{
+ const u_char *up;
+ u_char *kp;
+ size_t l;
+ int c, rv;
+
+ up = udaddr;
+ kp = kaddr;
+
+ rv = ENAMETOOLONG;
+ for (l = 0; len-- > 0; l++) {
+ if ((c = fubyte(up++)) < 0) {
+ rv = EFAULT;
+ break;
+ }
+ if (!(*kp++ = c)) {
+ l++;
+ rv = 0;
+ break;
+ }
+ }
+ if (done != NULL) {
+ *done = l;
+ }
+ return rv;
+}
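A hypothetical caller, to make the contract explicit: the return value is 0
on success, EFAULT when a user byte cannot be fetched, and ENAMETOOLONG when
no NUL terminator appears within len bytes; *done counts the bytes
transferred, including the NUL on success. fetch_path() below is a sketch,
not part of this commit:

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/systm.h>

    static int
    fetch_path(const void *user_path, char *path, size_t pathlen)
    {
            size_t done;
            int error;

            error = copyinstr(user_path, path, pathlen, &done);
            if (error != 0)
                    return (error);         /* EFAULT or ENAMETOOLONG */
            /* success: done == strlen(path) + 1 */
            return (0);
    }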
diff --git a/sys/powerpc/powerpc/copystr.c b/sys/powerpc/powerpc/copystr.c
new file mode 100644
index 0000000..d7bedc2
--- /dev/null
+++ b/sys/powerpc/powerpc/copystr.c
@@ -0,0 +1,70 @@
+/*-
+ * Copyright (C) 1995 Wolfgang Solfrank.
+ * Copyright (C) 1995 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: copystr.c,v 1.3 2000/06/08 06:47:17 kleink Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/systm.h>
+
+/*
+ * Emulate copystr.
+ */
+int
+copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
+{
+ const u_char *kfp = kfaddr;
+ u_char *kdp = kdaddr;
+ size_t l;
+ int rv;
+
+ rv = ENAMETOOLONG;
+ for (l = 0; len-- > 0; l++) {
+ if (!(*kdp++ = *kfp++)) {
+ l++;
+ rv = 0;
+ break;
+ }
+ }
+ if (done != NULL) {
+ *done = l;
+ }
+ return rv;
+}
diff --git a/sys/powerpc/powerpc/fubyte.c b/sys/powerpc/powerpc/fubyte.c
new file mode 100644
index 0000000..35aa8d5
--- /dev/null
+++ b/sys/powerpc/powerpc/fubyte.c
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (C) 1993 Wolfgang Solfrank.
+ * Copyright (C) 1993 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: fubyte.c,v 1.2 2000/06/08 07:29:22 kleink Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/systm.h>
+
+/*
+ * Emulate fubyte.
+ */
+int
+fubyte(const void *addr)
+{
+ unsigned char c;
+
+	if (copyin(addr, &c, sizeof(c))) {
+ return -1;
+ }
+
+ return c;
+}
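Because the fetched byte is widened to int, every successful return is in the
range 0-255 and -1 unambiguously signals a fault. A hypothetical caller
(peek_user_byte() is a sketch, not part of this commit):

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/systm.h>

    static int
    peek_user_byte(const void *uaddr, u_char *out)
    {
            int c;

            if ((c = fubyte(uaddr)) < 0)
                    return (EFAULT);        /* user address unmapped */
            *out = (u_char)c;
            return (0);
    }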
diff --git a/sys/powerpc/powerpc/fuswintr.c b/sys/powerpc/powerpc/fuswintr.c
new file mode 100644
index 0000000..a1079b0
--- /dev/null
+++ b/sys/powerpc/powerpc/fuswintr.c
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (C) 1994 Wolfgang Solfrank.
+ * Copyright (C) 1994 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: fuswintr.c,v 1.2 2000/06/08 07:29:54 kleink Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/param.h>
+#include <sys/resourcevar.h>
+
+/*
+ * Emulate fuswintr: fetching from user space at interrupt time is
+ * not supported on this port, so always report a fault.
+ */
+int
+fuswintr(void *addr)
+{
+
+ return -1;
+}
diff --git a/sys/powerpc/powerpc/fuword.c b/sys/powerpc/powerpc/fuword.c
new file mode 100644
index 0000000..2965ca9
--- /dev/null
+++ b/sys/powerpc/powerpc/fuword.c
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (C) 1993 Wolfgang Solfrank.
+ * Copyright (C) 1993 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: fubyte.c,v 1.2 2000/06/08 07:29:22 kleink Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/systm.h>
+
+/*
+ * Emulate fuword.
+ */
+long
+fuword(const void *addr)
+{
+ long l;
+
+ if (copyin(addr, &l, sizeof(l))) {
+ return -1;
+ }
+
+ return l;
+}
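Unlike fubyte(), fuword() cannot distinguish a fault from a legitimately
stored -1, since both come back as -1. Where that matters, copyin() gives an
unambiguous error, as in this sketch (fetch_user_long() is hypothetical):

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/systm.h>

    static int
    fetch_user_long(const void *uaddr, long *valp)
    {
            long l;

            if (copyin(uaddr, &l, sizeof(l)) != 0)
                    return (EFAULT);
            *valp = l;
            return (0);
    }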
diff --git a/sys/powerpc/powerpc/locore.S b/sys/powerpc/powerpc/locore.S
new file mode 100644
index 0000000..2ede211
--- /dev/null
+++ b/sys/powerpc/powerpc/locore.S
@@ -0,0 +1,1373 @@
+/* $FreeBSD$ */
+/* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */
+
+/*
+ * Copyright (C) 2001 Benno Rice
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BENNO RICE BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_ddb.h"
+#include "opt_ipkdb.h"
+#include "assym.s"
+
+#include <sys/syscall.h>
+
+#include <machine/trap.h>
+#include <machine/param.h>
+#include <machine/pmap.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+
+/*
+ * Some instructions gas doesn't understand (yet?)
+ */
+#define bdneq bdnzf 2,
+
+/*
+ * Globals
+ */
+ .data
+GLOBAL(tmpstk)
+ .space 8208
+GLOBAL(esym)
+ .long 0 /* end of symbol table */
+GLOBAL(proc0paddr)
+ .long 0 /* proc0 p_addr */
+GLOBAL(PTmap)
+ .long 0 /* PTmap */
+
+GLOBAL(intrnames)
+ .asciz "irq0", "irq1", "irq2", "irq3"
+ .asciz "irq4", "irq5", "irq6", "irq7"
+ .asciz "irq8", "irq9", "irq10", "irq11"
+ .asciz "irq12", "irq13", "irq14", "irq15"
+ .asciz "irq16", "irq17", "irq18", "irq19"
+ .asciz "irq20", "irq21", "irq22", "irq23"
+ .asciz "irq24", "irq25", "irq26", "irq27"
+ .asciz "irq28", "irq29", "irq30", "irq31"
+ .asciz "irq32", "irq33", "irq34", "irq35"
+ .asciz "irq36", "irq37", "irq38", "irq39"
+ .asciz "irq40", "irq41", "irq42", "irq43"
+ .asciz "irq44", "irq45", "irq46", "irq47"
+ .asciz "irq48", "irq49", "irq50", "irq51"
+ .asciz "irq52", "irq53", "irq54", "irq55"
+ .asciz "irq56", "irq57", "irq58", "irq59"
+ .asciz "irq60", "irq61", "irq62", "irq63"
+ .asciz "clock", "softclock", "softnet", "softserial"
+GLOBAL(eintrnames)
+ .align 4
+GLOBAL(intrcnt)
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .long 0,0,0,0
+GLOBAL(eintrcnt)
+
+GLOBAL(ofmsr)
+ .long 0 /* msr used in Open Firmware */
+
+GLOBAL(powersave)
+ .long 0
+
+/*
+ * File-scope for locore.S
+ */
+idle_u:
+ .long 0 /* fake uarea during idle after exit */
+openfirmware_entry:
+ .long 0 /* openfirmware entry point */
+srsave:
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+
+/*
+ * This symbol is here for the benefit of kvm_mkdb, and is supposed to
+ * mark the start of kernel text.
+ */
+ .text
+ .globl kernel_text
+kernel_text:
+
+/*
+ * Startup entry. Note, this must be the first thing in the text
+ * segment!
+ */
+ .text
+ .globl start
+start:
+#ifdef FIRMWORKSBUGS
+ mfmsr 0
+ andi. 0,0,PSL_IR|PSL_DR
+ beq 1f
+
+ bl ofwr_init
+1:
+#endif
+ li 8,0
+ li 9,0x100
+ mtctr 9
+1:
+ dcbf 0,8
+ icbi 0,8
+ addi 8,8,0x20
+ bdnz 1b
+ sync
+ isync
+
+ mtibatu 0,0
+ mtibatu 1,0
+ mtibatu 2,0
+ mtibatu 3,0
+ mtdbatu 0,0
+ mtdbatu 1,0
+ mtdbatu 2,0
+ mtdbatu 3,0
+
+ li 9,0x12
+ mtibatl 0,9
+ mtdbatl 0,9
+ li 9,0x1ffe
+ mtibatu 0,9
+ mtdbatu 0,9
+ isync
+
+ lis 8,openfirmware_entry@ha
+ stw 5,openfirmware_entry@l(8) /* save client interface handler */
+ mr 3,5
+
+ lis 1,tmpstk@ha
+ addi 1,1,tmpstk@l
+ addi 1,1,8192
+
+ mfmsr 0
+ lis 9,ofmsr@ha
+ stw 0,ofmsr@l(9)
+
+ bl OF_init
+
+ lis 4,end@ha
+ addi 4,4,end@l
+ mr 5,4
+ li 9,PAGE_MASK
+ add 4,4,9
+ andc 4,4,9
+ lis 9,OF_buf@ha
+ stw 4,OF_buf@l(9)
+ addi 4,4,PAGE_SIZE
+ lis 9,proc0paddr@ha
+ stw 4,proc0paddr@l(9)
+ addi 4,4,USPACE-FRAMELEN
+ mr 1,4
+ xor 0,0,0
+ stwu 0,-16(1)
+
+ lis 3,kernel_text@ha
+ addi 3,3,kernel_text@l
+#if 0
+ mr 5,6
+#endif
+
+ bl powerpc_init
+ bl mi_startup
+ b OF_exit
+
+#if 0 /* XXX: We may switch back to this in the future. */
+/*
+ * OpenFirmware entry point
+ */
+ENTRY(openfirmware)
+ mflr 0 /* save return address */
+ stw 0,4(1)
+ stwu 1,-16(1) /* setup stack frame */
+
+ mfmsr 4 /* save msr */
+ stw 4,8(1)
+
+ lis 4,openfirmware_entry@ha /* get firmware entry point */
+ lwz 4,openfirmware_entry@l(4)
+ mtlr 4
+
+ li 0,0 /* clear battable translations */
+ mtdbatu 2,0
+ mtdbatu 3,0
+ mtibatu 2,0
+ mtibatu 3,0
+
+ lis 4,ofmsr@ha /* Open Firmware msr */
+ lwz 4,ofmsr@l(4)
+ mtmsr 4
+ isync
+
+ lis 4,srsave@ha /* save old SR */
+ addi 4,4,srsave@l
+ li 5,0
+1: mfsrin 0,5
+ stw 0,0(4)
+ addi 4,4,4
+ addis 5,5,0x10000000@h
+ cmpwi 5,0
+ bne 1b
+
+ lis 4,ofw_pmap@ha /* load OFW SR */
+ addi 4,4,ofw_pmap@l
+ lwz 0,PM_KERNELSR(4)
+ cmpwi 0,0 /* pm_sr[KERNEL_SR] == 0? */
+ beq 2f /* then skip (not initialized yet) */
+ li 5,0
+1: lwz 0,0(4)
+ mtsrin 0,5
+ addi 4,4,4
+ addis 5,5,0x10000000@h
+ cmpwi 5,0
+ bne 1b
+2:
+ blrl /* call Open Firmware */
+
+ mfmsr 4
+ li 5,PSL_IR|PSL_DR
+ andc 4,4,5
+ mtmsr 4
+ isync
+
+ lis 4,srsave@ha /* restore saved SR */
+ addi 4,4,srsave@l
+ li 5,0
+1: lwz 0,0(4)
+ mtsrin 0,5
+ addi 4,4,4
+ addis 5,5,0x10000000@h
+ cmpwi 5,0
+ bne 1b
+
+ lwz 4,8(1) /* restore msr */
+ mtmsr 4
+ isync
+
+ lwz 1,0(1) /* and return */
+ lwz 0,4(1)
+ mtlr 0
+ blr
+#endif
+
+/*
+ * Switch to/from OpenFirmware real mode stack
+ *
+ * Note: has to be called as the very first thing in OpenFirmware interface
+ * routines.
+ * E.g.:
+ * int
+ * OF_xxx(arg1, arg2)
+ * type arg1, arg2;
+ * {
+ * static struct {
+ * char *name;
+ * int nargs;
+ * int nreturns;
+ * char *method;
+ * int arg1;
+ * int arg2;
+ * int ret;
+ * } args = {
+ * "xxx",
+ * 2,
+ * 1,
+ * };
+ *
+ * ofw_stack();
+ * args.arg1 = arg1;
+ * args.arg2 = arg2;
+ * if (openfirmware(&args) < 0)
+ * return -1;
+ * return args.ret;
+ * }
+ */
+
+ .local firmstk
+ .comm firmstk,PAGE_SIZE,8
+
+ENTRY(ofw_stack)
+ mfmsr 8 /* turn off interrupts */
+ andi. 0,8,~(PSL_EE|PSL_RI)@l
+ mtmsr 0
+ stw 8,4(1) /* abuse return address slot */
+
+ lwz 5,0(1) /* get length of stack frame */
+ subf 5,1,5
+
+ lis 7,firmstk+PAGE_SIZE-8@ha
+ addi 7,7,firmstk+PAGE_SIZE-8@l
+ lis 6,ofw_back@ha
+ addi 6,6,ofw_back@l
+ subf 4,5,7 /* make room for stack frame on
+ new stack */
+ stw 6,-4(7) /* setup return pointer */
+ stwu 1,-8(7)
+
+ stw 7,-8(4)
+
+ addi 3,1,8
+ addi 1,4,-8
+ subi 5,5,8
+
+ cmpw 3,4
+ beqlr
+
+ mr 0,5
+ addi 5,5,-1
+ cmpwi 0,0
+ beqlr
+
+1: lwz 0,0(3)
+ stw 0,0(4)
+ addi 3,3,1
+ addi 4,4,1
+ mr 0,5
+ addi 5,5,-1
+ cmpwi 0,0
+ bne 1b
+ blr
+
+ofw_back:
+ lwz 1,0(1) /* get callers original stack pointer */
+
+ lwz 0,4(1) /* get saved msr from abused slot */
+ mtmsr 0
+
+ lwz 1,0(1) /* return */
+ lwz 0,4(1)
+ mtlr 0
+ blr
+
+/*
+ * Data used during primary/secondary traps/interrupts
+ */
+#define tempsave 0x2e0 /* primary save area for trap handling */
+#define disisave 0x3e0 /* primary save area for dsi/isi traps */
+
+#define INTSTK (8*1024) /* 8K interrupt stack */
+ .data
+ .align 4
+intstk:
+ .space INTSTK /* interrupt stack */
+
+GLOBAL(intr_depth)
+ .long -1 /* in-use marker */
+
+#define SPILLSTK 1024 /* 1K spill stack */
+
+ .comm spillstk,SPILLSTK,8
+
+/*
+ * This code gets copied to all the trap vectors
+ * (except ISI/DSI, ALI, the interrupts, and possibly the debugging
+ * traps when using IPKDB).
+ */
+ .text
+ .globl trapcode,trapsize
+trapcode:
+ mtsprg 1,1 /* save SP */
+ stmw 28,tempsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+/* Test whether we already had PR set */
+ mfsrr1 31
+ mtcr 31
+ bc 4,17,1f /* branch if PSL_PR is clear */
+ mfsprg 1,0
+ lwz 1,GD_CURPCB(1)
+ addi 1,1,USPACE /* stack is top of user struct */
+1:
+ bla s_trap
+trapsize = .-trapcode
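trapcode/trapsize exist so that the machine-dependent startup code can stamp
this one handler into each exception vector; trapsize is an absolute
assembler symbol, so from C its *address* is the byte count. A hedged sketch
of the copy (the corresponding loop over the vectors lives in machdep.c;
install_trap_vector() itself is hypothetical):

    #include <sys/param.h>
    #include <sys/systm.h>

    extern int trapcode, trapsize;

    static void
    install_trap_vector(unsigned int exc) /* exc: vector offset, e.g. 0x700 */
    {
            /* copy the template into the vector and sync the icache */
            bcopy(&trapcode, (void *)exc, (size_t)&trapsize);
            __syncicache((void *)exc, (int)(size_t)&trapsize);
    }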
+
+/*
+ * For ALI: has to save DSISR and DAR
+ */
+ .globl alitrap,alisize
+alitrap:
+ mtsprg 1,1 /* save SP */
+ stmw 28,tempsave(0) /* free r28-r31 */
+ mfdar 30
+ mfdsisr 31
+ stmw 30,tempsave+16(0)
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+/* Test whether we already had PR set */
+ mfsrr1 31
+ mtcr 31
+ bc 4,17,1f /* branch if PSL_PR is clear */
+ mfsprg 1,0
+ lwz 1,GD_CURPCB(1)
+ addi 1,1,USPACE /* stack is top of user struct */
+1:
+ bla s_trap
+alisize = .-alitrap
+
+/*
+ * Similar to the above for DSI
+ * Has to handle BAT spills
+ * and standard pagetable spills
+ */
+ .globl dsitrap,dsisize
+dsitrap:
+ stmw 28,disisave(0) /* free r28-r31 */
+ mfcr 29 /* save CR */
+ mfxer 30 /* save XER */
+ mtsprg 2,30 /* in SPRG2 */
+ mfsrr1 31 /* test kernel mode */
+ mtcr 31
+ bc 12,17,1f /* branch if PSL_PR is set */
+ mfdar 31 /* get fault address */
+ rlwinm 31,31,7,25,28 /* get segment * 8 */
+
+ /* get batu */
+ addis 31,31,battable@ha
+ lwz 30,battable@l(31)
+ mtcr 30
+ bc 4,30,1f /* branch if supervisor valid is
+ false */
+ /* get batl */
+ lwz 31,battable+4@l(31)
+/* We randomly use the highest two bat registers here */
+ mftb 28
+ andi. 28,28,1
+ bne 2f
+ mtdbatu 2,30
+ mtdbatl 2,31
+ b 3f
+2:
+ mtdbatu 3,30
+ mtdbatl 3,31
+3:
+ mfsprg 30,2 /* restore XER */
+ mtxer 30
+ mtcr 29 /* restore CR */
+ lmw 28,disisave(0) /* restore r28-r31 */
+ rfi /* return to trapped code */
+1:
+ mflr 28 /* save LR */
+ bla s_dsitrap
+dsisize = .-dsitrap
+
+/*
+ * Similar to the above for ISI
+ */
+ .globl isitrap,isisize
+isitrap:
+ stmw 28,disisave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ mfsrr1 31 /* test kernel mode */
+ mtcr 31
+ bc 12,17,1f /* branch if PSL_PR is set */
+ mfsrr0 31 /* get fault address */
+ rlwinm 31,31,7,25,28 /* get segment * 8 */
+
+ /* get batu */
+ addis 31,31,battable@ha
+ lwz 30,battable@l(31)
+ mtcr 30
+ bc 4,30,1f /* branch if supervisor valid is
+ false */
+ mtibatu 3,30
+
+ /* get batl */
+ lwz 30,battable+4@l(31)
+ mtibatl 3,30
+
+ mtcr 29 /* restore CR */
+ lmw 28,disisave(0) /* restore r28-r31 */
+ rfi /* return to trapped code */
+1:
+ bla s_isitrap
+isisize = .-isitrap
+
+/*
+ * This one for the external interrupt handler.
+ */
+ .globl extint,extsize
+extint:
+ mtsprg 1,1 /* save SP */
+ stmw 28,tempsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ mfxer 30 /* save XER */
+ lis 1,intstk+INTSTK@ha /* get interrupt stack */
+ addi 1,1,intstk+INTSTK@l
+ lwz 31,0(1) /* were we already running on intstk? */
+ addic. 31,31,1
+ stw 31,0(1)
+ beq 1f
+ mfsprg 1,1 /* yes, get old SP */
+1:
+ ba extintr
+extsize = .-extint
+
+/*
+ * And this one for the decrementer interrupt handler.
+ */
+ .globl decrint,decrsize
+decrint:
+ mtsprg 1,1 /* save SP */
+ stmw 28,tempsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ mfxer 30 /* save XER */
+ lis 1,intstk+INTSTK@ha /* get interrupt stack */
+ addi 1,1,intstk+INTSTK@l
+ lwz 31,0(1) /* were we already running on intstk? */
+ addic. 31,31,1
+ stw 31,0(1)
+ beq 1f
+ mfsprg 1,1 /* yes, get old SP */
+1:
+ ba decrintr
+decrsize = .-decrint
+
+/*
+ * Now the tlb software load for 603 processors:
+ * (Code essentially from the 603e User Manual, Chapter 5, but
+ * corrected a lot.)
+ */
+#define DMISS 976
+#define DCMP 977
+#define HASH1 978
+#define HASH2 979
+#define IMISS 980
+#define ICMP 981
+#define RPA 982
+
+ .globl tlbimiss,tlbimsize
+tlbimiss:
+ mfspr 2,HASH1 /* get first pointer */
+ li 1,8
+ mfctr 0 /* save counter */
+ mfspr 3,ICMP /* get first compare value */
+ addi 2,2,-8 /* predec pointer */
+1:
+ mtctr 1 /* load counter */
+2:
+ lwzu 1,8(2) /* get next pte */
+ cmpl 0,1,3 /* see if found pte */
+ bdneq 2b /* loop if not eq */
+ bne 3f /* not found */
+ lwz 1,4(2) /* load tlb entry lower word */
+ andi. 3,1,8 /* check G-bit */
+ bne 4f /* if guarded, take ISI */
+ mtctr 0 /* restore counter */
+ mfspr 0,IMISS /* get the miss address for the tlbli */
+ mfsrr1 3 /* get the saved cr0 bits */
+ mtcrf 0x80,3 /* and restore */
+ ori 1,1,0x100 /* set the reference bit */
+ mtspr RPA,1 /* set the pte */
+ srwi 1,1,8 /* get byte 7 of pte */
+ tlbli 0 /* load the itlb */
+ stb 1,6(2) /* update page table */
+ rfi
+
+3: /* not found in pteg */
+ andi. 1,3,0x40 /* have we already done second hash? */
+ bne 5f
+ mfspr 2,HASH2 /* get the second pointer */
+ ori 3,3,0x40 /* change the compare value */
+ li 1,8
+ addi 2,2,-8 /* predec pointer */
+ b 1b
+4: /* guarded */
+ mfsrr1 3
+ andi. 2,3,0xffff /* clean upper srr1 */
+	oris 2,2,0x8000000@h /* set srr1<4> to flag prot violation */
+ b 6f
+5: /* not found anywhere */
+ mfsrr1 3
+ andi. 2,3,0xffff /* clean upper srr1 */
+ oris 2,2,0x40000000@h /* set srr1<1> to flag pte not found */
+6:
+ mtctr 0 /* restore counter */
+ mtsrr1 2
+ mfmsr 0
+ xoris 0,0,0x20000@h /* flip the msr<tgpr> bit */
+ mtcrf 0x80,3 /* restore cr0 */
+ mtmsr 0 /* now with native gprs */
+ isync
+ ba EXC_ISI
+tlbimsize = .-tlbimiss
+
+ .globl tlbdlmiss,tlbdlmsize
+tlbdlmiss:
+ mfspr 2,HASH1 /* get first pointer */
+ li 1,8
+ mfctr 0 /* save counter */
+ mfspr 3,DCMP /* get first compare value */
+ addi 2,2,-8 /* predec pointer */
+1:
+ mtctr 1 /* load counter */
+2:
+ lwzu 1,8(2) /* get next pte */
+ cmpl 0,1,3 /* see if found pte */
+ bdneq 2b /* loop if not eq */
+ bne 3f /* not found */
+ lwz 1,4(2) /* load tlb entry lower word */
+ mtctr 0 /* restore counter */
+ mfspr 0,DMISS /* get the miss address for the tlbld */
+ mfsrr1 3 /* get the saved cr0 bits */
+ mtcrf 0x80,3 /* and restore */
+ ori 1,1,0x100 /* set the reference bit */
+ mtspr RPA,1 /* set the pte */
+ srwi 1,1,8 /* get byte 7 of pte */
+ tlbld 0 /* load the dtlb */
+ stb 1,6(2) /* update page table */
+ rfi
+
+3: /* not found in pteg */
+ andi. 1,3,0x40 /* have we already done second hash? */
+ bne 5f
+ mfspr 2,HASH2 /* get the second pointer */
+ ori 3,3,0x40 /* change the compare value */
+ li 1,8
+ addi 2,2,-8 /* predec pointer */
+ b 1b
+5: /* not found anywhere */
+ mfsrr1 3
+ lis 1,0x40000000@h /* set dsisr<1> to flag pte not found */
+ mtctr 0 /* restore counter */
+ andi. 2,3,0xffff /* clean upper srr1 */
+ mtsrr1 2
+ mtdsisr 1 /* load the dsisr */
+ mfspr 1,DMISS /* get the miss address */
+ mtdar 1 /* put in dar */
+ mfmsr 0
+ xoris 0,0,0x20000@h /* flip the msr<tgpr> bit */
+ mtcrf 0x80,3 /* restore cr0 */
+ mtmsr 0 /* now with native gprs */
+ isync
+ ba EXC_DSI
+tlbdlmsize = .-tlbdlmiss
+
+ .globl tlbdsmiss,tlbdsmsize
+tlbdsmiss:
+ mfspr 2,HASH1 /* get first pointer */
+ li 1,8
+ mfctr 0 /* save counter */
+ mfspr 3,DCMP /* get first compare value */
+ addi 2,2,-8 /* predec pointer */
+1:
+ mtctr 1 /* load counter */
+2:
+ lwzu 1,8(2) /* get next pte */
+ cmpl 0,1,3 /* see if found pte */
+ bdneq 2b /* loop if not eq */
+ bne 3f /* not found */
+ lwz 1,4(2) /* load tlb entry lower word */
+ andi. 3,1,0x80 /* check the C-bit */
+ beq 4f
+5:
+ mtctr 0 /* restore counter */
+ mfspr 0,DMISS /* get the miss address for the tlbld */
+ mfsrr1 3 /* get the saved cr0 bits */
+ mtcrf 0x80,3 /* and restore */
+ mtspr RPA,1 /* set the pte */
+ tlbld 0 /* load the dtlb */
+ rfi
+
+3: /* not found in pteg */
+ andi. 1,3,0x40 /* have we already done second hash? */
+ bne 5f
+ mfspr 2,HASH2 /* get the second pointer */
+ ori 3,3,0x40 /* change the compare value */
+ li 1,8
+ addi 2,2,-8 /* predec pointer */
+ b 1b
+4: /* found, but C-bit = 0 */
+ rlwinm. 3,1,30,0,1 /* test PP */
+ bge- 7f
+ andi. 3,1,1
+ beq+ 8f
+9: /* found, but protection violation (PP==00)*/
+ mfsrr1 3
+ lis 1,0xa000000@h /* indicate protection violation
+ on store */
+ b 1f
+7: /* found, PP=1x */
+ mfspr 3,DMISS /* get the miss address */
+ mfsrin 1,3 /* get the segment register */
+ mfsrr1 3
+ rlwinm 3,3,18,31,31 /* get PR-bit */
+ rlwnm. 2,2,3,1,1 /* get the key */
+ bne- 9b /* protection violation */
+8: /* found, set reference/change bits */
+ lwz 1,4(2) /* reload tlb entry */
+ ori 1,1,0x180
+ sth 1,6(2)
+ b 5b
+5: /* not found anywhere */
+ mfsrr1 3
+ lis 1,0x42000000@h /* set dsisr<1> to flag pte not found */
+ /* dsisr<6> to flag store */
+1:
+ mtctr 0 /* restore counter */
+ andi. 2,3,0xffff /* clean upper srr1 */
+ mtsrr1 2
+ mtdsisr 1 /* load the dsisr */
+ mfspr 1,DMISS /* get the miss address */
+ mtdar 1 /* put in dar */
+ mfmsr 0
+ xoris 0,0,0x20000@h /* flip the msr<tgpr> bit */
+ mtcrf 0x80,3 /* restore cr0 */
+ mtmsr 0 /* now with native gprs */
+ isync
+ ba EXC_DSI
+tlbdsmsize = .-tlbdsmiss
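A conceptual C model of the search the three 603 handlers above perform: each
hash register points at a group of eight PTEs, the ICMP/DCMP compare value is
matched against the high word of each, and the secondary hash is tried with
the H bit (0x40 in the compare value) set before giving up and raising
ISI/DSI. Illustration only; the real code runs entirely in the shadow GPRs
with no memory stack:

    struct pte {
            unsigned int pte_hi;    /* V, VSID, H, API */
            unsigned int pte_lo;    /* RPN, R, C, WIMG, PP */
    };

    static struct pte *
    pteg_search(struct pte *hash1, struct pte *hash2, unsigned int cmp)
    {
            struct pte *pteg;
            int i, second;

            for (second = 0; second <= 1; second++) {
                    pteg = second ? hash2 : hash1;
                    for (i = 0; i < 8; i++)         /* 8 PTEs per group */
                            if (pteg[i].pte_hi == cmp)
                                    return (&pteg[i]);
                    cmp |= 0x40;                    /* set H, second hash */
            }
            return (NULL);                          /* miss: take ISI/DSI */
    }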
+
+#ifdef DDB
+#define ddbsave 0xde0 /* primary save area for DDB */
+/*
+ * In case of DDB we want a separate trap catcher for it
+ */
+ .local ddbstk
+ .comm ddbstk,INTSTK,8 /* ddb stack */
+
+ .globl ddblow,ddbsize
+ddblow:
+ mtsprg 1,1 /* save SP */
+ stmw 28,ddbsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ lis 1,ddbstk+INTSTK@ha /* get new SP */
+ addi 1,1,ddbstk+INTSTK@l
+ bla ddbtrap
+ddbsize = .-ddblow
+#endif /* DDB */
+
+#ifdef IPKDB
+#define ipkdbsave 0xde0 /* primary save area for IPKDB */
+/*
+ * In case of IPKDB we want a separate trap catcher for it
+ */
+
+ .local ipkdbstk
+ .comm ipkdbstk,INTSTK,8 /* ipkdb stack */
+
+ .globl ipkdblow,ipkdbsize
+ipkdblow:
+ mtsprg 1,1 /* save SP */
+ stmw 28,ipkdbsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ lis 1,ipkdbstk+INTSTK@ha /* get new SP */
+ addi 1,1,ipkdbstk+INTSTK@l
+ bla ipkdbtrap
+ipkdbsize = .-ipkdblow
+#endif /* IPKDB */
+
+/*
+ * FRAME_SETUP assumes:
+ * SPRG1 SP (1)
+ * savearea r28-r31,DAR,DSISR (DAR & DSISR only for DSI traps)
+ * 28 LR
+ * 29 CR
+ * 1 kernel stack
+ * LR trap type
+ * SRR0/1 as at start of trap
+ */
+#define FRAME_SETUP(savearea) \
+/* Have to enable translation to allow access of kernel stack: */ \
+ mfsrr0 30; \
+ mfsrr1 31; \
+ stmw 30,savearea+24(0); \
+ mfmsr 30; \
+ ori 30,30,(PSL_DR|PSL_IR); \
+ mtmsr 30; \
+ isync; \
+ mfsprg 31,1; \
+ stwu 31,-FRAMELEN(1); \
+ stw 0,FRAME_0+8(1); \
+ stw 31,FRAME_1+8(1); \
+ stw 28,FRAME_LR+8(1); \
+ stw 29,FRAME_CR+8(1); \
+ lmw 28,savearea(0); \
+ stmw 2,FRAME_2+8(1); \
+ lmw 28,savearea+16(0); \
+ mfxer 3; \
+ mfctr 4; \
+ mflr 5; \
+ andi. 5,5,0xff00; \
+ stw 3,FRAME_XER+8(1); \
+ stw 4,FRAME_CTR+8(1); \
+ stw 5,FRAME_EXC+8(1); \
+ stw 28,FRAME_DAR+8(1); \
+ stw 29,FRAME_DSISR+8(1); \
+ stw 30,FRAME_SRR0+8(1); \
+ stw 31,FRAME_SRR1+8(1)
+
+#define FRAME_LEAVE(savearea) \
+/* Now restore regs: */ \
+ lwz 2,FRAME_SRR0+8(1); \
+ lwz 3,FRAME_SRR1+8(1); \
+ lwz 4,FRAME_CTR+8(1); \
+ lwz 5,FRAME_XER+8(1); \
+ lwz 6,FRAME_LR+8(1); \
+ lwz 7,FRAME_CR+8(1); \
+ stw 2,savearea(0); \
+ stw 3,savearea+4(0); \
+ mtctr 4; \
+ mtxer 5; \
+ mtlr 6; \
+ mtsprg 1,7; /* save cr */ \
+ lmw 2,FRAME_2+8(1); \
+ lwz 0,FRAME_0+8(1); \
+ lwz 1,FRAME_1+8(1); \
+ mtsprg 2,2; /* save r2 & r3 */ \
+ mtsprg 3,3; \
+/* Disable translation, machine check and recoverability: */ \
+ mfmsr 2; \
+ andi. 2,2,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l; \
+ mtmsr 2; \
+ isync; \
+/* Decide whether we return to user mode: */ \
+ lwz 3,savearea+4(0); \
+ mtcr 3; \
+ bc 4,17,1f; /* branch if PSL_PR is false */ \
+/* Restore user & kernel access SR: */ \
+/* lis 2,curpm@ha; get real address of pmap */ \
+/* lwz 2,curpm@l(2); */ \
+/* lwz 3,PM_USRSR(2); */ \
+/* mtsr USER_SR,3; */ \
+/* lwz 3,PM_KERNELSR(2); */ \
+/* mtsr KERNEL_SR,3; */ \
+1: mfsprg 2,1; /* restore cr */ \
+ mtcr 2; \
+ lwz 2,savearea(0); \
+ lwz 3,savearea+4(0); \
+ mtsrr0 2; \
+ mtsrr1 3; \
+ mfsprg 2,2; /* restore r2 & r3 */ \
+ mfsprg 3,3
+
+/*
+ * Preamble code for DSI/ISI traps
+ */
+disitrap:
+ lmw 30,disisave(0)
+ stmw 30,tempsave(0)
+ lmw 30,disisave+8(0)
+ stmw 30,tempsave+8(0)
+ mfdar 30
+ mfdsisr 31
+ stmw 30,tempsave+16(0)
+realtrap:
+/* Test whether we already had PR set */
+ mfsrr1 1
+ mtcr 1
+ mfsprg 1,1 /* restore SP (might have been
+ overwritten) */
+ bc 4,17,s_trap /* branch if PSL_PR is false */
+ mfsprg 1,0
+ lwz 1,GD_CURPCB(1)
+ addi 1,1,USPACE /* stack is top of user struct */
+
+/*
+ * Now the common trap catching code.
+ */
+s_trap:
+/* First have to enable KERNEL mapping */
+ lis 31,KERNEL_SEGMENT@h
+ ori 31,31,KERNEL_SEGMENT@l
+ mtsr KERNEL_SR,31
+ FRAME_SETUP(tempsave)
+/* Now we can recover interrupts again: */
+ mfmsr 7
+ ori 7,7,(PSL_EE|PSL_ME|PSL_RI)@l
+ mtmsr 7
+ isync
+/* Call C trap code: */
+trapagain:
+ addi 3,1,8
+ bl trap
+trapexit:
+/* Disable interrupts: */
+ mfmsr 3
+ andi. 3,3,~PSL_EE@l
+ mtmsr 3
+/* Test AST pending: */
+ lwz 5,FRAME_SRR1+8(1)
+ mtcr 5
+ bc 4,17,1f /* branch if PSL_PR is false */
+ lis 3,astpending@ha
+ lwz 4,astpending@l(3)
+ andi. 4,4,1
+ beq 1f
+#if 0 /* XXX */
+ li 6,EXC_AST
+#endif
+ stw 6,FRAME_EXC+8(1)
+ b trapagain
+1:
+#if 0
+ FRAME_LEAVE(tempsave)
+#endif
+ rfi
+
+/*
+ * Child comes here at the end of a fork.
+ * Mostly similar to the above.
+ */
+ .globl fork_trampoline
+fork_trampoline:
+ xor 3,3,3
+#if 0 /* XXX */
+ bl lcsplx
+#endif
+ mtlr 31
+ mr 3,30
+ blrl /* jump indirect to r31 */
+ b trapexit
+
+/*
+ * DSI second stage fault handler
+ */
+s_dsitrap:
+ mfdsisr 31 /* test whether this may be a
+ spill fault */
+ mtcr 31
+ mtsprg 1,1 /* save SP */
+ bc 4,1,disitrap /* branch if table miss is false */
+ lis 1,spillstk+SPILLSTK@ha
+ addi 1,1,spillstk+SPILLSTK@l /* get spill stack */
+ stwu 1,-52(1)
+ stw 0,48(1) /* save non-volatile registers */
+ stw 3,44(1)
+ stw 4,40(1)
+ stw 5,36(1)
+ stw 6,32(1)
+ stw 7,28(1)
+ stw 8,24(1)
+ stw 9,20(1)
+ stw 10,16(1)
+ stw 11,12(1)
+ stw 12,8(1)
+ mflr 30 /* save trap type */
+ mfctr 31 /* & CTR */
+ mfdar 3
+s_pte_spill:
+ bl pte_spill /* try a spill */
+ or. 3,3,3
+ mtctr 31 /* restore CTR */
+ mtlr 30 /* and trap type */
+ mfsprg 31,2 /* get saved XER */
+ mtxer 31 /* restore XER */
+ lwz 12,8(1) /* restore non-volatile registers */
+ lwz 11,12(1)
+ lwz 10,16(1)
+ lwz 9,20(1)
+ lwz 8,24(1)
+ lwz 7,28(1)
+ lwz 6,32(1)
+ lwz 5,36(1)
+ lwz 4,40(1)
+ lwz 3,44(1)
+ lwz 0,48(1)
+ beq disitrap
+ mfsprg 1,1 /* restore SP */
+ mtcr 29 /* restore CR */
+ mtlr 28 /* restore LR */
+ lmw 28,disisave(0) /* restore r28-r31 */
+ rfi /* return to trapped code */
+
+/*
+ * ISI second stage fault handler
+ */
+s_isitrap:
+ mfsrr1 31 /* test whether this may be a
+ spill fault */
+ mtcr 31
+ mtsprg 1,1 /* save SP */
+ bc 4,1,disitrap /* branch if table miss is false */
+ lis 1,spillstk+SPILLSTK@ha
+ addi 1,1,spillstk+SPILLSTK@l /* get spill stack */
+ stwu 1,-52(1)
+ stw 0,48(1) /* save non-volatile registers */
+ stw 3,44(1)
+ stw 4,40(1)
+ stw 5,36(1)
+ stw 6,32(1)
+ stw 7,28(1)
+ stw 8,24(1)
+ stw 9,20(1)
+ stw 10,16(1)
+ stw 11,12(1)
+ stw 12,8(1)
+ mfxer 30 /* save XER */
+ mtsprg 2,30
+ mflr 30 /* save trap type */
+ mfctr 31 /* & ctr */
+ mfsrr0 3
+ b s_pte_spill /* above */
+
+/*
+ * External interrupt second level handler
+ */
+#define INTRENTER \
+/* Save non-volatile registers: */ \
+ stwu 1,-88(1); /* temporarily */ \
+ stw 0,84(1); \
+ mfsprg 0,1; /* get original SP */ \
+ stw 0,0(1); /* and store it */ \
+ stw 3,80(1); \
+ stw 4,76(1); \
+ stw 5,72(1); \
+ stw 6,68(1); \
+ stw 7,64(1); \
+ stw 8,60(1); \
+ stw 9,56(1); \
+ stw 10,52(1); \
+ stw 11,48(1); \
+ stw 12,44(1); \
+ stw 28,40(1); /* saved LR */ \
+ stw 29,36(1); /* saved CR */ \
+ stw 30,32(1); /* saved XER */ \
+ lmw 28,tempsave(0); /* restore r28-r31 */ \
+ mfctr 6; \
+ lis 5,intr_depth@ha; \
+ lwz 5,intr_depth@l(5); \
+ mfsrr0 4; \
+ mfsrr1 3; \
+ stw 6,28(1); \
+ stw 5,20(1); \
+ stw 4,12(1); \
+ stw 3,8(1); \
+/* interrupts are recoverable here, and enable translation */ \
+ lis 3,(KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY)@h; \
+ ori 3,3,(KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY)@l; \
+ mtsr KERNEL_SR,3; \
+ mfmsr 5; \
+ ori 5,5,(PSL_IR|PSL_DR|PSL_RI); \
+ mtmsr 5; \
+ isync
+
+ .globl extint_call
+extintr:
+ INTRENTER
+extint_call:
+ bl extint_call /* to be filled in later */
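The "bl extint_call" above is a deliberate branch-to-self placeholder: at
boot the machine-dependent code overwrites this single instruction with a
real "bl" to the installed external-interrupt handler and flushes the caches.
A hedged sketch of such a patch, using the PowerPC I-form branch encoding
(install_extint() and its exact mechanics are hypothetical here):

    extern int extint_call;             /* the patchable instruction */

    static void
    install_extint(void (*handler)(void))
    {
            int disp = (char *)handler - (char *)&extint_call;

            /* "bl handler": opcode 18 << 26, LK = 1, 26-bit displacement */
            extint_call = 0x48000001 | (disp & 0x03fffffc);
            __syncicache(&extint_call, sizeof(int));
    }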
+
+intr_exit:
+/* Disable interrupts (should already be disabled) and MMU here: */
+ mfmsr 3
+ andi. 3,3,~(PSL_EE|PSL_ME|PSL_RI|PSL_DR|PSL_IR)@l
+ mtmsr 3
+ isync
+/* restore possibly overwritten registers: */
+ lwz 12,44(1)
+ lwz 11,48(1)
+ lwz 10,52(1)
+ lwz 9,56(1)
+ lwz 8,60(1)
+ lwz 7,64(1)
+ lwz 6,8(1)
+ lwz 5,12(1)
+ lwz 4,28(1)
+ lwz 3,32(1)
+ mtsrr1 6
+ mtsrr0 5
+ mtctr 4
+ mtxer 3
+/* Returning to user mode? */
+ mtcr 6 /* saved SRR1 */
+ bc 4,17,1f /* branch if PSL_PR is false */
+ mfsprg 3,0 /* get globaldata */
+ lwz 3,GD_CURPCB(3) /* get curpcb from globaldata */
+ lwz 3,PCB_PMR(3) /* get pmap real address from curpcb */
+ mtsr KERNEL_SR,3
+ lis 3,astpending@ha /* Test AST pending */
+ lwz 4,astpending@l(3)
+ andi. 4,4,1
+ beq 1f
+/* Setup for entry to realtrap: */
+ lwz 3,0(1) /* get saved SP */
+ mtsprg 1,3
+#if 0 /* XXX */
+ li 6,EXC_AST
+#endif
+ stmw 28,tempsave(0) /* establish tempsave again */
+ mtlr 6
+ lwz 28,40(1) /* saved LR */
+ lwz 29,36(1) /* saved CR */
+ lwz 6,68(1)
+ lwz 5,72(1)
+ lwz 4,76(1)
+ lwz 3,80(1)
+ lwz 0,84(1)
+ lis 30,intr_depth@ha /* adjust reentrancy count */
+ lwz 31,intr_depth@l(30)
+ addi 31,31,-1
+ stw 31,intr_depth@l(30)
+ b realtrap
+1:
+/* Here is the normal exit of extintr: */
+ lwz 5,36(1)
+ lwz 6,40(1)
+ mtcr 5
+ mtlr 6
+ lwz 6,68(1)
+ lwz 5,72(1)
+ lis 3,intr_depth@ha /* adjust reentrancy count */
+ lwz 4,intr_depth@l(3)
+ addi 4,4,-1
+ stw 4,intr_depth@l(3)
+ lwz 4,76(1)
+ lwz 3,80(1)
+ lwz 0,84(1)
+ lwz 1,0(1)
+ rfi
+
+/*
+ * Decrementer interrupt second level handler
+ */
+decrintr:
+ INTRENTER
+ addi 3,1,8 /* intr frame */
+ bl decr_intr
+ b intr_exit
+
+#ifdef DDB
+/*
+ * Deliberate entry to ddbtrap
+ */
+ .globl ddb_trap
+ddb_trap:
+ mtsprg 1,1
+ mfmsr 3
+ mtsrr1 3
+ andi. 3,3,~(PSL_EE|PSL_ME)@l
+ mtmsr 3 /* disable interrupts */
+ isync
+ stmw 28,ddbsave(0)
+ mflr 28
+ li 29,EXC_BPT
+ mtlr 29
+ mfcr 29
+ mtsrr0 28
+
+/*
+ * Now the ddb trap catching code.
+ */
+ddbtrap:
+ FRAME_SETUP(ddbsave)
+/* Call C trap code: */
+ addi 3,1,8
+ bl ddb_trap_glue
+ or. 3,3,3
+ bne ddbleave
+/* This wasn't for DDB, so switch to real trap: */
+ lwz 3,FRAME_EXC+8(1) /* save exception */
+ stw 3,ddbsave+8(0)
+ FRAME_LEAVE(ddbsave)
+ mtsprg 1,1 /* prepare for entrance to realtrap */
+ stmw 28,tempsave(0)
+ mflr 28
+ mfcr 29
+ lwz 31,ddbsave+8(0)
+ mtlr 31
+ b realtrap
+ddbleave:
+ FRAME_LEAVE(ddbsave)
+ rfi
+#endif /* DDB */
+
+#ifdef IPKDB
+/*
+ * Deliberate entry to ipkdbtrap
+ */
+ .globl ipkdb_trap
+ipkdb_trap:
+ mtsprg 1,1
+ mfmsr 3
+ mtsrr1 3
+ andi. 3,3,~(PSL_EE|PSL_ME)@l
+ mtmsr 3 /* disable interrupts */
+ isync
+ stmw 28,ipkdbsave(0)
+ mflr 28
+ li 29,EXC_BPT
+ mtlr 29
+ mfcr 29
+ mtsrr0 28
+
+/*
+ * Now the ipkdb trap catching code.
+ */
+ipkdbtrap:
+ FRAME_SETUP(ipkdbsave)
+/* Call C trap code: */
+ addi 3,1,8
+ bl ipkdb_trap_glue
+ or. 3,3,3
+ bne ipkdbleave
+/* This wasn't for IPKDB, so switch to real trap: */
+ lwz 3,FRAME_EXC+8(1) /* save exception */
+ stw 3,ipkdbsave+8(0)
+ FRAME_LEAVE(ipkdbsave)
+ mtsprg 1,1 /* prepare for entrance to realtrap */
+ stmw 28,tempsave(0)
+ mflr 28
+ mfcr 29
+ lwz 31,ipkdbsave+8(0)
+ mtlr 31
+ b realtrap
+ipkdbleave:
+ FRAME_LEAVE(ipkdbsave)
+ rfi
+
+ipkdbfault:
+ ba _ipkdbfault
+_ipkdbfault:
+ mfsrr0 3
+ addi 3,3,4
+ mtsrr0 3
+ li 3,-1
+ rfi
+
+/*
+ * int ipkdbfbyte(unsigned char *p)
+ */
+ .globl ipkdbfbyte
+ipkdbfbyte:
+ li 9,EXC_DSI /* establish new fault routine */
+ lwz 5,0(9)
+ lis 6,ipkdbfault@ha
+ lwz 6,ipkdbfault@l(6)
+ stw 6,0(9)
+#ifdef IPKDBUSERHACK
+ lis 8,ipkdbsr@ha
+ lwz 8,ipkdbsr@l(8)
+ mtsr USER_SR,8
+ isync
+#endif
+ dcbst 0,9 /* flush data... */
+ sync
+ icbi 0,9 /* and instruction caches */
+ lbz 3,0(3) /* fetch data */
+ stw 5,0(9) /* restore previous fault handler */
+ dcbst 0,9 /* and flush data... */
+ sync
+ icbi 0,9 /* and instruction caches */
+ blr
+
+/*
+ * int ipkdbsbyte(unsigned char *p, int c)
+ */
+ .globl ipkdbsbyte
+ipkdbsbyte:
+ li 9,EXC_DSI /* establish new fault routine */
+ lwz 5,0(9)
+ lis 6,ipkdbfault@ha
+ lwz 6,ipkdbfault@l(6)
+ stw 6,0(9)
+#ifdef IPKDBUSERHACK
+ lis 8,ipkdbsr@ha
+ lwz 8,ipkdbsr@l(8)
+ mtsr USER_SR,8
+ isync
+#endif
+ dcbst 0,9 /* flush data... */
+ sync
+ icbi 0,9 /* and instruction caches */
+ mr 6,3
+ xor 3,3,3
+ stb 4,0(6)
+ dcbst 0,6 /* Now do appropriate flushes
+ to data... */
+ sync
+ icbi 0,6 /* and instruction caches */
+ stw 5,0(9) /* restore previous fault handler */
+ dcbst 0,9 /* and flush data... */
+ sync
+ icbi 0,9 /* and instruction caches */
+ blr
+#endif /* IPKDB */
+
+/*
+ * int setfault()
+ *
+ * Similar to setjmp(): set up for handling faults on accesses to user
+ * memory.  Any routine using this must not touch non-volatile registers
+ * after the call, so it may only use bcopy(); the optimized C version
+ * currently in use qualifies.
+ */
+ .globl setfault
+setfault:
+ mflr 0
+ mfcr 12
+ mfsprg 4,0
+ lwz 4,GD_CURPCB(4)
+ stw 3,PCB_FAULT(4)
+ stw 0,0(3)
+ stw 1,4(3)
+ stw 2,8(3)
+ stmw 12,12(3)
+ xor 3,3,3
+ blr
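setfault() is to the trap handler what setjmp() is to longjmp(): it records
the caller's non-volatile state in the supplied buffer, hangs the buffer off
the PCB (PCB_FAULT in the assembly), and returns 0; when trap() later catches
a user-memory fault it returns here, longjmp-style, with a non-zero value. A
sketch of the calling pattern; faultbuf, curpcb, and pcb_onfault are names
assumed from the accompanying headers, and guarded_copy() is hypothetical:

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/systm.h>

    static int
    guarded_copy(const void *uaddr, void *kaddr, size_t len)
    {
            faultbuf env;

            if (setfault(env)) {            /* non-zero: a fault was taken */
                    curpcb->pcb_onfault = NULL;
                    return (EFAULT);
            }
            bcopy(uaddr, kaddr, len);
            curpcb->pcb_onfault = NULL;     /* disarm the handler */
            return (0);
    }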
+
+/*
+ * Signal "trampoline" code.
+ */
+ .globl sigcode
+sigcode:
+ b sys_exit
+esigcode:
+ .data
+GLOBAL(szsigcode)
+ .long esigcode-sigcode
+ .text
+
diff --git a/sys/powerpc/powerpc/locore.s b/sys/powerpc/powerpc/locore.s
new file mode 100644
index 0000000..2ede211
--- /dev/null
+++ b/sys/powerpc/powerpc/locore.s
@@ -0,0 +1,1373 @@
+/* $FreeBSD$ */
+/* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */
+
+/*
+ * Copyright (C) 2001 Benno Rice
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BENNO RICE BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_ddb.h"
+#include "opt_ipkdb.h"
+#include "assym.s"
+
+#include <sys/syscall.h>
+
+#include <machine/trap.h>
+#include <machine/param.h>
+#include <machine/pmap.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+
+/*
+ * Some instructions gas doesn't understand (yet?)
+ */
+#define bdneq bdnzf 2,
+
+/*
+ * Globals
+ */
+ .data
+GLOBAL(tmpstk)
+ .space 8208
+GLOBAL(esym)
+ .long 0 /* end of symbol table */
+GLOBAL(proc0paddr)
+ .long 0 /* proc0 p_addr */
+GLOBAL(PTmap)
+ .long 0 /* PTmap */
+
+GLOBAL(intrnames)
+ .asciz "irq0", "irq1", "irq2", "irq3"
+ .asciz "irq4", "irq5", "irq6", "irq7"
+ .asciz "irq8", "irq9", "irq10", "irq11"
+ .asciz "irq12", "irq13", "irq14", "irq15"
+ .asciz "irq16", "irq17", "irq18", "irq19"
+ .asciz "irq20", "irq21", "irq22", "irq23"
+ .asciz "irq24", "irq25", "irq26", "irq27"
+ .asciz "irq28", "irq29", "irq30", "irq31"
+ .asciz "irq32", "irq33", "irq34", "irq35"
+ .asciz "irq36", "irq37", "irq38", "irq39"
+ .asciz "irq40", "irq41", "irq42", "irq43"
+ .asciz "irq44", "irq45", "irq46", "irq47"
+ .asciz "irq48", "irq49", "irq50", "irq51"
+ .asciz "irq52", "irq53", "irq54", "irq55"
+ .asciz "irq56", "irq57", "irq58", "irq59"
+ .asciz "irq60", "irq61", "irq62", "irq63"
+ .asciz "clock", "softclock", "softnet", "softserial"
+GLOBAL(eintrnames)
+ .align 4
+GLOBAL(intrcnt)
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .long 0,0,0,0
+GLOBAL(eintrcnt)
+
+GLOBAL(ofmsr)
+ .long 0 /* msr used in Open Firmware */
+
+GLOBAL(powersave)
+ .long 0
+
+/*
+ * File-scope for locore.S
+ */
+idle_u:
+ .long 0 /* fake uarea during idle after exit */
+openfirmware_entry:
+ .long 0 /* openfirmware entry point */
+srsave:
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+
+/*
+ * This symbol is here for the benefit of kvm_mkdb, and is supposed to
+ * mark the start of kernel text.
+ */
+ .text
+ .globl kernel_text
+kernel_text:
+
+/*
+ * Startup entry. Note, this must be the first thing in the text
+ * segment!
+ */
+ .text
+ .globl start
+start:
+#ifdef FIRMWORKSBUGS
+ mfmsr 0
+ andi. 0,0,PSL_IR|PSL_DR
+ beq 1f
+
+ bl ofwr_init
+1:
+#endif
+ li 8,0
+ li 9,0x100
+ mtctr 9
+1:
+ dcbf 0,8
+ icbi 0,8
+ addi 8,8,0x20
+ bdnz 1b
+ sync
+ isync
+
+ mtibatu 0,0
+ mtibatu 1,0
+ mtibatu 2,0
+ mtibatu 3,0
+ mtdbatu 0,0
+ mtdbatu 1,0
+ mtdbatu 2,0
+ mtdbatu 3,0
+
+ li 9,0x12
+ mtibatl 0,9
+ mtdbatl 0,9
+ li 9,0x1ffe
+ mtibatu 0,9
+ mtdbatu 0,9
+ isync
+
+ lis 8,openfirmware_entry@ha
+ stw 5,openfirmware_entry@l(8) /* save client interface handler */
+ mr 3,5
+
+ lis 1,tmpstk@ha
+ addi 1,1,tmpstk@l
+ addi 1,1,8192
+
+ mfmsr 0
+ lis 9,ofmsr@ha
+ stw 0,ofmsr@l(9)
+
+ bl OF_init
+
+ lis 4,end@ha
+ addi 4,4,end@l
+ mr 5,4
+ li 9,PAGE_MASK
+ add 4,4,9
+ andc 4,4,9
+ lis 9,OF_buf@ha
+ stw 4,OF_buf@l(9)
+ addi 4,4,PAGE_SIZE
+ lis 9,proc0paddr@ha
+ stw 4,proc0paddr@l(9)
+ addi 4,4,USPACE-FRAMELEN
+ mr 1,4
+ xor 0,0,0
+ stwu 0,-16(1)
+
+ lis 3,kernel_text@ha
+ addi 3,3,kernel_text@l
+#if 0
+ mr 5,6
+#endif
+
+ bl powerpc_init
+ bl mi_startup
+ b OF_exit
+
+#if 0 /* XXX: We may switch back to this in the future. */
+/*
+ * OpenFirmware entry point
+ */
+ENTRY(openfirmware)
+ mflr 0 /* save return address */
+ stw 0,4(1)
+ stwu 1,-16(1) /* setup stack frame */
+
+ mfmsr 4 /* save msr */
+ stw 4,8(1)
+
+ lis 4,openfirmware_entry@ha /* get firmware entry point */
+ lwz 4,openfirmware_entry@l(4)
+ mtlr 4
+
+ li 0,0 /* clear battable translations */
+ mtdbatu 2,0
+ mtdbatu 3,0
+ mtibatu 2,0
+ mtibatu 3,0
+
+ lis 4,ofmsr@ha /* Open Firmware msr */
+ lwz 4,ofmsr@l(4)
+ mtmsr 4
+ isync
+
+ lis 4,srsave@ha /* save old SR */
+ addi 4,4,srsave@l
+ li 5,0
+1: mfsrin 0,5
+ stw 0,0(4)
+ addi 4,4,4
+ addis 5,5,0x10000000@h
+ cmpwi 5,0
+ bne 1b
+
+ lis 4,ofw_pmap@ha /* load OFW SR */
+ addi 4,4,ofw_pmap@l
+ lwz 0,PM_KERNELSR(4)
+ cmpwi 0,0 /* pm_sr[KERNEL_SR] == 0? */
+ beq 2f /* then skip (not initialized yet) */
+ li 5,0
+1: lwz 0,0(4)
+ mtsrin 0,5
+ addi 4,4,4
+ addis 5,5,0x10000000@h
+ cmpwi 5,0
+ bne 1b
+2:
+ blrl /* call Open Firmware */
+
+ mfmsr 4
+ li 5,PSL_IR|PSL_DR
+ andc 4,4,5
+ mtmsr 4
+ isync
+
+ lis 4,srsave@ha /* restore saved SR */
+ addi 4,4,srsave@l
+ li 5,0
+1: lwz 0,0(4)
+ mtsrin 0,5
+ addi 4,4,4
+ addis 5,5,0x10000000@h
+ cmpwi 5,0
+ bne 1b
+
+ lwz 4,8(1) /* restore msr */
+ mtmsr 4
+ isync
+
+ lwz 1,0(1) /* and return */
+ lwz 0,4(1)
+ mtlr 0
+ blr
+#endif
+
+/*
+ * Switch to/from OpenFirmware real mode stack
+ *
+ * Note: has to be called as the very first thing in OpenFirmware interface
+ * routines.
+ * E.g.:
+ * int
+ * OF_xxx(arg1, arg2)
+ * type arg1, arg2;
+ * {
+ * static struct {
+ * char *name;
+ * int nargs;
+ * int nreturns;
+ * char *method;
+ * int arg1;
+ * int arg2;
+ * int ret;
+ * } args = {
+ * "xxx",
+ * 2,
+ * 1,
+ * };
+ *
+ * ofw_stack();
+ * args.arg1 = arg1;
+ * args.arg2 = arg2;
+ * if (openfirmware(&args) < 0)
+ * return -1;
+ * return args.ret;
+ * }
+ */
+
+ .local firmstk
+ .comm firmstk,PAGE_SIZE,8
+
+ENTRY(ofw_stack)
+ mfmsr 8 /* turn off interrupts */
+ andi. 0,8,~(PSL_EE|PSL_RI)@l
+ mtmsr 0
+ stw 8,4(1) /* abuse return address slot */
+
+ lwz 5,0(1) /* get length of stack frame */
+ subf 5,1,5
+
+ lis 7,firmstk+PAGE_SIZE-8@ha
+ addi 7,7,firmstk+PAGE_SIZE-8@l
+ lis 6,ofw_back@ha
+ addi 6,6,ofw_back@l
+ subf 4,5,7 /* make room for stack frame on
+ new stack */
+ stw 6,-4(7) /* setup return pointer */
+ stwu 1,-8(7)
+
+ stw 7,-8(4)
+
+ addi 3,1,8
+ addi 1,4,-8
+ subi 5,5,8
+
+ cmpw 3,4
+ beqlr
+
+ mr 0,5
+ addi 5,5,-1
+ cmpwi 0,0
+ beqlr
+
+1: lwz 0,0(3)
+ stw 0,0(4)
+ addi 3,3,1
+ addi 4,4,1
+ mr 0,5
+ addi 5,5,-1
+ cmpwi 0,0
+ bne 1b
+ blr
+
+ofw_back:
+ lwz 1,0(1) /* get callers original stack pointer */
+
+ lwz 0,4(1) /* get saved msr from abused slot */
+ mtmsr 0
+
+ lwz 1,0(1) /* return */
+ lwz 0,4(1)
+ mtlr 0
+ blr
+
+/*
+ * Data used during primary/secondary traps/interrupts
+ */
+#define tempsave 0x2e0 /* primary save area for trap handling */
+#define disisave 0x3e0 /* primary save area for dsi/isi traps */
+
+#define INTSTK (8*1024) /* 8K interrupt stack */
+ .data
+ .align 4
+intstk:
+ .space INTSTK /* interrupt stack */
+
+GLOBAL(intr_depth)
+ .long -1 /* in-use marker */
+
+#define SPILLSTK 1024 /* 1K spill stack */
+
+ .comm spillstk,SPILLSTK,8
+
+/*
+ * This code gets copied to all the trap vectors
+ * (except ISI/DSI, ALI, the interrupts, and possibly the debugging
+ * traps when using IPKDB).
+ */
+ .text
+ .globl trapcode,trapsize
+trapcode:
+ mtsprg 1,1 /* save SP */
+ stmw 28,tempsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+/* Test whether we already had PR set */
+ mfsrr1 31
+ mtcr 31
+ bc 4,17,1f /* branch if PSL_PR is clear */
+ mfsprg 1,0
+ lwz 1,GD_CURPCB(1)
+ addi 1,1,USPACE /* stack is top of user struct */
+1:
+ bla s_trap
+trapsize = .-trapcode
+
+/*
+ * For ALI: has to save DSISR and DAR
+ */
+ .globl alitrap,alisize
+alitrap:
+ mtsprg 1,1 /* save SP */
+ stmw 28,tempsave(0) /* free r28-r31 */
+ mfdar 30
+ mfdsisr 31
+ stmw 30,tempsave+16(0)
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+/* Test whether we already had PR set */
+ mfsrr1 31
+ mtcr 31
+ bc 4,17,1f /* branch if PSL_PR is clear */
+ mfsprg 1,0
+ lwz 1,GD_CURPCB(1)
+ addi 1,1,USPACE /* stack is top of user struct */
+1:
+ bla s_trap
+alisize = .-alitrap
+
+/*
+ * Similar to the above for DSI
+ * Has to handle BAT spills
+ * and standard pagetable spills
+ */
+ .globl dsitrap,dsisize
+dsitrap:
+ stmw 28,disisave(0) /* free r28-r31 */
+ mfcr 29 /* save CR */
+ mfxer 30 /* save XER */
+ mtsprg 2,30 /* in SPRG2 */
+ mfsrr1 31 /* test kernel mode */
+ mtcr 31
+ bc 12,17,1f /* branch if PSL_PR is set */
+ mfdar 31 /* get fault address */
+ rlwinm 31,31,7,25,28 /* get segment * 8 */
+
+ /* get batu */
+ addis 31,31,battable@ha
+ lwz 30,battable@l(31)
+ mtcr 30
+ bc 4,30,1f /* branch if supervisor valid is
+ false */
+ /* get batl */
+ lwz 31,battable+4@l(31)
+/* We randomly use the highest two bat registers here */
+ mftb 28
+ andi. 28,28,1
+ bne 2f
+ mtdbatu 2,30
+ mtdbatl 2,31
+ b 3f
+2:
+ mtdbatu 3,30
+ mtdbatl 3,31
+3:
+ mfsprg 30,2 /* restore XER */
+ mtxer 30
+ mtcr 29 /* restore CR */
+ lmw 28,disisave(0) /* restore r28-r31 */
+ rfi /* return to trapped code */
+1:
+ mflr 28 /* save LR */
+ bla s_dsitrap
+dsisize = .-dsitrap
+
+/*
+ * Similar to the above for ISI
+ */
+ .globl isitrap,isisize
+isitrap:
+ stmw 28,disisave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ mfsrr1 31 /* test kernel mode */
+ mtcr 31
+ bc 12,17,1f /* branch if PSL_PR is set */
+ mfsrr0 31 /* get fault address */
+ rlwinm 31,31,7,25,28 /* get segment * 8 */
+
+ /* get batu */
+ addis 31,31,battable@ha
+ lwz 30,battable@l(31)
+ mtcr 30
+ bc 4,30,1f /* branch if supervisor valid is
+ false */
+ mtibatu 3,30
+
+ /* get batl */
+ lwz 30,battable+4@l(31)
+ mtibatl 3,30
+
+ mtcr 29 /* restore CR */
+ lmw 28,disisave(0) /* restore r28-r31 */
+ rfi /* return to trapped code */
+1:
+ bla s_isitrap
+isisize = .-isitrap
+
+/*
+ * This one for the external interrupt handler.
+ */
+ .globl extint,extsize
+extint:
+ mtsprg 1,1 /* save SP */
+ stmw 28,tempsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ mfxer 30 /* save XER */
+ lis 1,intstk+INTSTK@ha /* get interrupt stack */
+ addi 1,1,intstk+INTSTK@l
+ lwz 31,0(1) /* were we already running on intstk? */
+ addic. 31,31,1
+ stw 31,0(1)
+ beq 1f
+ mfsprg 1,1 /* yes, get old SP */
+1:
+ ba extintr
+extsize = .-extint
+
+/*
+ * And this one for the decrementer interrupt handler.
+ */
+ .globl decrint,decrsize
+decrint:
+ mtsprg 1,1 /* save SP */
+ stmw 28,tempsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ mfxer 30 /* save XER */
+ lis 1,intstk+INTSTK@ha /* get interrupt stack */
+ addi 1,1,intstk+INTSTK@l
+ lwz 31,0(1) /* were we already running on intstk? */
+ addic. 31,31,1
+ stw 31,0(1)
+ beq 1f
+ mfsprg 1,1 /* yes, get old SP */
+1:
+ ba decrintr
+decrsize = .-decrint
+
+/*
+ * Now the tlb software load for 603 processors:
+ * (Code essentially from the 603e User Manual, Chapter 5, but
+ * corrected a lot.)
+ */
+#define DMISS 976
+#define DCMP 977
+#define HASH1 978
+#define HASH2 979
+#define IMISS 980
+#define ICMP 981
+#define RPA 982
+
+ .globl tlbimiss,tlbimsize
+tlbimiss:
+ mfspr 2,HASH1 /* get first pointer */
+ li 1,8
+ mfctr 0 /* save counter */
+ mfspr 3,ICMP /* get first compare value */
+ addi 2,2,-8 /* predec pointer */
+1:
+ mtctr 1 /* load counter */
+2:
+ lwzu 1,8(2) /* get next pte */
+ cmpl 0,1,3 /* see if found pte */
+ bdneq 2b /* loop if not eq */
+ bne 3f /* not found */
+ lwz 1,4(2) /* load tlb entry lower word */
+ andi. 3,1,8 /* check G-bit */
+ bne 4f /* if guarded, take ISI */
+ mtctr 0 /* restore counter */
+ mfspr 0,IMISS /* get the miss address for the tlbli */
+ mfsrr1 3 /* get the saved cr0 bits */
+ mtcrf 0x80,3 /* and restore */
+ ori 1,1,0x100 /* set the reference bit */
+ mtspr RPA,1 /* set the pte */
+ srwi 1,1,8 /* get byte 7 of pte */
+ tlbli 0 /* load the itlb */
+ stb 1,6(2) /* update page table */
+ rfi
+
+3: /* not found in pteg */
+ andi. 1,3,0x40 /* have we already done second hash? */
+ bne 5f
+ mfspr 2,HASH2 /* get the second pointer */
+ ori 3,3,0x40 /* change the compare value */
+ li 1,8
+ addi 2,2,-8 /* predec pointer */
+ b 1b
+4: /* guarded */
+ mfsrr1 3
+ andi. 2,3,0xffff /* clean upper srr1 */
+	oris	2,2,0x8000000@h	/* set srr1<4> to flag prot violation */
+ b 6f
+5: /* not found anywhere */
+ mfsrr1 3
+ andi. 2,3,0xffff /* clean upper srr1 */
+ oris 2,2,0x40000000@h /* set srr1<1> to flag pte not found */
+6:
+ mtctr 0 /* restore counter */
+ mtsrr1 2
+ mfmsr 0
+ xoris 0,0,0x20000@h /* flip the msr<tgpr> bit */
+ mtcrf 0x80,3 /* restore cr0 */
+ mtmsr 0 /* now with native gprs */
+ isync
+ ba EXC_ISI
+tlbimsize = .-tlbimiss
+
+ .globl tlbdlmiss,tlbdlmsize
+tlbdlmiss:
+ mfspr 2,HASH1 /* get first pointer */
+ li 1,8
+ mfctr 0 /* save counter */
+ mfspr 3,DCMP /* get first compare value */
+ addi 2,2,-8 /* predec pointer */
+1:
+ mtctr 1 /* load counter */
+2:
+ lwzu 1,8(2) /* get next pte */
+ cmpl 0,1,3 /* see if found pte */
+ bdneq 2b /* loop if not eq */
+ bne 3f /* not found */
+ lwz 1,4(2) /* load tlb entry lower word */
+ mtctr 0 /* restore counter */
+ mfspr 0,DMISS /* get the miss address for the tlbld */
+ mfsrr1 3 /* get the saved cr0 bits */
+ mtcrf 0x80,3 /* and restore */
+ ori 1,1,0x100 /* set the reference bit */
+ mtspr RPA,1 /* set the pte */
+ srwi 1,1,8 /* get byte 7 of pte */
+ tlbld 0 /* load the dtlb */
+ stb 1,6(2) /* update page table */
+ rfi
+
+3: /* not found in pteg */
+ andi. 1,3,0x40 /* have we already done second hash? */
+ bne 5f
+ mfspr 2,HASH2 /* get the second pointer */
+ ori 3,3,0x40 /* change the compare value */
+ li 1,8
+ addi 2,2,-8 /* predec pointer */
+ b 1b
+5: /* not found anywhere */
+ mfsrr1 3
+ lis 1,0x40000000@h /* set dsisr<1> to flag pte not found */
+ mtctr 0 /* restore counter */
+ andi. 2,3,0xffff /* clean upper srr1 */
+ mtsrr1 2
+ mtdsisr 1 /* load the dsisr */
+ mfspr 1,DMISS /* get the miss address */
+ mtdar 1 /* put in dar */
+ mfmsr 0
+ xoris 0,0,0x20000@h /* flip the msr<tgpr> bit */
+ mtcrf 0x80,3 /* restore cr0 */
+ mtmsr 0 /* now with native gprs */
+ isync
+ ba EXC_DSI
+tlbdlmsize = .-tlbdlmiss
+
+ .globl tlbdsmiss,tlbdsmsize
+tlbdsmiss:
+ mfspr 2,HASH1 /* get first pointer */
+ li 1,8
+ mfctr 0 /* save counter */
+ mfspr 3,DCMP /* get first compare value */
+ addi 2,2,-8 /* predec pointer */
+1:
+ mtctr 1 /* load counter */
+2:
+ lwzu 1,8(2) /* get next pte */
+ cmpl 0,1,3 /* see if found pte */
+ bdneq 2b /* loop if not eq */
+ bne 3f /* not found */
+ lwz 1,4(2) /* load tlb entry lower word */
+ andi. 3,1,0x80 /* check the C-bit */
+ beq 4f
+5:
+ mtctr 0 /* restore counter */
+ mfspr 0,DMISS /* get the miss address for the tlbld */
+ mfsrr1 3 /* get the saved cr0 bits */
+ mtcrf 0x80,3 /* and restore */
+ mtspr RPA,1 /* set the pte */
+ tlbld 0 /* load the dtlb */
+ rfi
+
+3: /* not found in pteg */
+ andi. 1,3,0x40 /* have we already done second hash? */
+ bne 5f
+ mfspr 2,HASH2 /* get the second pointer */
+ ori 3,3,0x40 /* change the compare value */
+ li 1,8
+ addi 2,2,-8 /* predec pointer */
+ b 1b
+4: /* found, but C-bit = 0 */
+ rlwinm. 3,1,30,0,1 /* test PP */
+ bge- 7f
+ andi. 3,1,1
+ beq+ 8f
+9:	/* found, but protection violation (PP==00) */
+ mfsrr1 3
+ lis 1,0xa000000@h /* indicate protection violation
+ on store */
+ b 1f
+7: /* found, PP=1x */
+ mfspr 3,DMISS /* get the miss address */
+ mfsrin 1,3 /* get the segment register */
+ mfsrr1 3
+ rlwinm 3,3,18,31,31 /* get PR-bit */
+ rlwnm. 2,2,3,1,1 /* get the key */
+ bne- 9b /* protection violation */
+8: /* found, set reference/change bits */
+ lwz 1,4(2) /* reload tlb entry */
+ ori 1,1,0x180
+ sth 1,6(2)
+ b 5b
+5: /* not found anywhere */
+ mfsrr1 3
+ lis 1,0x42000000@h /* set dsisr<1> to flag pte not found */
+ /* dsisr<6> to flag store */
+1:
+ mtctr 0 /* restore counter */
+ andi. 2,3,0xffff /* clean upper srr1 */
+ mtsrr1 2
+ mtdsisr 1 /* load the dsisr */
+ mfspr 1,DMISS /* get the miss address */
+ mtdar 1 /* put in dar */
+ mfmsr 0
+ xoris 0,0,0x20000@h /* flip the msr<tgpr> bit */
+ mtcrf 0x80,3 /* restore cr0 */
+ mtmsr 0 /* now with native gprs */
+ isync
+ ba EXC_DSI
+tlbdsmsize = .-tlbdsmiss
+
+#ifdef DDB
+#define ddbsave 0xde0 /* primary save area for DDB */
+/*
+ * In case of DDB we want a separate trap catcher for it
+ */
+ .local ddbstk
+ .comm ddbstk,INTSTK,8 /* ddb stack */
+
+ .globl ddblow,ddbsize
+ddblow:
+ mtsprg 1,1 /* save SP */
+ stmw 28,ddbsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ lis 1,ddbstk+INTSTK@ha /* get new SP */
+ addi 1,1,ddbstk+INTSTK@l
+ bla ddbtrap
+ddbsize = .-ddblow
+#endif /* DDB */
+
+#ifdef IPKDB
+#define ipkdbsave 0xde0 /* primary save area for IPKDB */
+/*
+ * In case of IPKDB we want a separate trap catcher for it
+ */
+
+ .local ipkdbstk
+ .comm ipkdbstk,INTSTK,8 /* ipkdb stack */
+
+ .globl ipkdblow,ipkdbsize
+ipkdblow:
+ mtsprg 1,1 /* save SP */
+ stmw 28,ipkdbsave(0) /* free r28-r31 */
+ mflr 28 /* save LR */
+ mfcr 29 /* save CR */
+ lis 1,ipkdbstk+INTSTK@ha /* get new SP */
+ addi 1,1,ipkdbstk+INTSTK@l
+ bla ipkdbtrap
+ipkdbsize = .-ipkdblow
+#endif /* IPKDB */
+
+/*
+ * FRAME_SETUP assumes:
+ * SPRG1 SP (1)
+ * savearea r28-r31,DAR,DSISR (DAR & DSISR only for DSI traps)
+ * 28 LR
+ * 29 CR
+ * 1 kernel stack
+ * LR trap type
+ * SRR0/1 as at start of trap
+ */
+#define FRAME_SETUP(savearea) \
+/* Have to enable translation to allow access of kernel stack: */ \
+ mfsrr0 30; \
+ mfsrr1 31; \
+ stmw 30,savearea+24(0); \
+ mfmsr 30; \
+ ori 30,30,(PSL_DR|PSL_IR); \
+ mtmsr 30; \
+ isync; \
+ mfsprg 31,1; \
+ stwu 31,-FRAMELEN(1); \
+ stw 0,FRAME_0+8(1); \
+ stw 31,FRAME_1+8(1); \
+ stw 28,FRAME_LR+8(1); \
+ stw 29,FRAME_CR+8(1); \
+ lmw 28,savearea(0); \
+ stmw 2,FRAME_2+8(1); \
+ lmw 28,savearea+16(0); \
+ mfxer 3; \
+ mfctr 4; \
+ mflr 5; \
+ andi. 5,5,0xff00; \
+ stw 3,FRAME_XER+8(1); \
+ stw 4,FRAME_CTR+8(1); \
+ stw 5,FRAME_EXC+8(1); \
+ stw 28,FRAME_DAR+8(1); \
+ stw 29,FRAME_DSISR+8(1); \
+ stw 30,FRAME_SRR0+8(1); \
+ stw 31,FRAME_SRR1+8(1)
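+
+/*
+ * The frame FRAME_SETUP builds starts 8 bytes above the new SP (all
+ * registers are stored at FRAME_*+8(1)), which is why the C entry
+ * points below pass the trapframe as "addi 3,1,8".
+ */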
+
+#define FRAME_LEAVE(savearea) \
+/* Now restore regs: */ \
+ lwz 2,FRAME_SRR0+8(1); \
+ lwz 3,FRAME_SRR1+8(1); \
+ lwz 4,FRAME_CTR+8(1); \
+ lwz 5,FRAME_XER+8(1); \
+ lwz 6,FRAME_LR+8(1); \
+ lwz 7,FRAME_CR+8(1); \
+ stw 2,savearea(0); \
+ stw 3,savearea+4(0); \
+ mtctr 4; \
+ mtxer 5; \
+ mtlr 6; \
+ mtsprg 1,7; /* save cr */ \
+ lmw 2,FRAME_2+8(1); \
+ lwz 0,FRAME_0+8(1); \
+ lwz 1,FRAME_1+8(1); \
+ mtsprg 2,2; /* save r2 & r3 */ \
+ mtsprg 3,3; \
+/* Disable translation, machine check and recoverability: */ \
+ mfmsr 2; \
+ andi. 2,2,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l; \
+ mtmsr 2; \
+ isync; \
+/* Decide whether we return to user mode: */ \
+ lwz 3,savearea+4(0); \
+ mtcr 3; \
+ bc 4,17,1f; /* branch if PSL_PR is false */ \
+/* Restore user & kernel access SR: */ \
+/* lis 2,curpm@ha; get real address of pmap */ \
+/* lwz 2,curpm@l(2); */ \
+/* lwz 3,PM_USRSR(2); */ \
+/* mtsr USER_SR,3; */ \
+/* lwz 3,PM_KERNELSR(2); */ \
+/* mtsr KERNEL_SR,3; */ \
+1: mfsprg 2,1; /* restore cr */ \
+ mtcr 2; \
+ lwz 2,savearea(0); \
+ lwz 3,savearea+4(0); \
+ mtsrr0 2; \
+ mtsrr1 3; \
+ mfsprg 2,2; /* restore r2 & r3 */ \
+ mfsprg 3,3
+
+/*
+ * Preamble code for DSI/ISI traps
+ */
+disitrap:
+ lmw 30,disisave(0)
+ stmw 30,tempsave(0)
+ lmw 30,disisave+8(0)
+ stmw 30,tempsave+8(0)
+ mfdar 30
+ mfdsisr 31
+ stmw 30,tempsave+16(0)
+realtrap:
+/* Test whether we already had PR set */
+ mfsrr1 1
+ mtcr 1
+ mfsprg 1,1 /* restore SP (might have been
+ overwritten) */
+ bc 4,17,s_trap /* branch if PSL_PR is false */
+ mfsprg 1,0
+ lwz 1,GD_CURPCB(1)
+ addi 1,1,USPACE /* stack is top of user struct */
+
+/*
+ * Now the common trap catching code.
+ */
+s_trap:
+/* First have to enable KERNEL mapping */
+ lis 31,KERNEL_SEGMENT@h
+ ori 31,31,KERNEL_SEGMENT@l
+ mtsr KERNEL_SR,31
+ FRAME_SETUP(tempsave)
+/* Now we can recover interrupts again: */
+ mfmsr 7
+ ori 7,7,(PSL_EE|PSL_ME|PSL_RI)@l
+ mtmsr 7
+ isync
+/* Call C trap code: */
+trapagain:
+ addi 3,1,8
+ bl trap
+trapexit:
+/* Disable interrupts: */
+ mfmsr 3
+ andi. 3,3,~PSL_EE@l
+ mtmsr 3
+/* Test AST pending: */
+ lwz 5,FRAME_SRR1+8(1)
+ mtcr 5
+ bc 4,17,1f /* branch if PSL_PR is false */
+ lis 3,astpending@ha
+ lwz 4,astpending@l(3)
+ andi. 4,4,1
+ beq 1f
+#if 0 /* XXX */
+ li 6,EXC_AST
+#endif
+ stw 6,FRAME_EXC+8(1)
+ b trapagain
+1:
+#if 0
+ FRAME_LEAVE(tempsave)
+#endif
+ rfi
+
+/*
+ * Child comes here at the end of a fork.
+ * Mostly similar to the above.
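+ * r31 holds the child's callout function and r30 its argument;
+ * presumably these were staged by the fork code before the child
+ * was first scheduled.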
+ */
+ .globl fork_trampoline
+fork_trampoline:
+ xor 3,3,3
+#if 0 /* XXX */
+ bl lcsplx
+#endif
+ mtlr 31
+ mr 3,30
+ blrl /* jump indirect to r31 */
+ b trapexit
+
+/*
+ * DSI second stage fault handler
+ */
+s_dsitrap:
+ mfdsisr 31 /* test whether this may be a
+ spill fault */
+ mtcr 31
+ mtsprg 1,1 /* save SP */
+ bc 4,1,disitrap /* branch if table miss is false */
+ lis 1,spillstk+SPILLSTK@ha
+ addi 1,1,spillstk+SPILLSTK@l /* get spill stack */
+ stwu 1,-52(1)
+ stw 0,48(1) /* save non-volatile registers */
+ stw 3,44(1)
+ stw 4,40(1)
+ stw 5,36(1)
+ stw 6,32(1)
+ stw 7,28(1)
+ stw 8,24(1)
+ stw 9,20(1)
+ stw 10,16(1)
+ stw 11,12(1)
+ stw 12,8(1)
+ mflr 30 /* save trap type */
+ mfctr 31 /* & CTR */
+ mfdar 3
+s_pte_spill:
+ bl pte_spill /* try a spill */
+ or. 3,3,3
+ mtctr 31 /* restore CTR */
+ mtlr 30 /* and trap type */
+ mfsprg 31,2 /* get saved XER */
+ mtxer 31 /* restore XER */
+ lwz 12,8(1) /* restore non-volatile registers */
+ lwz 11,12(1)
+ lwz 10,16(1)
+ lwz 9,20(1)
+ lwz 8,24(1)
+ lwz 7,28(1)
+ lwz 6,32(1)
+ lwz 5,36(1)
+ lwz 4,40(1)
+ lwz 3,44(1)
+ lwz 0,48(1)
+ beq disitrap
+ mfsprg 1,1 /* restore SP */
+ mtcr 29 /* restore CR */
+ mtlr 28 /* restore LR */
+ lmw 28,disisave(0) /* restore r28-r31 */
+ rfi /* return to trapped code */
+
+/*
+ * ISI second stage fault handler
+ */
+s_isitrap:
+ mfsrr1 31 /* test whether this may be a
+ spill fault */
+ mtcr 31
+ mtsprg 1,1 /* save SP */
+ bc 4,1,disitrap /* branch if table miss is false */
+ lis 1,spillstk+SPILLSTK@ha
+ addi 1,1,spillstk+SPILLSTK@l /* get spill stack */
+ stwu 1,-52(1)
+ stw 0,48(1) /* save non-volatile registers */
+ stw 3,44(1)
+ stw 4,40(1)
+ stw 5,36(1)
+ stw 6,32(1)
+ stw 7,28(1)
+ stw 8,24(1)
+ stw 9,20(1)
+ stw 10,16(1)
+ stw 11,12(1)
+ stw 12,8(1)
+ mfxer 30 /* save XER */
+ mtsprg 2,30
+ mflr 30 /* save trap type */
+ mfctr 31 /* & ctr */
+ mfsrr0 3
+ b s_pte_spill /* above */
+
+/*
+ * External interrupt second level handler
+ */
+#define INTRENTER \
+/* Save non-volatile registers: */ \
+ stwu 1,-88(1); /* temporarily */ \
+ stw 0,84(1); \
+ mfsprg 0,1; /* get original SP */ \
+ stw 0,0(1); /* and store it */ \
+ stw 3,80(1); \
+ stw 4,76(1); \
+ stw 5,72(1); \
+ stw 6,68(1); \
+ stw 7,64(1); \
+ stw 8,60(1); \
+ stw 9,56(1); \
+ stw 10,52(1); \
+ stw 11,48(1); \
+ stw 12,44(1); \
+ stw 28,40(1); /* saved LR */ \
+ stw 29,36(1); /* saved CR */ \
+ stw 30,32(1); /* saved XER */ \
+ lmw 28,tempsave(0); /* restore r28-r31 */ \
+ mfctr 6; \
+ lis 5,intr_depth@ha; \
+ lwz 5,intr_depth@l(5); \
+ mfsrr0 4; \
+ mfsrr1 3; \
+ stw 6,28(1); \
+ stw 5,20(1); \
+ stw 4,12(1); \
+ stw 3,8(1); \
+/* interrupts are recoverable here, and enable translation */ \
+ lis 3,(KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY)@h; \
+ ori 3,3,(KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY)@l; \
+ mtsr KERNEL_SR,3; \
+ mfmsr 5; \
+ ori 5,5,(PSL_IR|PSL_DR|PSL_RI); \
+ mtmsr 5; \
+ isync
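+
+/*
+ * (INTRENTER leaves the trap-time SRR1 at 8(1), SRR0 at 12(1),
+ * intr_depth at 20(1) and CTR at 28(1), with the volatile GPRs and
+ * the saved LR/CR/XER above them; intr_exit below unwinds this
+ * same layout.)
+ */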
+
+ .globl extint_call
+extintr:
+ INTRENTER
+extint_call:
+ bl extint_call /* to be filled in later */
+
+intr_exit:
+/* Disable interrupts (should already be disabled) and MMU here: */
+ mfmsr 3
+ andi. 3,3,~(PSL_EE|PSL_ME|PSL_RI|PSL_DR|PSL_IR)@l
+ mtmsr 3
+ isync
+/* restore possibly overwritten registers: */
+ lwz 12,44(1)
+ lwz 11,48(1)
+ lwz 10,52(1)
+ lwz 9,56(1)
+ lwz 8,60(1)
+ lwz 7,64(1)
+ lwz 6,8(1)
+ lwz 5,12(1)
+ lwz 4,28(1)
+ lwz 3,32(1)
+ mtsrr1 6
+ mtsrr0 5
+ mtctr 4
+ mtxer 3
+/* Returning to user mode? */
+ mtcr 6 /* saved SRR1 */
+ bc 4,17,1f /* branch if PSL_PR is false */
+ mfsprg 3,0 /* get globaldata */
+ lwz 3,GD_CURPCB(3) /* get curpcb from globaldata */
+ lwz 3,PCB_PMR(3) /* get pmap real address from curpcb */
+ mtsr KERNEL_SR,3
+ lis 3,astpending@ha /* Test AST pending */
+ lwz 4,astpending@l(3)
+ andi. 4,4,1
+ beq 1f
+/* Setup for entry to realtrap: */
+ lwz 3,0(1) /* get saved SP */
+ mtsprg 1,3
+#if 0 /* XXX */
+ li 6,EXC_AST
+#endif
+ stmw 28,tempsave(0) /* establish tempsave again */
+ mtlr 6
+ lwz 28,40(1) /* saved LR */
+ lwz 29,36(1) /* saved CR */
+ lwz 6,68(1)
+ lwz 5,72(1)
+ lwz 4,76(1)
+ lwz 3,80(1)
+ lwz 0,84(1)
+ lis 30,intr_depth@ha /* adjust reentrancy count */
+ lwz 31,intr_depth@l(30)
+ addi 31,31,-1
+ stw 31,intr_depth@l(30)
+ b realtrap
+1:
+/* Here is the normal exit of extintr: */
+ lwz 5,36(1)
+ lwz 6,40(1)
+ mtcr 5
+ mtlr 6
+ lwz 6,68(1)
+ lwz 5,72(1)
+ lis 3,intr_depth@ha /* adjust reentrancy count */
+ lwz 4,intr_depth@l(3)
+ addi 4,4,-1
+ stw 4,intr_depth@l(3)
+ lwz 4,76(1)
+ lwz 3,80(1)
+ lwz 0,84(1)
+ lwz 1,0(1)
+ rfi
+
+/*
+ * Decrementer interrupt second level handler
+ */
+decrintr:
+ INTRENTER
+ addi 3,1,8 /* intr frame */
+ bl decr_intr
+ b intr_exit
+
+#ifdef DDB
+/*
+ * Deliberate entry to ddbtrap
+ */
+ .globl ddb_trap
+ddb_trap:
+ mtsprg 1,1
+ mfmsr 3
+ mtsrr1 3
+ andi. 3,3,~(PSL_EE|PSL_ME)@l
+ mtmsr 3 /* disable interrupts */
+ isync
+ stmw 28,ddbsave(0)
+ mflr 28
+ li 29,EXC_BPT
+ mtlr 29
+ mfcr 29
+ mtsrr0 28
+
+/*
+ * Now the ddb trap catching code.
+ */
+ddbtrap:
+ FRAME_SETUP(ddbsave)
+/* Call C trap code: */
+ addi 3,1,8
+ bl ddb_trap_glue
+ or. 3,3,3
+ bne ddbleave
+/* This wasn't for DDB, so switch to real trap: */
+ lwz 3,FRAME_EXC+8(1) /* save exception */
+ stw 3,ddbsave+8(0)
+ FRAME_LEAVE(ddbsave)
+ mtsprg 1,1 /* prepare for entrance to realtrap */
+ stmw 28,tempsave(0)
+ mflr 28
+ mfcr 29
+ lwz 31,ddbsave+8(0)
+ mtlr 31
+ b realtrap
+ddbleave:
+ FRAME_LEAVE(ddbsave)
+ rfi
+#endif /* DDB */
+
+#ifdef IPKDB
+/*
+ * Deliberate entry to ipkdbtrap
+ */
+ .globl ipkdb_trap
+ipkdb_trap:
+ mtsprg 1,1
+ mfmsr 3
+ mtsrr1 3
+ andi. 3,3,~(PSL_EE|PSL_ME)@l
+ mtmsr 3 /* disable interrupts */
+ isync
+ stmw 28,ipkdbsave(0)
+ mflr 28
+ li 29,EXC_BPT
+ mtlr 29
+ mfcr 29
+ mtsrr0 28
+
+/*
+ * Now the ipkdb trap catching code.
+ */
+ipkdbtrap:
+ FRAME_SETUP(ipkdbsave)
+/* Call C trap code: */
+ addi 3,1,8
+ bl ipkdb_trap_glue
+ or. 3,3,3
+ bne ipkdbleave
+/* This wasn't for IPKDB, so switch to real trap: */
+ lwz 3,FRAME_EXC+8(1) /* save exception */
+ stw 3,ipkdbsave+8(0)
+ FRAME_LEAVE(ipkdbsave)
+ mtsprg 1,1 /* prepare for entrance to realtrap */
+ stmw 28,tempsave(0)
+ mflr 28
+ mfcr 29
+ lwz 31,ipkdbsave+8(0)
+ mtlr 31
+ b realtrap
+ipkdbleave:
+ FRAME_LEAVE(ipkdbsave)
+ rfi
+
+ipkdbfault:
+ ba _ipkdbfault
+_ipkdbfault:
+ mfsrr0 3
+ addi 3,3,4
+ mtsrr0 3
+ li 3,-1
+ rfi
+
+/*
+ * int ipkdbfbyte(unsigned char *p)
+ */
+ .globl ipkdbfbyte
+ipkdbfbyte:
+ li 9,EXC_DSI /* establish new fault routine */
+ lwz 5,0(9)
+ lis 6,ipkdbfault@ha
+ lwz 6,ipkdbfault@l(6)
+ stw 6,0(9)
+#ifdef IPKDBUSERHACK
+ lis 8,ipkdbsr@ha
+ lwz 8,ipkdbsr@l(8)
+ mtsr USER_SR,8
+ isync
+#endif
+ dcbst 0,9 /* flush data... */
+ sync
+ icbi 0,9 /* and instruction caches */
+ lbz 3,0(3) /* fetch data */
+ stw 5,0(9) /* restore previous fault handler */
+ dcbst 0,9 /* and flush data... */
+ sync
+ icbi 0,9 /* and instruction caches */
+ blr
+
+/*
+ * int ipkdbsbyte(unsigned char *p, int c)
+ */
+ .globl ipkdbsbyte
+ipkdbsbyte:
+ li 9,EXC_DSI /* establish new fault routine */
+ lwz 5,0(9)
+ lis 6,ipkdbfault@ha
+ lwz 6,ipkdbfault@l(6)
+ stw 6,0(9)
+#ifdef IPKDBUSERHACK
+ lis 8,ipkdbsr@ha
+ lwz 8,ipkdbsr@l(8)
+ mtsr USER_SR,8
+ isync
+#endif
+ dcbst 0,9 /* flush data... */
+ sync
+ icbi 0,9 /* and instruction caches */
+ mr 6,3
+ xor 3,3,3
+ stb 4,0(6)
+ dcbst 0,6 /* Now do appropriate flushes
+ to data... */
+ sync
+ icbi 0,6 /* and instruction caches */
+ stw 5,0(9) /* restore previous fault handler */
+ dcbst 0,9 /* and flush data... */
+ sync
+ icbi 0,9 /* and instruction caches */
+ blr
+#endif /* IPKDB */
+
+/*
+ * int setfault()
+ *
+ * Similar to setjmp: set up for handling faults on accesses to user memory.
+ * Any routine using this may only call bcopy, either the assembly form
+ * or the (currently used) optimized C code, neither of which uses any
+ * non-volatile registers.
+ */
+ .globl setfault
+setfault:
+ mflr 0
+ mfcr 12
+ mfsprg 4,0
+ lwz 4,GD_CURPCB(4)
+ stw 3,PCB_FAULT(4)
+ stw 0,0(3)
+ stw 1,4(3)
+ stw 2,8(3)
+ stmw 12,12(3)
+ xor 3,3,3
+ blr
+
+/*
+ * Signal "trampoline" code.
+ */
+ .globl sigcode
+sigcode:
+ b sys_exit
+esigcode:
+ .data
+GLOBAL(szsigcode)
+ .long esigcode-sigcode
+ .text
+
diff --git a/sys/powerpc/powerpc/machdep.c b/sys/powerpc/powerpc/machdep.c
new file mode 100644
index 0000000..e4eb98e
--- /dev/null
+++ b/sys/powerpc/powerpc/machdep.c
@@ -0,0 +1,972 @@
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (C) 2001 Benno Rice
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include "opt_ddb.h"
+#include "opt_compat.h"
+#include "opt_msgbuf.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/eventhandler.h>
+#include <sys/sysproto.h>
+#include <sys/mutex.h>
+#include <sys/ktr.h>
+#include <sys/signalvar.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/reboot.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/mbuf.h>
+#include <sys/vmmeter.h>
+#include <sys/msgbuf.h>
+#include <sys/exec.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+#include <sys/linker.h>
+#include <sys/cons.h>
+#include <net/netisr.h>
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_pager.h>
+#include <sys/user.h>
+#include <sys/ptrace.h>
+#include <machine/bat.h>
+#include <machine/clock.h>
+#include <machine/md_var.h>
+#include <machine/reg.h>
+#include <machine/fpu.h>
+#include <machine/globaldata.h>
+#include <machine/vmparam.h>
+#include <machine/elf.h>
+#include <machine/trap.h>
+#include <machine/powerpc.h>
+#include <dev/ofw/openfirm.h>
+#include <ddb/ddb.h>
+#include <sys/vnode.h>
+#include <fs/procfs/procfs.h>
+#include <machine/sigframe.h>
+
+int cold = 1;
+
+struct mtx sched_lock;
+struct mtx Giant;
+
+struct user *proc0paddr;
+
+char machine[] = "powerpc";
+SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
+
+static char model[128];
+SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, model, 0, "");
+
+char bootpath[256];
+
+#ifdef DDB
+/* start and end of kernel symbol table */
+void *ksym_start, *ksym_end;
+#endif /* DDB */
+
+static void cpu_startup(void *);
+SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
+
+void powerpc_init(u_int, u_int, u_int, char *);
+
+int save_ofw_mapping(void);
+int restore_ofw_mapping(void);
+
+void install_extint(void (*)(void));
+
+void osendsig(sig_t, int, sigset_t *, u_long);
+
+struct msgbuf *msgbufp = 0;
+
+int bootverbose = 0, Maxmem = 0;
+long dumplo;
+
+vm_offset_t phys_avail[10];
+
+static int chosen;
+
+struct pmap ofw_pmap;
+extern int ofmsr;
+
+struct bat battable[16];
+
+static void identifycpu(void);
+
+static vm_offset_t buffer_sva, buffer_eva;
+vm_offset_t clean_sva, clean_eva;
+static vm_offset_t pager_sva, pager_eva;
+
+static void
+powerpc_ofw_shutdown(void *junk, int howto)
+{
+ if (howto & RB_HALT) {
+ OF_exit();
+ }
+}
+
+static void
+cpu_startup(void *dummy)
+{
+ unsigned int i;
+ caddr_t v;
+ vm_offset_t maxaddr;
+ vm_size_t size;
+ vm_offset_t firstaddr;
+ vm_offset_t minaddr;
+
+ size = 0;
+
+ /*
+ * Good {morning,afternoon,evening,night}.
+ */
+ identifycpu();
+
+ /* startrtclock(); */
+#ifdef PERFMON
+ perfmon_init();
+#endif
+ printf("real memory = %ld (%ldK bytes)\n", ptoa(Maxmem),
+ ptoa(Maxmem) / 1024);
+
+ /*
+ * Display any holes after the first chunk of extended memory.
+ */
+ if (bootverbose) {
+ int indx;
+
+ printf("Physical memory chunk(s):\n");
+ for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
+ int size1 = phys_avail[indx + 1] - phys_avail[indx];
+
+ printf("0x%08x - 0x%08x, %d bytes (%d pages)\n",
+ phys_avail[indx], phys_avail[indx + 1] - 1, size1,
+ size1 / PAGE_SIZE);
+ }
+ }
+
+ /*
+ * Calculate callout wheel size
+ */
+ for (callwheelsize = 1, callwheelbits = 0;
+ callwheelsize < ncallout;
+ callwheelsize <<= 1, ++callwheelbits)
+ ;
+ callwheelmask = callwheelsize - 1;
+
+ /*
+ * Allocate space for system data structures.
+ * The first available kernel virtual address is in "v".
+ * As pages of kernel virtual memory are allocated, "v" is incremented.
+ * As pages of memory are allocated and cleared,
+ * "firstaddr" is incremented.
+	 */
+
+ /*
+ * Make two passes. The first pass calculates how much memory is
+ * needed and allocates it. The second pass assigns virtual
+ * addresses to the various data structures.
+ */
+ firstaddr = 0;
+again:
+ v = (caddr_t)firstaddr;
+
+#define valloc(name, type, num) \
+ (name) = (type *)v; v = (caddr_t)((name)+(num))
+#define valloclim(name, type, num, lim) \
+ (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
+
+ valloc(callout, struct callout, ncallout);
+ valloc(callwheel, struct callout_tailq, callwheelsize);
+
+ /*
+ * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
+ * For the first 64MB of ram nominally allocate sufficient buffers to
+ * cover 1/4 of our ram. Beyond the first 64MB allocate additional
+ * buffers to cover 1/20 of our ram over 64MB.
+ */
+
+ if (nbuf == 0) {
+ int factor;
+
+ factor = 4 * BKVASIZE / PAGE_SIZE;
+ nbuf = 50;
+ if (Maxmem > 1024)
+ nbuf += min((Maxmem - 1024) / factor, 16384 / factor);
+ if (Maxmem > 16384)
+ nbuf += (Maxmem - 16384) * 2 / (factor * 5);
+ }
+ nswbuf = max(min(nbuf/4, 64), 16);
+
+ valloc(swbuf, struct buf, nswbuf);
+ valloc(buf, struct buf, nbuf);
+ v = bufhashinit(v);
+
+ /*
+ * End of first pass, size has been calculated so allocate memory
+ */
+ if (firstaddr == 0) {
+ size = (vm_size_t)(v - firstaddr);
+ firstaddr = (vm_offset_t)kmem_alloc(kernel_map,
+ round_page(size));
+ if (firstaddr == 0)
+ panic("startup: no room for tables");
+ goto again;
+ }
+
+ /*
+ * End of second pass, addresses have been assigned
+ */
+ if ((vm_size_t)(v - firstaddr) != size)
+ panic("startup: table size inconsistency");
+
+ clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
+ (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
+ buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
+ (nbuf*BKVASIZE));
+ pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
+ (nswbuf*MAXPHYS) + pager_map_size);
+ pager_map->system_map = 1;
+ exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ (16*(ARG_MAX+(PAGE_SIZE*3))));
+
+ /*
+ * XXX: Mbuf system machine-specific initializations should
+ * go here, if anywhere.
+ */
+
+ /*
+ * Initialize callouts
+ */
+ SLIST_INIT(&callfree);
+ for (i = 0; i < ncallout; i++) {
+ callout_init(&callout[i], 0);
+ callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
+ SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
+ }
+
+ for (i = 0; i < callwheelsize; i++) {
+ TAILQ_INIT(&callwheel[i]);
+ }
+
+ mtx_init(&callout_lock, "callout", MTX_SPIN);
+
+#if defined(USERCONFIG)
+#if defined(USERCONFIG_BOOT)
+ if (1)
+#else
+ if (boothowto & RB_CONFIG)
+#endif
+ {
+ userconfig();
+ cninit(); /* the preferred console may have changed */
+ }
+#endif
+
+ printf("avail memory = %ld (%ldK bytes)\n", ptoa(cnt.v_free_count),
+ ptoa(cnt.v_free_count) / 1024);
+
+ /*
+ * Set up buffers, so they can be used to read disk labels.
+ */
+ bufinit();
+ vm_pager_bufferinit();
+ EVENTHANDLER_REGISTER(shutdown_final, powerpc_ofw_shutdown, 0,
+ SHUTDOWN_PRI_LAST);
+
+#ifdef SMP
+ /*
+ * OK, enough kmem_alloc/malloc state should be up, lets get on with it!
+ */
+ mp_start(); /* fire up the secondaries */
+ mp_announce();
+#endif /* SMP */
+}
+
+void
+identifycpu()
+{
+ int pvr, cpu;
+
+ /*
+ * Find cpu type (Do it by OpenFirmware?)
+ */
+ __asm ("mfpvr %0" : "=r"(pvr));
+ cpu = pvr >> 16;
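+	/*
+	 * The PVR carries the processor version in its upper 16 bits
+	 * and the revision level in the lower 16; a 750 at revision
+	 * 2.2, for instance, reads as 0x00080202.
+	 */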
+ switch (cpu) {
+ case 1:
+ sprintf(model, "601");
+ break;
+ case 3:
+ sprintf(model, "603");
+ break;
+ case 4:
+ sprintf(model, "604");
+ break;
+ case 5:
+ sprintf(model, "602");
+ break;
+ case 6:
+ sprintf(model, "603e");
+ break;
+ case 7:
+ sprintf(model, "603ev");
+ break;
+ case 8:
+ sprintf(model, "750 (G3)");
+ break;
+ case 9:
+ sprintf(model, "604ev");
+ break;
+ case 12:
+ sprintf(model, "7400 (G4)");
+ break;
+ case 20:
+ sprintf(model, "620");
+ break;
+ default:
+ sprintf(model, "Version %x", cpu);
+ break;
+ }
+ sprintf(model + strlen(model), " (Revision %x)", pvr & 0xffff);
+ printf("CPU: PowerPC %s\n", model);
+}
+
+extern char kernel_text[], _end[];
+
+extern void *trapcode, *trapsize;
+extern void *alitrap, *alisize;
+extern void *dsitrap, *dsisize;
+extern void *isitrap, *isisize;
+extern void *decrint, *decrsize;
+extern void *tlbimiss, *tlbimsize;
+extern void *tlbdlmiss, *tlbdlmsize;
+extern void *tlbdsmiss, *tlbdsmsize;
+
+#if 0 /* XXX: interrupt handler. We'll get to this later */
+extern void ext_intr(void);
+#endif
+
+#ifdef DDB
+extern ddblow, ddbsize;
+#endif
+#ifdef IPKDB
+extern ipkdblow, ipkdbsize;
+#endif
+
+static struct globaldata tmpglobal;
+
+void
+powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, char *args)
+{
+ int exc, scratch;
+ struct mem_region *allmem, *availmem, *mp;
+ struct globaldata *globalp;
+
+ /*
+ * Set up BAT0 to only map the lowest 256 MB area
+ */
+ battable[0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
+ battable[0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
+
+ /*
+ * Map PCI memory space.
+ */
+ battable[0x8].batl = BATL(0x80000000, BAT_I, BAT_PP_RW);
+ battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
+
+ battable[0x9].batl = BATL(0x90000000, BAT_I, BAT_PP_RW);
+ battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
+
+ battable[0xa].batl = BATL(0xa0000000, BAT_I, BAT_PP_RW);
+ battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);
+
+ /*
+ * Map obio devices.
+ */
+ battable[0xf].batl = BATL(0xf0000000, BAT_I, BAT_PP_RW);
+ battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);
+
+ /*
+ * Now setup fixed bat registers
+ *
+ * Note that we still run in real mode, and the BAT
+ * registers were cleared above.
+ */
+ /* BAT0 used for initial 256 MB segment */
+ __asm __volatile ("mtibatl 0,%0; mtibatu 0,%1;"
+ "mtdbatl 0,%0; mtdbatu 0,%1;"
+ :: "r"(battable[0].batl), "r"(battable[0].batu));
+ /*
+ * Set up battable to map all RAM regions.
+ * This is here because mem_regions() call needs bat0 set up.
+ */
+ mem_regions(&allmem, &availmem);
+ for (mp = allmem; mp->size; mp++) {
+ vm_offset_t pa = mp->start & 0xf0000000;
+ vm_offset_t end = mp->start + mp->size;
+
+ do {
+ u_int n = pa >> 28;
+
+ battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
+ battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
+ pa += 0x10000000;
+ } while (pa < end);
+ }
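+	/*
+	 * (For example, a RAM region spanning 0x00000000-0x17ffffff
+	 * populates battable[0] and battable[1] with 256MB entries.)
+	 */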
+
+ chosen = OF_finddevice("/chosen");
+ save_ofw_mapping();
+
+ proc0.p_addr = proc0paddr;
+ bzero(proc0.p_addr, sizeof *proc0.p_addr);
+
+ LIST_INIT(&proc0.p_contested);
+
+/* XXX: NetBSDism I _think_. Not sure yet. */
+#if 0
+ curpm = curpcb->pcb_pmreal = curpcb->pcb_pm = kernel_pmap;
+#endif
+
+ /*
+ * Initialise some mutexes.
+ */
+ mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
+ mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
+ mtx_init(&proc0.p_mtx, "process lock", MTX_DEF);
+ mtx_lock(&Giant);
+
+ /*
+ * Initialise console.
+ */
+ cninit();
+
+#ifdef __notyet__ /* Needs some rethinking regarding real/virtual OFW */
+ OF_set_callback(callback);
+#endif
+
+ /*
+ * Set up trap vectors
+ */
+ for (exc = EXC_RSVD; exc <= EXC_LAST; exc += 0x100) {
+ switch (exc) {
+ default:
+ bcopy(&trapcode, (void *)exc, (size_t)&trapsize);
+ break;
+ case EXC_EXI:
+ /*
+ * This one is (potentially) installed during autoconf
+ */
+ break;
+ case EXC_ALI:
+ bcopy(&alitrap, (void *)EXC_ALI, (size_t)&alisize);
+ break;
+ case EXC_DSI:
+ bcopy(&dsitrap, (void *)EXC_DSI, (size_t)&dsisize);
+ break;
+ case EXC_ISI:
+ bcopy(&isitrap, (void *)EXC_ISI, (size_t)&isisize);
+ break;
+ case EXC_DECR:
+ bcopy(&decrint, (void *)EXC_DECR, (size_t)&decrsize);
+ break;
+ case EXC_IMISS:
+ bcopy(&tlbimiss, (void *)EXC_IMISS, (size_t)&tlbimsize);
+ break;
+ case EXC_DLMISS:
+ bcopy(&tlbdlmiss, (void *)EXC_DLMISS, (size_t)&tlbdlmsize);
+ break;
+ case EXC_DSMISS:
+ bcopy(&tlbdsmiss, (void *)EXC_DSMISS, (size_t)&tlbdsmsize);
+ break;
+#if defined(DDB) || defined(IPKDB)
+ case EXC_TRC:
+ case EXC_PGM:
+ case EXC_BPT:
+#if defined(DDB)
+ bcopy(&ddblow, (void *)exc, (size_t)&ddbsize);
+#else
+ bcopy(&ipkdblow, (void *)exc, (size_t)&ipkdbsize);
+#endif
+ break;
+#endif /* DDB || IPKDB */
+ }
+ }
+
+#if 0 /* XXX: coming soon... */
+ /*
+ * external interrupt handler install
+ */
+ install_extint(ext_intr);
+
+ __syncicache((void *)EXC_RST, EXC_LAST - EXC_RST + 0x100);
+#endif
+
+ /*
+ * Now enable translation (and machine checks/recoverable interrupts).
+ */
+ __asm __volatile ("mfmsr %0; ori %0,%0,%1; mtmsr %0; isync"
+ : "=r"(scratch) : "K"(PSL_IR|PSL_DR|PSL_ME|PSL_RI));
+
+ ofmsr &= ~PSL_IP;
+
+ /*
+ * Parse arg string.
+ */
+#ifdef DDB
+ bcopy(args + strlen(args) + 1, &startsym, sizeof(startsym));
+ bcopy(args + strlen(args) + 5, &endsym, sizeof(endsym));
+ if (startsym == NULL || endsym == NULL)
+ startsym = endsym = NULL;
+#endif
+
+ strcpy(bootpath, args);
+ args = bootpath;
+ while (*++args && *args != ' ');
+ if (*args) {
+ *args++ = 0;
+ while (*args) {
+ switch (*args++) {
+ case 'a':
+ boothowto |= RB_ASKNAME;
+ break;
+ case 's':
+ boothowto |= RB_SINGLE;
+ break;
+ case 'd':
+ boothowto |= RB_KDB;
+ break;
+ case 'v':
+ boothowto |= RB_VERBOSE;
+ break;
+ }
+ }
+ }
+
+#ifdef DDB
+ ddb_init((int)((u_int)endsym - (u_int)startsym), startsym, endsym);
+#endif
+#ifdef IPKDB
+ /*
+ * Now trap to IPKDB
+ */
+ ipkdb_init();
+ if (boothowto & RB_KDB)
+ ipkdb_connect(0);
+#endif
+
+ /*
+ * Set the page size.
+ */
+#if 0
+ vm_set_page_size();
+#endif
+
+ /*
+ * Initialize pmap module.
+ */
+ pmap_bootstrap(startkernel, endkernel);
+
+ restore_ofw_mapping();
+
+ /*
+ * Setup the global data for the bootstrap cpu.
+ */
+ globalp = (struct globaldata *) &tmpglobal;
+
+ /*
+ * XXX: Pass 0 as CPU id. This is bad. We need to work out
+ * XXX: which CPU we are somehow.
+ */
+ globaldata_init(globalp, 0, sizeof(struct globaldata));
+ __asm("mtsprg 0,%0\n" :: "r" (globalp));
+
+ PCPU_GET(next_asn) = 1; /* 0 used for proc0 pmap */
+ PCPU_SET(curproc, &proc0);
+ PCPU_SET(spinlocks, NULL);
+}
+
+static int N_mapping;
+static struct {
+ vm_offset_t va;
+ int len;
+ vm_offset_t pa;
+ int mode;
+} ofw_mapping[256];
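+
+/*
+ * Each entry mirrors one record of the firmware "translations"
+ * property: a virtual range, its physical backing and the OFW mapping
+ * mode.  save_ofw_mapping() snapshots these before the kernel takes
+ * over the MMU; restore_ofw_mapping() re-enters the high ones into
+ * ofw_pmap, presumably so later calls into the firmware keep working.
+ */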
+
+int
+save_ofw_mapping()
+{
+ int mmui, mmu;
+
+ OF_getprop(chosen, "mmu", &mmui, 4);
+ mmu = OF_instance_to_package(mmui);
+
+ bzero(ofw_mapping, sizeof(ofw_mapping));
+
+ N_mapping =
+ OF_getprop(mmu, "translations", ofw_mapping, sizeof(ofw_mapping));
+ N_mapping /= sizeof(ofw_mapping[0]);
+
+ return 0;
+}
+
+int
+restore_ofw_mapping()
+{
+ int i;
+ struct vm_page pg;
+
+ pmap_pinit(&ofw_pmap);
+
+ ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
+
+ for (i = 0; i < N_mapping; i++) {
+ vm_offset_t pa = ofw_mapping[i].pa;
+ vm_offset_t va = ofw_mapping[i].va;
+ int size = ofw_mapping[i].len;
+
+ if (va < 0x90000000) /* XXX */
+ continue;
+
+ while (size > 0) {
+ pg.phys_addr = pa;
+ pmap_enter(&ofw_pmap, va, &pg, VM_PROT_ALL,
+ VM_PROT_ALL);
+ pa += PAGE_SIZE;
+ va += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+
+ return 0;
+}
+
+void
+bzero(void *buf, size_t len)
+{
+ caddr_t p;
+
+ p = buf;
+
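+	/*
+	 * Zero any leading unaligned bytes, then eight u_longs (32
+	 * bytes on ILP32) per iteration, then the word and byte tails.
+	 */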
+ while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
+ *p++ = 0;
+ len--;
+ }
+
+ while (len >= sizeof(u_long) * 8) {
+ *(u_long*) p = 0;
+ *((u_long*) p + 1) = 0;
+ *((u_long*) p + 2) = 0;
+ *((u_long*) p + 3) = 0;
+ len -= sizeof(u_long) * 8;
+ *((u_long*) p + 4) = 0;
+ *((u_long*) p + 5) = 0;
+ *((u_long*) p + 6) = 0;
+ *((u_long*) p + 7) = 0;
+ p += sizeof(u_long) * 8;
+ }
+
+ while (len >= sizeof(u_long)) {
+ *(u_long*) p = 0;
+ len -= sizeof(u_long);
+ p += sizeof(u_long);
+ }
+
+ while (len) {
+ *p++ = 0;
+ len--;
+ }
+}
+
+#if 0
+void
+delay(unsigned n)
+{
+ u_long tb;
+
+ do {
+ __asm __volatile("mftb %0" : "=r" (tb));
+ } while (n > (int)(tb & 0xffffffff));
+}
+#endif
+
+void
+osendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
+{
+
+ /* XXX: To be done */
+ return;
+}
+
+void
+sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
+{
+
+ /* XXX: To be done */
+ return;
+}
+
+int
+osigreturn(struct proc *p, struct osigreturn_args *uap)
+{
+
+ /* XXX: To be done */
+ return(ENOSYS);
+}
+
+int
+sigreturn(struct proc *p, struct sigreturn_args *uap)
+{
+
+ /* XXX: To be done */
+ return(ENOSYS);
+}
+
+void
+cpu_boot(int howto)
+{
+}
+
+/*
+ * Shutdown the CPU as much as possible.
+ */
+void
+cpu_halt(void)
+{
+
+ OF_exit();
+}
+
+/*
+ * Set up registers on exec.
+ */
+void
+setregs(struct proc *p, u_long entry, u_long stack, u_long ps_strings)
+{
+ struct trapframe *tf;
+ struct ps_strings arginfo;
+
+ tf = trapframe(p);
+
+ bzero(tf, sizeof *tf);
+ tf->fixreg[1] = -roundup(-stack + 8, 16);
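+	/*
+	 * The expression above rounds the initial SP down to a 16-byte
+	 * boundary while reserving at least 8 bytes above it.
+	 */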
+
+ /*
+ * XXX Machine-independent code has already copied arguments and
+ * XXX environment to userland. Get them back here.
+ */
+ (void)copyin((char *)PS_STRINGS, &arginfo, sizeof(arginfo));
+
+ /*
+ * Set up arguments for _start():
+ * _start(argc, argv, envp, obj, cleanup, ps_strings);
+ *
+ * Notes:
+	 * - obj and cleanup are the auxiliary and termination
+	 *   vectors.  They are fixed up by ld.elf_so.
+	 * - ps_strings is a NetBSD extension, and will be
+ * ignored by executables which are strictly
+ * compliant with the SVR4 ABI.
+ *
+ * XXX We have to set both regs and retval here due to different
+ * XXX calling convention in trap.c and init_main.c.
+ */
+ tf->fixreg[3] = arginfo.ps_nargvstr;
+ tf->fixreg[4] = (register_t)arginfo.ps_argvstr;
+ tf->fixreg[5] = (register_t)arginfo.ps_envstr;
+	tf->fixreg[6] = 0;			/* auxiliary vector */
+ tf->fixreg[7] = 0; /* termination vector */
+ tf->fixreg[8] = (register_t)PS_STRINGS; /* NetBSD extension */
+
+ tf->srr0 = entry;
+ tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
+ p->p_addr->u_pcb.pcb_flags = 0;
+}
+
+extern void *extint, *extsize;
+extern u_long extint_call;
+
+#if 0
+void
+install_extint(void (*handler)(void))
+{
+ u_long offset;
+ int omsr, msr;
+
+ offset = (u_long)handler - (u_long)&extint_call;
+
+#ifdef DIAGNOSTIC
+ if (offset > 0x1ffffff)
+ panic("install_extint: too far away");
+#endif
+ __asm __volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
+ : "=r"(omsr), "=r"(msr) : "K"((u_short)~PSL_EE));
+ extint_call = (extint_call & 0xfc000003) | offset;
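+	/*
+	 * The line above patches the "bl extint_call" placeholder in
+	 * the extint stub: the mask keeps the branch opcode and the
+	 * AA/LK bits, and splices in the new 24-bit displacement.
+	 */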
+ bcopy(&extint, (void *)EXC_EXI, (size_t)&extsize);
+ __syncicache((void *)&extint_call, sizeof extint_call);
+ __syncicache((void *)EXC_EXI, (int)&extsize);
+ __asm __volatile ("mtmsr %0" :: "r"(omsr));
+}
+#endif
+
+#if !defined(DDB)
+void
+Debugger(const char *msg)
+{
+
+ printf("Debugger(\"%s\") called.\n", msg);
+}
+#endif /* !defined(DDB) */
+
+/* XXX: dummy {fill,set}_[fp]regs */
+int
+fill_regs(struct proc *p, struct reg *regs)
+{
+
+ return (ENOSYS);
+}
+
+int
+fill_fpregs(struct proc *p, struct fpreg *fpregs)
+{
+
+ return (ENOSYS);
+}
+
+int
+set_regs(struct proc *p, struct reg *regs)
+{
+
+ return (ENOSYS);
+}
+
+int
+set_fpregs(struct proc *p, struct fpreg *fpregs)
+{
+
+ return (ENOSYS);
+}
+
+int
+ptrace_set_pc(struct proc *p, unsigned long addr)
+{
+
+ /* XXX: coming soon... */
+ return (ENOSYS);
+}
+
+int
+ptrace_single_step(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return (ENOSYS);
+}
+
+int
+ptrace_write_u(struct proc *p, vm_offset_t off, long data)
+{
+
+ /* XXX: coming soon... */
+ return (ENOSYS);
+}
+
+int
+ptrace_read_u_check(struct proc *p, vm_offset_t addr, size_t len)
+{
+
+ /* XXX: coming soon... */
+ return (ENOSYS);
+}
+
+int
+ptrace_clear_single_step(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return (ENOSYS);
+}
+
+/*
+ * Initialise a struct globaldata.
+ */
+void
+globaldata_init(struct globaldata *globaldata, int cpuid, size_t sz)
+{
+
+ bzero(globaldata, sz);
+ globaldata->gd_cpuid = cpuid;
+ globaldata->gd_next_asn = 0;
+ globaldata->gd_current_asngen = 1;
+}
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
new file mode 100644
index 0000000..ae34d23
--- /dev/null
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -0,0 +1,1747 @@
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
+ */
+/*
+ * Copyright (C) 2001 Benno Rice.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/msgbuf.h>
+#include <sys/vmmeter.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/mutex.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <sys/lock.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
+
+#include <sys/user.h>
+
+#include <machine/pcb.h>
+#include <machine/powerpc.h>
+#include <machine/pte.h>
+
+pte_t *ptable;
+int ptab_cnt;
+u_int ptab_mask;
+#define HTABSIZE (ptab_cnt * 64)
+
+#define MINPV 2048
+
+struct pte_ovfl {
+ LIST_ENTRY(pte_ovfl) po_list; /* Linked list of overflow entries */
+ struct pte po_pte; /* PTE for this mapping */
+};
+
+LIST_HEAD(pte_ovtab, pte_ovfl) *potable; /* Overflow entries for ptable */
+
+static struct pmap kernel_pmap_store;
+pmap_t kernel_pmap;
+
+static int npgs;
+static u_int nextavail;
+
+#ifndef MSGBUFADDR
+extern vm_offset_t msgbuf_paddr;
+#endif
+
+static struct mem_region *mem, *avail;
+
+vm_offset_t avail_start;
+vm_offset_t avail_end;
+vm_offset_t virtual_avail;
+vm_offset_t virtual_end;
+
+vm_offset_t kernel_vm_end;
+
+static int pmap_pagedaemon_waken = 0;
+
+extern unsigned int Maxmem;
+
+#define ATTRSHFT 4
+
+struct pv_entry *pv_table;
+
+static vm_zone_t pvzone;
+static struct vm_zone pvzone_store;
+static struct vm_object pvzone_obj;
+static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
+static struct pv_entry *pvinit;
+
+#if !defined(PMAP_SHPGPERPROC)
+#define PMAP_SHPGPERPROC 200
+#endif
+
+struct pv_page;
+struct pv_page_info {
+ LIST_ENTRY(pv_page) pgi_list;
+ struct pv_entry *pgi_freelist;
+ int pgi_nfree;
+};
+#define NPVPPG ((PAGE_SIZE - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
+struct pv_page {
+ struct pv_page_info pvp_pgi;
+ struct pv_entry pvp_pv[NPVPPG];
+};
+LIST_HEAD(pv_page_list, pv_page) pv_page_freelist;
+int pv_nfree;
+int pv_pcnt;
+static struct pv_entry *pmap_alloc_pv(void);
+static void pmap_free_pv(struct pv_entry *);
+
+struct po_page;
+struct po_page_info {
+ LIST_ENTRY(po_page) pgi_list;
+ vm_page_t pgi_page;
+ LIST_HEAD(po_freelist, pte_ovfl) pgi_freelist;
+ int pgi_nfree;
+};
+#define NPOPPG ((PAGE_SIZE - sizeof(struct po_page_info)) / sizeof(struct pte_ovfl))
+struct po_page {
+ struct po_page_info pop_pgi;
+ struct pte_ovfl pop_po[NPOPPG];
+};
+LIST_HEAD(po_page_list, po_page) po_page_freelist;
+int po_nfree;
+int po_pcnt;
+static struct pte_ovfl *poalloc(void);
+static void pofree(struct pte_ovfl *, int);
+
+static u_int usedsr[NPMAPS / sizeof(u_int) / 8];
+
+static int pmap_initialized;
+
+int pte_spill(vm_offset_t);
+
+/*
+ * These small routines may have to be replaced,
+ * if/when we support processors other than the 604.
+ */
+static __inline void
+tlbie(vm_offset_t ea)
+{
+
+ __asm __volatile ("tlbie %0" :: "r"(ea));
+}
+
+static __inline void
+tlbsync(void)
+{
+
+ __asm __volatile ("sync; tlbsync; sync");
+}
+
+static __inline void
+tlbia(void)
+{
+ vm_offset_t i;
+
+ __asm __volatile ("sync");
+ for (i = 0; i < (vm_offset_t)0x00040000; i += 0x00001000) {
+ tlbie(i);
+ }
+ tlbsync();
+}
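+
+/*
+ * (tlbie selects a TLB congruence class from the effective address,
+ * so stepping a page at a time through a 256KB window invalidates
+ * every set on the 604-class parts this assumes.)
+ */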
+
+static __inline int
+ptesr(sr_t *sr, vm_offset_t addr)
+{
+
+ return sr[(u_int)addr >> ADDR_SR_SHFT];
+}
+
+static __inline int
+pteidx(sr_t sr, vm_offset_t addr)
+{
+ int hash;
+
+ hash = (sr & SR_VSID) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
+ return hash & ptab_mask;
+}
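+
+/*
+ * Example: for a va of 0xd0012000 in a segment whose VSID is 0x123,
+ * the primary PTEG index is (0x123 ^ 0x00012) & ptab_mask; the
+ * secondary index is that value XORed with ptab_mask (see
+ * pte_insert()).
+ */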
+
+static __inline int
+ptematch(pte_t *ptp, sr_t sr, vm_offset_t va, int which)
+{
+
+ return ptp->pte_hi == (((sr & SR_VSID) << PTE_VSID_SHFT) |
+ (((u_int)va >> ADDR_API_SHFT) & PTE_API) | which);
+}
+
+static __inline struct pv_entry *
+pa_to_pv(vm_offset_t pa)
+{
+#if 0 /* XXX */
+ int bank, pg;
+
+ bank = vm_physseg_find(atop(pa), &pg);
+ if (bank == -1)
+ return NULL;
+ return &vm_physmem[bank].pmseg.pvent[pg];
+#endif
+ return (NULL);
+}
+
+static __inline char *
+pa_to_attr(vm_offset_t pa)
+{
+#if 0 /* XXX */
+ int bank, pg;
+
+ bank = vm_physseg_find(atop(pa), &pg);
+ if (bank == -1)
+ return NULL;
+ return &vm_physmem[bank].pmseg.attrs[pg];
+#endif
+ return (NULL);
+}
+
+/*
+ * Try to insert page table entry *pt into the ptable at idx.
+ *
+ * Note: *pt mustn't have PTE_VALID set.
+ * This is done here as required by Book III, 4.12.
+ */
+static int
+pte_insert(int idx, pte_t *pt)
+{
+ pte_t *ptp;
+ int i;
+
+ /*
+ * First try primary hash.
+ */
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if (!(ptp->pte_hi & PTE_VALID)) {
+ *ptp = *pt;
+ ptp->pte_hi &= ~PTE_HID;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= PTE_VALID;
+ return 1;
+ }
+ }
+
+ /*
+ * Then try secondary hash.
+ */
+
+ idx ^= ptab_mask;
+
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if (!(ptp->pte_hi & PTE_VALID)) {
+ *ptp = *pt;
+ ptp->pte_hi |= PTE_HID;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= PTE_VALID;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Spill handler.
+ *
+ * Tries to spill a page table entry from the overflow area.
+ * Note that this routine runs in real mode on a separate stack,
+ * with interrupts disabled.
+ */
+int
+pte_spill(vm_offset_t addr)
+{
+ int idx, i;
+ sr_t sr;
+ struct pte_ovfl *po;
+ pte_t ps;
+ pte_t *pt;
+
+ __asm ("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
+ idx = pteidx(sr, addr);
+ for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
+ if (ptematch(&po->po_pte, sr, addr, 0)) {
+ /*
+ * Now found an entry to be spilled into the real
+ * ptable.
+ */
+ if (pte_insert(idx, &po->po_pte)) {
+ LIST_REMOVE(po, po_list);
+ pofree(po, 0);
+ return 1;
+ }
+ /*
+ * Have to substitute some entry. Use the primary
+ * hash for this.
+ *
+ * Use low bits of timebase as random generator
+ */
+ __asm ("mftb %0" : "=r"(i));
+ pt = ptable + idx * 8 + (i & 7);
+ pt->pte_hi &= ~PTE_VALID;
+ ps = *pt;
+ __asm __volatile ("sync");
+ tlbie(addr);
+ tlbsync();
+ *pt = po->po_pte;
+ __asm __volatile ("sync");
+ pt->pte_hi |= PTE_VALID;
+ po->po_pte = ps;
+ if (ps.pte_hi & PTE_HID) {
+ /*
+ * We took an entry that was on the alternate
+				 * hash chain, so move it to its original
+ * chain.
+ */
+ po->po_pte.pte_hi &= ~PTE_HID;
+ LIST_REMOVE(po, po_list);
+ LIST_INSERT_HEAD(potable + (idx ^ ptab_mask),
+ po, po_list);
+ }
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This is called during powerpc_init, before the system is really initialized.
+ */
+void
+pmap_bootstrap(u_int kernelstart, u_int kernelend)
+{
+ struct mem_region *mp, *mp1;
+ int cnt, i;
+ u_int s, e, sz;
+
+ /*
+ * Get memory.
+ */
+ mem_regions(&mem, &avail);
+ for (mp = mem; mp->size; mp++)
+ Maxmem += btoc(mp->size);
+
+ /*
+ * Count the number of available entries.
+ */
+ for (cnt = 0, mp = avail; mp->size; mp++) {
+ cnt++;
+ }
+
+ /*
+ * Page align all regions.
+ * Non-page aligned memory isn't very interesting to us.
+ * Also, sort the entries for ascending addresses.
+ */
+ kernelstart &= ~PAGE_MASK;
+ kernelend = (kernelend + PAGE_MASK) & ~PAGE_MASK;
+ for (mp = avail; mp->size; mp++) {
+ s = mp->start;
+ e = mp->start + mp->size;
+ /*
+ * Check whether this region holds all of the kernel.
+ */
+ if (s < kernelstart && e > kernelend) {
+ avail[cnt].start = kernelend;
+ avail[cnt++].size = e - kernelend;
+ e = kernelstart;
+ }
+ /*
+		 * Look whether this region starts within the kernel.
+ */
+ if (s >= kernelstart && s < kernelend) {
+ if (e <= kernelend)
+ goto empty;
+ s = kernelend;
+ }
+ /*
+ * Now look whether this region ends within the kernel.
+ */
+ if (e > kernelstart && e <= kernelend) {
+ if (s >= kernelstart)
+ goto empty;
+ e = kernelstart;
+ }
+ /*
+ * Now page align the start and size of the region.
+ */
+ s = round_page(s);
+ e = trunc_page(e);
+ if (e < s) {
+ e = s;
+ }
+ sz = e - s;
+ /*
+ * Check whether some memory is left here.
+ */
+ if (sz == 0) {
+ empty:
+ bcopy(mp + 1, mp,
+ (cnt - (mp - avail)) * sizeof *mp);
+ cnt--;
+ mp--;
+ continue;
+ }
+
+ /*
+ * Do an insertion sort.
+ */
+ npgs += btoc(sz);
+
+ for (mp1 = avail; mp1 < mp; mp1++) {
+ if (s < mp1->start) {
+ break;
+ }
+ }
+
+ if (mp1 < mp) {
+ bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
+ mp1->start = s;
+ mp1->size = sz;
+ } else {
+ mp->start = s;
+ mp->size = sz;
+ }
+ }
+
+#ifdef HTABENTS
+ ptab_cnt = HTABENTS;
+#else
+ ptab_cnt = (Maxmem + 1) / 2;
+
+ /* The minimum is 1024 PTEGs. */
+ if (ptab_cnt < 1024) {
+ ptab_cnt = 1024;
+ }
+
+ /* Round up to power of 2. */
+ __asm ("cntlzw %0,%1" : "=r"(i) : "r"(ptab_cnt - 1));
+ ptab_cnt = 1 << (32 - i);
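+	/*
+	 * e.g. ~10000 pages of RAM gives a ptab_cnt of 5001;
+	 * cntlzw(5000) is 19, so ptab_cnt rounds up to 1 << 13 = 8192
+	 * PTEGs, a 512KB table at 64 bytes per PTEG.
+	 */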
+#endif
+
+ /*
+ * Find suitably aligned memory for HTAB.
+ */
+ for (mp = avail; mp->size; mp++) {
+ s = roundup(mp->start, HTABSIZE) - mp->start;
+
+ if (mp->size < s + HTABSIZE) {
+ continue;
+ }
+
+ ptable = (pte_t *)(mp->start + s);
+
+ if (mp->size == s + HTABSIZE) {
+ if (s)
+ mp->size = s;
+ else {
+ bcopy(mp + 1, mp,
+ (cnt - (mp - avail)) * sizeof *mp);
+ mp = avail;
+ }
+ break;
+ }
+
+ if (s != 0) {
+ bcopy(mp, mp + 1,
+ (cnt - (mp - avail)) * sizeof *mp);
+ mp++->size = s;
+ cnt++;
+ }
+
+ mp->start += s + HTABSIZE;
+ mp->size -= s + HTABSIZE;
+ break;
+ }
+
+ if (!mp->size) {
+ panic("not enough memory?");
+ }
+
+ npgs -= btoc(HTABSIZE);
+ bzero((void *)ptable, HTABSIZE);
+ ptab_mask = ptab_cnt - 1;
+
+ /*
+ * We cannot do pmap_steal_memory here,
+ * since we don't run with translation enabled yet.
+ */
+ s = sizeof(struct pte_ovtab) * ptab_cnt;
+ sz = round_page(s);
+
+ for (mp = avail; mp->size; mp++) {
+ if (mp->size >= sz) {
+ break;
+ }
+ }
+
+ if (!mp->size) {
+ panic("not enough memory?");
+ }
+
+ npgs -= btoc(sz);
+ potable = (struct pte_ovtab *)mp->start;
+ mp->size -= sz;
+ mp->start += sz;
+
+ if (mp->size <= 0) {
+ bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
+ }
+
+ for (i = 0; i < ptab_cnt; i++) {
+ LIST_INIT(potable + i);
+ }
+
+#ifndef MSGBUFADDR
+ /*
+ * allow for msgbuf
+ */
+ sz = round_page(MSGBUFSIZE);
+ mp = NULL;
+
+ for (mp1 = avail; mp1->size; mp1++) {
+ if (mp1->size >= sz) {
+ mp = mp1;
+ }
+ }
+
+ if (mp == NULL) {
+ panic("not enough memory?");
+ }
+
+ npgs -= btoc(sz);
+ msgbuf_paddr = mp->start + mp->size - sz;
+ mp->size -= sz;
+
+ if (mp->size <= 0) {
+ bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
+ }
+#endif
+
+ /*
+ * Initialize kernel pmap and hardware.
+ */
+ kernel_pmap = &kernel_pmap_store;
+
+ {
+ int batu, batl;
+
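+		/*
+		 * XXX hardwired: this sets DBAT1 to map the 256MB
+		 * block at 0x80000000 read/write.
+		 */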
+ batu = 0x80001ffe;
+ batl = 0x80000012;
+
+ __asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r" (batu), "r" (batl));
+ }
+
+#if NPMAPS >= KERNEL_SEGMENT / 16
+ usedsr[KERNEL_SEGMENT / 16 / (sizeof usedsr[0] * 8)]
+ |= 1 << ((KERNEL_SEGMENT / 16) % (sizeof usedsr[0] * 8));
+#endif
+
+#if 0 /* XXX */
+ for (i = 0; i < 16; i++) {
+ kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
+ __asm __volatile ("mtsrin %0,%1"
+ :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
+ }
+#endif
+
+ for (i = 0; i < 16; i++) {
+ int j;
+
+ __asm __volatile ("mfsrin %0,%1"
+ : "=r" (j)
+ : "r" (i << ADDR_SR_SHFT));
+
+ kernel_pmap->pm_sr[i] = j;
+ }
+
+ kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
+ __asm __volatile ("mtsr %0,%1"
+ :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
+
+ __asm __volatile ("sync; mtsdr1 %0; isync"
+ :: "r"((u_int)ptable | (ptab_mask >> 10)));
+
+ tlbia();
+
+ nextavail = avail->start;
+ avail_start = avail->start;
+ for (mp = avail, i = 0; mp->size; mp++) {
+ avail_end = mp->start + mp->size;
+ phys_avail[i++] = mp->start;
+ phys_avail[i++] = mp->start + mp->size;
+ }
+
+ virtual_avail = VM_MIN_KERNEL_ADDRESS;
+ virtual_end = VM_MAX_KERNEL_ADDRESS;
+}
+
+/*
+ * Initialize anything else for pmap handling.
+ * Called during vm_init().
+ */
+void
+pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
+{
+ int initial_pvs;
+
+ /*
+ * init the pv free list
+ */
+ initial_pvs = vm_page_array_size;
+ if (initial_pvs < MINPV) {
+ initial_pvs = MINPV;
+ }
+ pvzone = &pvzone_store;
+ pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
+ initial_pvs * sizeof(struct pv_entry));
+ zbootinit(pvzone, "PV ENTRY", sizeof(struct pv_entry), pvinit,
+ vm_page_array_size);
+
+ pmap_initialized = TRUE;
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure.
+ */
+void
+pmap_pinit(struct pmap *pm)
+{
+ int i, j;
+
+ /*
+ * Allocate some segment registers for this pmap.
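+	 * Each pmap receives a block of 16 consecutive segment values,
+	 * so a single bit in usedsr[] tracks a whole block; pm_sr[0]
+	 * encodes the block base and the other 15 follow consecutively.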
+ */
+ pm->pm_refs = 1;
+ for (i = 0; i < sizeof usedsr / sizeof usedsr[0]; i++) {
+ if (usedsr[i] != 0xffffffff) {
+ j = ffs(~usedsr[i]) - 1;
+ usedsr[i] |= 1 << j;
+ pm->pm_sr[0] = (i * sizeof usedsr[0] * 8 + j) * 16;
+ for (i = 1; i < 16; i++) {
+ pm->pm_sr[i] = pm->pm_sr[i - 1] + 1;
+ }
+ return;
+ }
+ }
+ panic("out of segments");
+}
+
+void
+pmap_pinit2(pmap_t pmap)
+{
+
+ /*
+ * Nothing to be done.
+ */
+ return;
+}
+
+/*
+ * Add a reference to the given pmap.
+ */
+void
+pmap_reference(struct pmap *pm)
+{
+
+ pm->pm_refs++;
+}
+
+/*
+ * Retire the given pmap from service.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_destroy(struct pmap *pm)
+{
+
+ if (--pm->pm_refs == 0) {
+ pmap_release(pm);
+ free((caddr_t)pm, M_VMPGDATA);
+ }
+}
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ */
+void
+pmap_release(struct pmap *pm)
+{
+ int i, j;
+
+ if (!pm->pm_sr[0]) {
+ panic("pmap_release");
+ }
+ i = pm->pm_sr[0] / 16;
+ j = i % (sizeof usedsr[0] * 8);
+ i /= sizeof usedsr[0] * 8;
+ usedsr[i] &= ~(1 << j);
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+void
+pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vm_offset_t dst_addr,
+ vm_size_t len, vm_offset_t src_addr)
+{
+
+ return;
+}
+
+/*
+ * Garbage collects the physical map system for
+ * pages which are no longer used.
+ * Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but
+ * others may be collected.
+ * Called by the pageout daemon when pages are scarce.
+ */
+void
+pmap_collect(void)
+{
+
+ return;
+}
+
+/*
+ * Fill the given physical page with zeroes.
+ */
+void
+pmap_zero_page(vm_offset_t pa)
+{
+#if 0
+ bzero((caddr_t)pa, PAGE_SIZE);
+#else
+ int i;
+
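+	/*
+	 * dcbz zeroes a full cache line without fetching it from memory,
+	 * which beats a bzero() of the whole page.  This assumes pa is
+	 * accessible at its physical address (1:1 mapped).
+	 */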
+ for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
+ __asm __volatile ("dcbz 0,%0" :: "r"(pa));
+ pa += CACHELINESIZE;
+ }
+#endif
+}
+
+void
+pmap_zero_page_area(vm_offset_t pa, int off, int size)
+{
+
+ bzero((caddr_t)pa + off, size);
+}
+
+/*
+ * Copy the given physical source page to its destination.
+ */
+void
+pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+{
+
+ bcopy((caddr_t)src, (caddr_t)dst, PAGE_SIZE);
+}
+
+static struct pv_entry *
+pmap_alloc_pv()
+{
+ pv_entry_count++;
+
+ if (pv_entry_high_water &&
+ (pv_entry_count > pv_entry_high_water) &&
+ (pmap_pagedaemon_waken == 0)) {
+ pmap_pagedaemon_waken = 1;
+ wakeup(&vm_pages_needed);
+ }
+
+ return zalloc(pvzone);
+}
+
+static void
+pmap_free_pv(struct pv_entry *pv)
+{
+
+ pv_entry_count--;
+ zfree(pvzone, pv);
+}
+
+/*
+ * We really hope that we don't need overflow entries
+ * before the VM system is initialized!
+ *
+ * XXX: Should really be switched over to the zone allocator.
+ */
+static struct pte_ovfl *
+poalloc()
+{
+ struct po_page *pop;
+ struct pte_ovfl *po;
+ vm_page_t mem;
+ int i;
+
+ if (!pmap_initialized) {
+ panic("poalloc");
+ }
+
+ if (po_nfree == 0) {
+ /*
+ * Since we cannot use maps for potable allocation,
+ * we have to steal some memory from the VM system. XXX
+ */
+ mem = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM);
+ po_pcnt++;
+ pop = (struct po_page *)VM_PAGE_TO_PHYS(mem);
+ pop->pop_pgi.pgi_page = mem;
+ LIST_INIT(&pop->pop_pgi.pgi_freelist);
+ for (i = NPOPPG - 1, po = pop->pop_po + 1; --i >= 0; po++) {
+ LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po,
+ po_list);
+ }
+ po_nfree += pop->pop_pgi.pgi_nfree = NPOPPG - 1;
+ LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
+ po = pop->pop_po;
+ } else {
+ po_nfree--;
+ pop = po_page_freelist.lh_first;
+ if (--pop->pop_pgi.pgi_nfree <= 0) {
+ LIST_REMOVE(pop, pop_pgi.pgi_list);
+ }
+ po = pop->pop_pgi.pgi_freelist.lh_first;
+ LIST_REMOVE(po, po_list);
+ }
+
+ return po;
+}
+
+static void
+pofree(struct pte_ovfl *po, int freepage)
+{
+ struct po_page *pop;
+
+ pop = (struct po_page *)trunc_page((vm_offset_t)po);
+ switch (++pop->pop_pgi.pgi_nfree) {
+ case NPOPPG:
+ if (!freepage) {
+ break;
+ }
+ po_nfree -= NPOPPG - 1;
+ po_pcnt--;
+ LIST_REMOVE(pop, pop_pgi.pgi_list);
+ vm_page_free(pop->pop_pgi.pgi_page);
+ return;
+ case 1:
+ LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
+ default:
+ break;
+ }
+ LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po, po_list);
+ po_nfree++;
+}
+
+/*
+ * This returns whether this is the first mapping of a page.
+ */
+static int
+pmap_enter_pv(int pteidx, vm_offset_t va, vm_offset_t pa)
+{
+ struct pv_entry *pv, *npv;
+ int s, first;
+
+ if (!pmap_initialized) {
+ return 0;
+ }
+
+ s = splimp();
+
+ pv = pa_to_pv(pa);
+ first = pv->pv_idx;
+ if (pv->pv_idx == -1) {
+ /*
+ * No entries yet, use header as the first entry.
+ */
+ pv->pv_va = va;
+ pv->pv_idx = pteidx;
+ pv->pv_next = NULL;
+ } else {
+ /*
+ * There is at least one other VA mapping this page.
+ * Place this entry after the header.
+ */
+ npv = pmap_alloc_pv();
+ npv->pv_va = va;
+ npv->pv_idx = pteidx;
+ npv->pv_next = pv->pv_next;
+ pv->pv_next = npv;
+ }
+ splx(s);
+ return first;
+}
+
+static void
+pmap_remove_pv(int pteidx, vm_offset_t va, vm_offset_t pa, struct pte *pte)
+{
+ struct pv_entry *pv, *npv;
+ char *attr;
+
+ /*
+ * First transfer reference/change bits to cache.
+ */
+ attr = pa_to_attr(pa);
+ if (attr == NULL) {
+ return;
+ }
+ *attr |= (pte->pte_lo & (PTE_REF | PTE_CHG)) >> ATTRSHFT;
+
+ /*
+ * Remove from the PV table.
+ */
+ pv = pa_to_pv(pa);
+
+ /*
+ * If it is the first entry on the list, it is actually
+ * in the header and we must copy the following entry up
+ * to the header. Otherwise we must search the list for
+ * the entry. In either case we free the now unused entry.
+ */
+ if (pteidx == pv->pv_idx && va == pv->pv_va) {
+ npv = pv->pv_next;
+ if (npv) {
+ *pv = *npv;
+ pmap_free_pv(npv);
+ } else {
+ pv->pv_idx = -1;
+ }
+ } else {
+ for (; (npv = pv->pv_next); pv = npv) {
+ if (pteidx == npv->pv_idx && va == npv->pv_va) {
+ break;
+ }
+ }
+ if (npv) {
+ pv->pv_next = npv->pv_next;
+ pmap_free_pv(npv);
+ }
+#ifdef DIAGNOSTIC
+ else {
+		panic("pmap_remove_pv: not on list");
+ }
+#endif
+ }
+}
+
+/*
+ * Insert physical page at pa into the given pmap at virtual address va.
+ */
+void
+pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t pg, vm_prot_t prot,
+ boolean_t wired)
+{
+ sr_t sr;
+ int idx, s;
+ pte_t pte;
+ struct pte_ovfl *po;
+ struct mem_region *mp;
+ vm_offset_t pa;
+
+ pa = VM_PAGE_TO_PHYS(pg) & ~PAGE_MASK;
+
+ /*
+ * Have to remove any existing mapping first.
+ */
+ pmap_remove(pm, va, va + PAGE_SIZE);
+
+ /*
+ * Compute the HTAB index.
+ */
+ idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
+ /*
+ * Construct the PTE.
+ *
+ * Note: Don't set the valid bit for correct operation of tlb update.
+ */
+ pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
+ | ((va & ADDR_PIDX) >> ADDR_API_SHFT);
+ pte.pte_lo = (pa & PTE_RPGN) | PTE_M | PTE_I | PTE_G;
+
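+	/*
+	 * Assume cache-inhibited, guarded device space by default; if the
+	 * page turns out to lie in a known RAM region below, allow
+	 * caching by clearing PTE_I and PTE_G.
+	 */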
+ for (mp = mem; mp->size; mp++) {
+ if (pa >= mp->start && pa < mp->start + mp->size) {
+ pte.pte_lo &= ~(PTE_I | PTE_G);
+ break;
+ }
+ }
+ if (prot & VM_PROT_WRITE) {
+ pte.pte_lo |= PTE_RW;
+ } else {
+ pte.pte_lo |= PTE_RO;
+ }
+
+ /*
+ * Now record mapping for later back-translation.
+ */
+ if (pmap_initialized && (pg->flags & PG_FICTITIOUS) == 0) {
+ if (pmap_enter_pv(idx, va, pa)) {
+ /*
+ * Flush the real memory from the cache.
+ */
+ __syncicache((void *)pa, PAGE_SIZE);
+ }
+ }
+
+ s = splimp();
+ pm->pm_stats.resident_count++;
+ /*
+ * Try to insert directly into HTAB.
+ */
+ if (pte_insert(idx, &pte)) {
+ splx(s);
+ return;
+ }
+
+ /*
+ * Have to allocate overflow entry.
+ *
+	 * Note that we must use real addresses for these.
+ */
+ po = poalloc();
+ po->po_pte = pte;
+ LIST_INSERT_HEAD(potable + idx, po, po_list);
+ splx(s);
+}
+
+void
+pmap_kenter(vm_offset_t va, vm_offset_t pa)
+{
+ struct vm_page pg;
+
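+	/*
+	 * Fake up a vm_page on the stack; pmap_enter() only looks at
+	 * its physical address.
+	 */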
+ pg.phys_addr = pa;
+ pmap_enter(kernel_pmap, va, &pg, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+}
+
+void
+pmap_kremove(vm_offset_t va)
+{
+ pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
+}
+
+/*
+ * Remove the given range of mapping entries.
+ */
+void
+pmap_remove(struct pmap *pm, vm_offset_t va, vm_offset_t endva)
+{
+ int idx, i, s;
+ sr_t sr;
+ pte_t *ptp;
+ struct pte_ovfl *po, *npo;
+
+ s = splimp();
+ while (va < endva) {
+ idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
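+		/*
+		 * A mapping may sit in the primary PTEG, in the secondary
+		 * PTEG (PTE_HID set), or on the overflow list; check all
+		 * three.
+		 */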
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if (ptematch(ptp, sr, va, PTE_VALID)) {
+ pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(va);
+ tlbsync();
+ pm->pm_stats.resident_count--;
+ }
+ }
+ for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
+ ptp++) {
+ if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
+ pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(va);
+ tlbsync();
+ pm->pm_stats.resident_count--;
+ }
+ }
+ for (po = potable[idx].lh_first; po; po = npo) {
+ npo = po->po_list.le_next;
+ if (ptematch(&po->po_pte, sr, va, 0)) {
+ pmap_remove_pv(idx, va, po->po_pte.pte_lo,
+ &po->po_pte);
+ LIST_REMOVE(po, po_list);
+ pofree(po, 1);
+ pm->pm_stats.resident_count--;
+ }
+ }
+ va += PAGE_SIZE;
+ }
+ splx(s);
+}
+
+static pte_t *
+pte_find(struct pmap *pm, vm_offset_t va)
+{
+ int idx, i;
+ sr_t sr;
+ pte_t *ptp;
+ struct pte_ovfl *po;
+
+ idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if (ptematch(ptp, sr, va, PTE_VALID)) {
+ return ptp;
+ }
+ }
+ for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++) {
+ if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
+ return ptp;
+ }
+ }
+ for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
+ if (ptematch(&po->po_pte, sr, va, 0)) {
+ return &po->po_pte;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Get the physical page address for the given pmap/virtual address.
+ */
+vm_offset_t
+pmap_extract(pmap_t pm, vm_offset_t va)
+{
+ pte_t *ptp;
+ int s;
+
+ s = splimp();
+
+ if (!(ptp = pte_find(pm, va))) {
+ splx(s);
+ return (0);
+ }
+ splx(s);
+ return ((ptp->pte_lo & PTE_RPGN) | (va & ADDR_POFF));
+}
+
+/*
+ * Lower the protection on the specified range of this pmap.
+ *
+ * There are only two cases: either the protection is going to 0,
+ * or it is going to read-only.
+ */
+void
+pmap_protect(struct pmap *pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+{
+ pte_t *ptp;
+ int valid, s;
+
+ if (prot & VM_PROT_READ) {
+ s = splimp();
+ while (sva < eva) {
+ ptp = pte_find(pm, sva);
+ if (ptp) {
+ valid = ptp->pte_hi & PTE_VALID;
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(sva);
+ tlbsync();
+ ptp->pte_lo &= ~PTE_PP;
+ ptp->pte_lo |= PTE_RO;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= valid;
+ }
+ sva += PAGE_SIZE;
+ }
+ splx(s);
+ return;
+ }
+ pmap_remove(pm, sva, eva);
+}
+
+boolean_t
+ptemodify(vm_page_t pg, u_int mask, u_int val)
+{
+ vm_offset_t pa;
+ struct pv_entry *pv;
+ pte_t *ptp;
+ struct pte_ovfl *po;
+ int i, s;
+ char *attr;
+ int rv;
+
+ pa = VM_PAGE_TO_PHYS(pg);
+
+ /*
+ * First modify bits in cache.
+ */
+ attr = pa_to_attr(pa);
+ if (attr == NULL) {
+ return FALSE;
+ }
+
+ *attr &= ~mask >> ATTRSHFT;
+ *attr |= val >> ATTRSHFT;
+
+ pv = pa_to_pv(pa);
+ if (pv->pv_idx < 0) {
+ return FALSE;
+ }
+
+ rv = FALSE;
+ s = splimp();
+ for (; pv; pv = pv->pv_next) {
+ for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(pv->pv_va);
+ tlbsync();
+ rv |= ptp->pte_lo & mask;
+ ptp->pte_lo &= ~mask;
+ ptp->pte_lo |= val;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= PTE_VALID;
+ }
+ }
+ for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
+ --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(pv->pv_va);
+ tlbsync();
+ rv |= ptp->pte_lo & mask;
+ ptp->pte_lo &= ~mask;
+ ptp->pte_lo |= val;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= PTE_VALID;
+ }
+ }
+ for (po = potable[pv->pv_idx].lh_first; po;
+ po = po->po_list.le_next) {
+ if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
+				rv |= po->po_pte.pte_lo & mask;
+ po->po_pte.pte_lo &= ~mask;
+ po->po_pte.pte_lo |= val;
+ }
+ }
+ }
+ splx(s);
+ return rv != 0;
+}
+
+int
+ptebits(vm_page_t pg, int bit)
+{
+ struct pv_entry *pv;
+ pte_t *ptp;
+ struct pte_ovfl *po;
+ int i, s, bits;
+ char *attr;
+ vm_offset_t pa;
+
+ bits = 0;
+ pa = VM_PAGE_TO_PHYS(pg);
+
+ /*
+ * First try the cache.
+ */
+ attr = pa_to_attr(pa);
+ if (attr == NULL) {
+ return 0;
+ }
+ bits |= (*attr << ATTRSHFT) & bit;
+ if (bits == bit) {
+ return bits;
+ }
+
+ pv = pa_to_pv(pa);
+ if (pv->pv_idx < 0) {
+ return 0;
+ }
+
+ s = splimp();
+ for (; pv; pv = pv->pv_next) {
+ for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ bits |= ptp->pte_lo & bit;
+ if (bits == bit) {
+ splx(s);
+ return bits;
+ }
+ }
+ }
+ for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
+ --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ bits |= ptp->pte_lo & bit;
+ if (bits == bit) {
+ splx(s);
+ return bits;
+ }
+ }
+ }
+ for (po = potable[pv->pv_idx].lh_first; po;
+ po = po->po_list.le_next) {
+ if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
+ bits |= po->po_pte.pte_lo & bit;
+ if (bits == bit) {
+ splx(s);
+ return bits;
+ }
+ }
+ }
+ }
+ splx(s);
+ return bits;
+}
+
+/*
+ * Lower the protection on the specified physical page.
+ *
+ * There are only two cases: either the protection is going to 0,
+ * or it is going to read-only.
+ */
+void
+pmap_page_protect(vm_page_t m, vm_prot_t prot)
+{
+ vm_offset_t pa;
+ vm_offset_t va;
+ pte_t *ptp;
+ struct pte_ovfl *po, *npo;
+ int i, s, idx;
+ struct pv_entry *pv;
+
+ pa = VM_PAGE_TO_PHYS(m);
+
+ pa &= ~ADDR_POFF;
+ if (prot & VM_PROT_READ) {
+ ptemodify(m, PTE_PP, PTE_RO);
+ return;
+ }
+
+ pv = pa_to_pv(pa);
+ if (pv == NULL) {
+ return;
+ }
+
+ s = splimp();
+ while (pv->pv_idx >= 0) {
+ idx = pv->pv_idx;
+ va = pv->pv_va;
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ pmap_remove_pv(idx, va, pa, ptp);
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(va);
+ tlbsync();
+ goto next;
+ }
+ }
+ for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
+ ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ pmap_remove_pv(idx, va, pa, ptp);
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(va);
+ tlbsync();
+ goto next;
+ }
+ }
+ for (po = potable[idx].lh_first; po; po = npo) {
+ npo = po->po_list.le_next;
+ if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
+ pmap_remove_pv(idx, va, pa, &po->po_pte);
+ LIST_REMOVE(po, po_list);
+ pofree(po, 1);
+ goto next;
+ }
+ }
+next:
+		;
+ }
+ splx(s);
+}
+
+/*
+ * Activate the address space for the specified process. If the process
+ * is the current process, load the new MMU context.
+ */
+void
+pmap_activate(struct proc *p)
+{
+ struct pcb *pcb;
+ pmap_t pmap;
+ pmap_t rpm;
+ int psl, i, ksr, seg;
+
+ pcb = &p->p_addr->u_pcb;
+ pmap = p->p_vmspace->vm_map.pmap;
+
+ /*
+ * XXX Normally performed in cpu_fork().
+ */
+ if (pcb->pcb_pm != pmap) {
+ pcb->pcb_pm = pmap;
+		pcb->pcb_pmreal = (struct pmap *)pmap_extract(kernel_pmap,
+		    (vm_offset_t)pcb->pcb_pm);
+ }
+
+ if (p == curproc) {
+ /* Disable interrupts while switching. */
+ __asm __volatile("mfmsr %0" : "=r"(psl) :);
+ psl &= ~PSL_EE;
+ __asm __volatile("mtmsr %0" :: "r"(psl));
+
+#if 0 /* XXX */
+ /* Store pointer to new current pmap. */
+ curpm = pcb->pcb_pmreal;
+#endif
+
+ /* Save kernel SR. */
+ __asm __volatile("mfsr %0,14" : "=r"(ksr) :);
+
+ /*
+ * Set new segment registers. We use the pmap's real
+ * address to avoid accessibility problems.
+ */
+ rpm = pcb->pcb_pmreal;
+ for (i = 0; i < 16; i++) {
+ seg = rpm->pm_sr[i];
+ __asm __volatile("mtsrin %0,%1"
+ :: "r"(seg), "r"(i << ADDR_SR_SHFT));
+ }
+
+ /* Restore kernel SR. */
+ __asm __volatile("mtsr 14,%0" :: "r"(ksr));
+
+ /* Interrupts are OK again. */
+ psl |= PSL_EE;
+ __asm __volatile("mtmsr %0" :: "r"(psl));
+ }
+}
+
+/*
+ * Add a list of wired pages to the kva.
+ * This routine is only used for temporary
+ * kernel mappings that do not need to have
+ * page modification or references recorded.
+ * Note that old mappings are simply written
+ * over. The page *must* be wired.
+ */
+void
+pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vm_offset_t tva = va + i * PAGE_SIZE;
+ pmap_kenter(tva, VM_PAGE_TO_PHYS(m[i]));
+ }
+}
+
+/*
+ * This routine jerks page mappings from the
+ * kernel -- it is meant only for temporary mappings.
+ */
+void
+pmap_qremove(vm_offset_t va, int count)
+{
+ vm_offset_t end_va;
+
+ end_va = va + count*PAGE_SIZE;
+
+ while (va < end_va) {
+ unsigned *pte;
+
+ pte = (unsigned *)vtopte(va);
+ *pte = 0;
+ tlbie(va);
+ va += PAGE_SIZE;
+ }
+}
+
+/*
+ * pmap_ts_referenced:
+ *
+ * Return the count of reference bits for a page, clearing all of them.
+ */
+int
+pmap_ts_referenced(vm_page_t m)
+{
+
+ /* XXX: coming soon... */
+ return (0);
+}
+
+/*
+ * This routine returns TRUE if a physical page resides
+ * in the given pmap.
+ */
+boolean_t
+pmap_page_exists(pmap_t pmap, vm_page_t m)
+{
+#if 0 /* XXX: This must go! */
+ register pv_entry_t pv;
+ int s;
+
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ return FALSE;
+
+ s = splvm();
+
+ /*
+ * Not found, check current mappings returning immediately if found.
+ */
+ for (pv = pv_table; pv; pv = pv->pv_next) {
+ if (pv->pv_pmap == pmap) {
+ splx(s);
+ return TRUE;
+ }
+ }
+ splx(s);
+#endif
+ return (FALSE);
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * For now, VM is already on, we only need to map the
+ * specified memory.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
+{
+ vm_offset_t sva, va;
+
+ sva = *virt;
+ va = sva;
+
+ while (start < end) {
+ pmap_kenter(va, start);
+ va += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+
+ *virt = va;
+ return (sva);
+}
+
+vm_offset_t
+pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
+{
+
+ return (addr);
+}
+
+int
+pmap_mincore(pmap_t pmap, vm_offset_t addr)
+{
+
+ /* XXX: coming soon... */
+ return (0);
+}
+
+void
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+ vm_pindex_t pindex, vm_size_t size, int limit)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_growkernel(vm_offset_t addr)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+/*
+ * Initialize the address space (zone) for the pv_entries. Set a
+ * high water mark so that the system can recover from excessive
+ * numbers of pv entries.
+ */
+void
+pmap_init2()
+{
+ pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size;
+ pv_entry_high_water = 9 * (pv_entry_max / 10);
+ zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
+}
+
+void
+pmap_swapin_proc(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_swapout_proc(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_new_proc(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable)
+{
+
+ return;
+}
+
+void
+pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_pinit0(pmap_t pmap)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_dispose_proc(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+vm_offset_t
+pmap_steal_memory(vm_size_t size)
+{
+ vm_size_t bank_size;
+ vm_offset_t pa;
+
+ size = round_page(size);
+
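+	/*
+	 * Discard leading phys_avail banks that are too small, shifting
+	 * the rest down, then carve the request off the front of the
+	 * first bank that fits.
+	 */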
+ bank_size = phys_avail[1] - phys_avail[0];
+ while (size > bank_size) {
+ int i;
+ for (i = 0; phys_avail[i+2]; i+= 2) {
+ phys_avail[i] = phys_avail[i+2];
+ phys_avail[i+1] = phys_avail[i+3];
+ }
+ phys_avail[i] = 0;
+ phys_avail[i+1] = 0;
+ if (!phys_avail[0])
+ panic("pmap_steal_memory: out of memory");
+ bank_size = phys_avail[1] - phys_avail[0];
+ }
+
+ pa = phys_avail[0];
+ phys_avail[0] += size;
+
+ bzero((caddr_t) pa, size);
+ return pa;
+}
diff --git a/sys/powerpc/powerpc/ofw_machdep.c b/sys/powerpc/powerpc/ofw_machdep.c
new file mode 100644
index 0000000..dcdc2dd
--- /dev/null
+++ b/sys/powerpc/powerpc/ofw_machdep.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 1996 Wolfgang Solfrank.
+ * Copyright (C) 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: ofw_machdep.c,v 1.5 2000/05/23 13:25:43 tsubai Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/disk.h>
+#include <sys/disklabel.h>
+#include <sys/fcntl.h>
+#include <sys/malloc.h>
+#include <sys/stat.h>
+
+#include <dev/ofw/openfirm.h>
+
+#include <machine/powerpc.h>
+
+#define OFMEM_REGIONS 32
+static struct mem_region OFmem[OFMEM_REGIONS + 1], OFavail[OFMEM_REGIONS + 3];
+
+extern long ofmsr;
+static int (*ofwcall)(void *);
+
+/*
+ * This is called during powerpc_init, before the system is really initialized.
+ * It shall provide the total and the available regions of RAM.
+ * Both lists must have a zero-size entry as terminator.
+ * The available regions need not take the kernel into account, but need
+ * to provide space for two additional entries beyond the terminating one.
+ */
+void
+mem_regions(struct mem_region **memp, struct mem_region **availp)
+{
+ int phandle /*, i, j, cnt*/;
+
+ /*
+ * Get memory.
+ */
+ if ((phandle = OF_finddevice("/memory")) == -1
+ || OF_getprop(phandle, "reg",
+ OFmem, sizeof OFmem[0] * OFMEM_REGIONS)
+ <= 0
+ || OF_getprop(phandle, "available",
+ OFavail, sizeof OFavail[0] * OFMEM_REGIONS)
+ <= 0)
+ panic("no memory?");
+ *memp = OFmem;
+ *availp = OFavail;
+}
+
+void
+set_openfirm_callback(int (*openfirm)(void *))
+{
+
+ ofwcall = openfirm;
+}
+
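+/*
+ * Call into Open Firmware via the entry point saved by
+ * set_openfirm_callback(), temporarily restoring the MSR the firmware
+ * was started with (ofmsr) around the call.
+ */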
+int
+openfirmware(void *args)
+{
+ long oldmsr;
+ int result;
+
+ __asm( "\t"
+ "mfmsr %0\n\t"
+ "mtmsr %1\n\t"
+ "isync\n"
+ : "=r" (oldmsr)
+ : "r" (ofmsr)
+ );
+
+ result = ofwcall(args);
+
+ __asm( "\t"
+ "mtmsr %0\n\t"
+ "isync\n"
+ : : "r" (oldmsr)
+ );
+
+ return (result);
+}
+
+void
+ppc_exit()
+{
+
+ OF_exit();
+}
+
+void
+ppc_boot(char *str)
+{
+
+ OF_boot(str);
+}
diff --git a/sys/powerpc/powerpc/ofwmagic.S b/sys/powerpc/powerpc/ofwmagic.S
new file mode 100644
index 0000000..890e86e
--- /dev/null
+++ b/sys/powerpc/powerpc/ofwmagic.S
@@ -0,0 +1,75 @@
+/* $FreeBSD$ */
+/* $NetBSD: ofwmagic.S,v 1.2 1997/10/09 08:38:18 jtc Exp $ */
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Magic note section used by OpenFirmware.
+ */
+
+ .section ".note"
+
+ # note header
+
+ # length of name
+ .long 8
+
+ # note descriptor size
+ .long 20
+
+ # note type (IEEE 1275)
+ .long 0x1275
+
+ # name of owner
+ .asciz "PowerPC"
+ .balign 4
+
+
+ # note descriptor
+
+ # real mode (-1) or virtual mode (0)
+ .long 0
+
+ # real-base
+ .long -1
+ # real-size
+ .long -1
+
+ # virt-base
+ .long -1
+ # virt-size
+ .long -1
diff --git a/sys/powerpc/powerpc/ofwmagic.s b/sys/powerpc/powerpc/ofwmagic.s
new file mode 100644
index 0000000..890e86e
--- /dev/null
+++ b/sys/powerpc/powerpc/ofwmagic.s
@@ -0,0 +1,75 @@
+/* $FreeBSD$ */
+/* $NetBSD: ofwmagic.S,v 1.2 1997/10/09 08:38:18 jtc Exp $ */
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Magic note section used by OpenFirmware.
+ */
+
+ .section ".note"
+
+ # note header
+
+ # length of name
+ .long 8
+
+ # note descriptor size
+ .long 20
+
+ # note type (IEEE 1275)
+ .long 0x1275
+
+ # name of owner
+ .asciz "PowerPC"
+ .balign 4
+
+
+ # note descriptor
+
+ # real mode (-1) or virtual mode (0)
+ .long 0
+
+ # real-base
+ .long -1
+ # real-size
+ .long -1
+
+ # virt-base
+ .long -1
+ # virt-size
+ .long -1
diff --git a/sys/powerpc/powerpc/pmap.c b/sys/powerpc/powerpc/pmap.c
new file mode 100644
index 0000000..ae34d23
--- /dev/null
+++ b/sys/powerpc/powerpc/pmap.c
@@ -0,0 +1,1747 @@
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
+ */
+/*
+ * Copyright (C) 2001 Benno Rice.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/msgbuf.h>
+#include <sys/vmmeter.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/mutex.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <sys/lock.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
+
+#include <sys/user.h>
+
+#include <machine/pcb.h>
+#include <machine/powerpc.h>
+#include <machine/pte.h>
+
+pte_t *ptable;
+int ptab_cnt;
+u_int ptab_mask;
+#define HTABSIZE (ptab_cnt * 64)
+
+#define MINPV 2048
+
+struct pte_ovfl {
+ LIST_ENTRY(pte_ovfl) po_list; /* Linked list of overflow entries */
+ struct pte po_pte; /* PTE for this mapping */
+};
+
+LIST_HEAD(pte_ovtab, pte_ovfl) *potable; /* Overflow entries for ptable */
+
+static struct pmap kernel_pmap_store;
+pmap_t kernel_pmap;
+
+static int npgs;
+static u_int nextavail;
+
+#ifndef MSGBUFADDR
+extern vm_offset_t msgbuf_paddr;
+#endif
+
+static struct mem_region *mem, *avail;
+
+vm_offset_t avail_start;
+vm_offset_t avail_end;
+vm_offset_t virtual_avail;
+vm_offset_t virtual_end;
+
+vm_offset_t kernel_vm_end;
+
+static int pmap_pagedaemon_waken = 0;
+
+extern unsigned int Maxmem;
+
+#define ATTRSHFT 4
+
+struct pv_entry *pv_table;
+
+static vm_zone_t pvzone;
+static struct vm_zone pvzone_store;
+static struct vm_object pvzone_obj;
+static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
+static struct pv_entry *pvinit;
+
+#if !defined(PMAP_SHPGPERPROC)
+#define PMAP_SHPGPERPROC 200
+#endif
+
+struct pv_page;
+struct pv_page_info {
+ LIST_ENTRY(pv_page) pgi_list;
+ struct pv_entry *pgi_freelist;
+ int pgi_nfree;
+};
+#define NPVPPG ((PAGE_SIZE - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
+struct pv_page {
+ struct pv_page_info pvp_pgi;
+ struct pv_entry pvp_pv[NPVPPG];
+};
+LIST_HEAD(pv_page_list, pv_page) pv_page_freelist;
+int pv_nfree;
+int pv_pcnt;
+static struct pv_entry *pmap_alloc_pv(void);
+static void pmap_free_pv(struct pv_entry *);
+
+struct po_page;
+struct po_page_info {
+ LIST_ENTRY(po_page) pgi_list;
+ vm_page_t pgi_page;
+ LIST_HEAD(po_freelist, pte_ovfl) pgi_freelist;
+ int pgi_nfree;
+};
+#define NPOPPG ((PAGE_SIZE - sizeof(struct po_page_info)) / sizeof(struct pte_ovfl))
+struct po_page {
+ struct po_page_info pop_pgi;
+ struct pte_ovfl pop_po[NPOPPG];
+};
+LIST_HEAD(po_page_list, po_page) po_page_freelist;
+int po_nfree;
+int po_pcnt;
+static struct pte_ovfl *poalloc(void);
+static void pofree(struct pte_ovfl *, int);
+
+static u_int usedsr[NPMAPS / sizeof(u_int) / 8];
+
+static int pmap_initialized;
+
+int pte_spill(vm_offset_t);
+
+/*
+ * These small routines may have to be replaced,
+ * if/when we support processors other that the 604.
+ */
+static __inline void
+tlbie(vm_offset_t ea)
+{
+
+ __asm __volatile ("tlbie %0" :: "r"(ea));
+}
+
+static __inline void
+tlbsync(void)
+{
+
+ __asm __volatile ("sync; tlbsync; sync");
+}
+
+static __inline void
+tlbia(void)
+{
+ vm_offset_t i;
+
+ __asm __volatile ("sync");
+ for (i = 0; i < (vm_offset_t)0x00040000; i += 0x00001000) {
+ tlbie(i);
+ }
+ tlbsync();
+}
+
+static __inline int
+ptesr(sr_t *sr, vm_offset_t addr)
+{
+
+ return sr[(u_int)addr >> ADDR_SR_SHFT];
+}
+
+static __inline int
+pteidx(sr_t sr, vm_offset_t addr)
+{
+ int hash;
+
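+	/*
+	 * Primary hash: XOR of the VSID and the page index (PowerPC
+	 * hashed page table); the secondary hash used elsewhere is its
+	 * complement, computed as idx ^ ptab_mask.
+	 */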
+ hash = (sr & SR_VSID) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
+ return hash & ptab_mask;
+}
+
+static __inline int
+ptematch(pte_t *ptp, sr_t sr, vm_offset_t va, int which)
+{
+
+ return ptp->pte_hi == (((sr & SR_VSID) << PTE_VSID_SHFT) |
+ (((u_int)va >> ADDR_API_SHFT) & PTE_API) | which);
+}
+
+static __inline struct pv_entry *
+pa_to_pv(vm_offset_t pa)
+{
+#if 0 /* XXX */
+ int bank, pg;
+
+ bank = vm_physseg_find(atop(pa), &pg);
+ if (bank == -1)
+ return NULL;
+ return &vm_physmem[bank].pmseg.pvent[pg];
+#endif
+ return (NULL);
+}
+
+static __inline char *
+pa_to_attr(vm_offset_t pa)
+{
+#if 0 /* XXX */
+ int bank, pg;
+
+ bank = vm_physseg_find(atop(pa), &pg);
+ if (bank == -1)
+ return NULL;
+ return &vm_physmem[bank].pmseg.attrs[pg];
+#endif
+ return (NULL);
+}
+
+/*
+ * Try to insert page table entry *pt into the ptable at idx.
+ *
+ * Note: *pt mustn't have PTE_VALID set.
+ * This is done here as required by Book III, 4.12.
+ */
+static int
+pte_insert(int idx, pte_t *pt)
+{
+ pte_t *ptp;
+ int i;
+
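+	/*
+	 * Each PTEG holds 8 PTEs of 8 bytes each (hence ptable + idx * 8
+	 * and the 64 bytes per group in HTABSIZE).
+	 */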
+ /*
+ * First try primary hash.
+ */
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if (!(ptp->pte_hi & PTE_VALID)) {
+ *ptp = *pt;
+ ptp->pte_hi &= ~PTE_HID;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= PTE_VALID;
+ return 1;
+ }
+ }
+
+ /*
+ * Then try secondary hash.
+ */
+
+ idx ^= ptab_mask;
+
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if (!(ptp->pte_hi & PTE_VALID)) {
+ *ptp = *pt;
+ ptp->pte_hi |= PTE_HID;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= PTE_VALID;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Spill handler.
+ *
+ * Tries to spill a page table entry from the overflow area.
+ * Note that this routine runs in real mode on a separate stack,
+ * with interrupts disabled.
+ */
+int
+pte_spill(vm_offset_t addr)
+{
+ int idx, i;
+ sr_t sr;
+ struct pte_ovfl *po;
+ pte_t ps;
+ pte_t *pt;
+
+ __asm ("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
+ idx = pteidx(sr, addr);
+ for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
+ if (ptematch(&po->po_pte, sr, addr, 0)) {
+ /*
+ * Now found an entry to be spilled into the real
+ * ptable.
+ */
+ if (pte_insert(idx, &po->po_pte)) {
+ LIST_REMOVE(po, po_list);
+ pofree(po, 0);
+ return 1;
+ }
+ /*
+ * Have to substitute some entry. Use the primary
+ * hash for this.
+ *
+ * Use low bits of timebase as random generator
+ */
+ __asm ("mftb %0" : "=r"(i));
+ pt = ptable + idx * 8 + (i & 7);
+ pt->pte_hi &= ~PTE_VALID;
+ ps = *pt;
+ __asm __volatile ("sync");
+ tlbie(addr);
+ tlbsync();
+ *pt = po->po_pte;
+ __asm __volatile ("sync");
+ pt->pte_hi |= PTE_VALID;
+ po->po_pte = ps;
+ if (ps.pte_hi & PTE_HID) {
+ /*
+ * We took an entry that was on the alternate
+				 * hash chain, so move it to its original
+ * chain.
+ */
+ po->po_pte.pte_hi &= ~PTE_HID;
+ LIST_REMOVE(po, po_list);
+ LIST_INSERT_HEAD(potable + (idx ^ ptab_mask),
+ po, po_list);
+ }
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This is called during powerpc_init, before the system is really initialized.
+ */
+void
+pmap_bootstrap(u_int kernelstart, u_int kernelend)
+{
+ struct mem_region *mp, *mp1;
+ int cnt, i;
+ u_int s, e, sz;
+
+ /*
+ * Get memory.
+ */
+ mem_regions(&mem, &avail);
+ for (mp = mem; mp->size; mp++)
+ Maxmem += btoc(mp->size);
+
+ /*
+ * Count the number of available entries.
+ */
+ for (cnt = 0, mp = avail; mp->size; mp++) {
+ cnt++;
+ }
+
+ /*
+ * Page align all regions.
+ * Non-page aligned memory isn't very interesting to us.
+ * Also, sort the entries for ascending addresses.
+ */
+ kernelstart &= ~PAGE_MASK;
+ kernelend = (kernelend + PAGE_MASK) & ~PAGE_MASK;
+ for (mp = avail; mp->size; mp++) {
+ s = mp->start;
+ e = mp->start + mp->size;
+ /*
+ * Check whether this region holds all of the kernel.
+ */
+ if (s < kernelstart && e > kernelend) {
+ avail[cnt].start = kernelend;
+ avail[cnt++].size = e - kernelend;
+ e = kernelstart;
+ }
+ /*
+		 * Check whether this region starts within the kernel.
+ */
+ if (s >= kernelstart && s < kernelend) {
+ if (e <= kernelend)
+ goto empty;
+ s = kernelend;
+ }
+ /*
+ * Now look whether this region ends within the kernel.
+ */
+ if (e > kernelstart && e <= kernelend) {
+ if (s >= kernelstart)
+ goto empty;
+ e = kernelstart;
+ }
+ /*
+ * Now page align the start and size of the region.
+ */
+ s = round_page(s);
+ e = trunc_page(e);
+ if (e < s) {
+ e = s;
+ }
+ sz = e - s;
+ /*
+ * Check whether some memory is left here.
+ */
+ if (sz == 0) {
+ empty:
+ bcopy(mp + 1, mp,
+ (cnt - (mp - avail)) * sizeof *mp);
+ cnt--;
+ mp--;
+ continue;
+ }
+
+ /*
+ * Do an insertion sort.
+ */
+ npgs += btoc(sz);
+
+ for (mp1 = avail; mp1 < mp; mp1++) {
+ if (s < mp1->start) {
+ break;
+ }
+ }
+
+ if (mp1 < mp) {
+ bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
+ mp1->start = s;
+ mp1->size = sz;
+ } else {
+ mp->start = s;
+ mp->size = sz;
+ }
+ }
+
+#ifdef HTABENTS
+ ptab_cnt = HTABENTS;
+#else
+ ptab_cnt = (Maxmem + 1) / 2;
+
+ /* The minimum is 1024 PTEGs. */
+ if (ptab_cnt < 1024) {
+ ptab_cnt = 1024;
+ }
+
+ /* Round up to power of 2. */
+ __asm ("cntlzw %0,%1" : "=r"(i) : "r"(ptab_cnt - 1));
+ ptab_cnt = 1 << (32 - i);
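+	/*
+	 * (cntlzw counts leading zeros, so 1 << (32 - cntlzw(n - 1))
+	 * rounds n up to the next power of two.)
+	 */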
+#endif
+
+ /*
+ * Find suitably aligned memory for HTAB.
+ */
+ for (mp = avail; mp->size; mp++) {
+ s = roundup(mp->start, HTABSIZE) - mp->start;
+
+ if (mp->size < s + HTABSIZE) {
+ continue;
+ }
+
+ ptable = (pte_t *)(mp->start + s);
+
+ if (mp->size == s + HTABSIZE) {
+ if (s)
+ mp->size = s;
+ else {
+ bcopy(mp + 1, mp,
+ (cnt - (mp - avail)) * sizeof *mp);
+ mp = avail;
+ }
+ break;
+ }
+
+ if (s != 0) {
+ bcopy(mp, mp + 1,
+ (cnt - (mp - avail)) * sizeof *mp);
+ mp++->size = s;
+ cnt++;
+ }
+
+ mp->start += s + HTABSIZE;
+ mp->size -= s + HTABSIZE;
+ break;
+ }
+
+ if (!mp->size) {
+ panic("not enough memory?");
+ }
+
+ npgs -= btoc(HTABSIZE);
+ bzero((void *)ptable, HTABSIZE);
+ ptab_mask = ptab_cnt - 1;
+
+ /*
+ * We cannot do pmap_steal_memory here,
+ * since we don't run with translation enabled yet.
+ */
+ s = sizeof(struct pte_ovtab) * ptab_cnt;
+ sz = round_page(s);
+
+ for (mp = avail; mp->size; mp++) {
+ if (mp->size >= sz) {
+ break;
+ }
+ }
+
+ if (!mp->size) {
+ panic("not enough memory?");
+ }
+
+ npgs -= btoc(sz);
+ potable = (struct pte_ovtab *)mp->start;
+ mp->size -= sz;
+ mp->start += sz;
+
+ if (mp->size <= 0) {
+ bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
+ }
+
+ for (i = 0; i < ptab_cnt; i++) {
+ LIST_INIT(potable + i);
+ }
+
+#ifndef MSGBUFADDR
+ /*
+ * allow for msgbuf
+ */
+ sz = round_page(MSGBUFSIZE);
+ mp = NULL;
+
+ for (mp1 = avail; mp1->size; mp1++) {
+ if (mp1->size >= sz) {
+ mp = mp1;
+ }
+ }
+
+ if (mp == NULL) {
+ panic("not enough memory?");
+ }
+
+ npgs -= btoc(sz);
+ msgbuf_paddr = mp->start + mp->size - sz;
+ mp->size -= sz;
+
+ if (mp->size <= 0) {
+ bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
+ }
+#endif
+
+ /*
+ * Initialize kernel pmap and hardware.
+ */
+ kernel_pmap = &kernel_pmap_store;
+
+ {
+ int batu, batl;
+
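+		/*
+		 * Map the 256MB region at 0x80000000 1:1 through DBAT1.
+		 * Per the BAT encoding: batu = BEPI 0x80000000 | BL 256MB
+		 * | Vs (supervisor valid); batl = BRPN 0x80000000 | WIMG
+		 * "M" (coherent) | PP read/write.
+		 */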
+ batu = 0x80001ffe;
+ batl = 0x80000012;
+
+ __asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r" (batu), "r" (batl));
+ }
+
+#if NPMAPS >= KERNEL_SEGMENT / 16
+ usedsr[KERNEL_SEGMENT / 16 / (sizeof usedsr[0] * 8)]
+ |= 1 << ((KERNEL_SEGMENT / 16) % (sizeof usedsr[0] * 8));
+#endif
+
+#if 0 /* XXX */
+ for (i = 0; i < 16; i++) {
+ kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
+ __asm __volatile ("mtsrin %0,%1"
+ :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
+ }
+#endif
+
+ for (i = 0; i < 16; i++) {
+ int j;
+
+ __asm __volatile ("mfsrin %0,%1"
+ : "=r" (j)
+ : "r" (i << ADDR_SR_SHFT));
+
+ kernel_pmap->pm_sr[i] = j;
+ }
+
+ kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
+ __asm __volatile ("mtsr %0,%1"
+ :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
+
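+	/*
+	 * SDR1 holds the physical base of the HTAB in its upper bits and
+	 * HTABMASK in its low 9 bits; each group of 1024 PTEGs adds one
+	 * mask bit, so HTABMASK is ptab_mask >> 10.  ptable is
+	 * HTABSIZE-aligned, so the two fields cannot overlap.
+	 */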
+ __asm __volatile ("sync; mtsdr1 %0; isync"
+ :: "r"((u_int)ptable | (ptab_mask >> 10)));
+
+ tlbia();
+
+ nextavail = avail->start;
+ avail_start = avail->start;
+ for (mp = avail, i = 0; mp->size; mp++) {
+ avail_end = mp->start + mp->size;
+ phys_avail[i++] = mp->start;
+ phys_avail[i++] = mp->start + mp->size;
+ }
+
+ virtual_avail = VM_MIN_KERNEL_ADDRESS;
+ virtual_end = VM_MAX_KERNEL_ADDRESS;
+}
+
+/*
+ * Initialize anything else for pmap handling.
+ * Called during vm_init().
+ */
+void
+pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
+{
+ int initial_pvs;
+
+ /*
+ * init the pv free list
+ */
+ initial_pvs = vm_page_array_size;
+ if (initial_pvs < MINPV) {
+ initial_pvs = MINPV;
+ }
+ pvzone = &pvzone_store;
+ pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
+ initial_pvs * sizeof(struct pv_entry));
+	zbootinit(pvzone, "PV ENTRY", sizeof(struct pv_entry), pvinit,
+	    initial_pvs);
+
+ pmap_initialized = TRUE;
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure.
+ */
+void
+pmap_pinit(struct pmap *pm)
+{
+ int i, j;
+
+ /*
+ * Allocate some segment registers for this pmap.
+ */
+ pm->pm_refs = 1;
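+	/*
+	 * usedsr keeps one bit per block of 16 consecutive VSIDs; claim
+	 * the first free block and hand out its 16 segment values.
+	 * Note that the inner loop reuses i once a block has been found.
+	 */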
+ for (i = 0; i < sizeof usedsr / sizeof usedsr[0]; i++) {
+ if (usedsr[i] != 0xffffffff) {
+ j = ffs(~usedsr[i]) - 1;
+ usedsr[i] |= 1 << j;
+ pm->pm_sr[0] = (i * sizeof usedsr[0] * 8 + j) * 16;
+ for (i = 1; i < 16; i++) {
+ pm->pm_sr[i] = pm->pm_sr[i - 1] + 1;
+ }
+ return;
+ }
+ }
+ panic("out of segments");
+}
+
+void
+pmap_pinit2(pmap_t pmap)
+{
+
+ /*
+ * Nothing to be done.
+ */
+ return;
+}
+
+/*
+ * Add a reference to the given pmap.
+ */
+void
+pmap_reference(struct pmap *pm)
+{
+
+ pm->pm_refs++;
+}
+
+/*
+ * Retire the given pmap from service.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_destroy(struct pmap *pm)
+{
+
+ if (--pm->pm_refs == 0) {
+ pmap_release(pm);
+ free((caddr_t)pm, M_VMPGDATA);
+ }
+}
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ */
+void
+pmap_release(struct pmap *pm)
+{
+ int i, j;
+
+ if (!pm->pm_sr[0]) {
+ panic("pmap_release");
+ }
+ i = pm->pm_sr[0] / 16;
+ j = i % (sizeof usedsr[0] * 8);
+ i /= sizeof usedsr[0] * 8;
+ usedsr[i] &= ~(1 << j);
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+void
+pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vm_offset_t dst_addr,
+ vm_size_t len, vm_offset_t src_addr)
+{
+
+ return;
+}
+
+/*
+ * Garbage collects the physical map system for
+ * pages which are no longer used.
+ * Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but
+ * others may be collected.
+ * Called by the pageout daemon when pages are scarce.
+ */
+void
+pmap_collect(void)
+{
+
+ return;
+}
+
+/*
+ * Fill the given physical page with zeroes.
+ */
+void
+pmap_zero_page(vm_offset_t pa)
+{
+#if 0
+ bzero((caddr_t)pa, PAGE_SIZE);
+#else
+ int i;
+
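+	/*
+	 * dcbz zeroes a full cache line without fetching it from memory,
+	 * which beats a bzero() of the whole page.  This assumes pa is
+	 * accessible at its physical address (1:1 mapped).
+	 */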
+ for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
+ __asm __volatile ("dcbz 0,%0" :: "r"(pa));
+ pa += CACHELINESIZE;
+ }
+#endif
+}
+
+void
+pmap_zero_page_area(vm_offset_t pa, int off, int size)
+{
+
+ bzero((caddr_t)pa + off, size);
+}
+
+/*
+ * Copy the given physical source page to its destination.
+ */
+void
+pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+{
+
+ bcopy((caddr_t)src, (caddr_t)dst, PAGE_SIZE);
+}
+
+static struct pv_entry *
+pmap_alloc_pv()
+{
+ pv_entry_count++;
+
+ if (pv_entry_high_water &&
+ (pv_entry_count > pv_entry_high_water) &&
+ (pmap_pagedaemon_waken == 0)) {
+ pmap_pagedaemon_waken = 1;
+ wakeup(&vm_pages_needed);
+ }
+
+ return zalloc(pvzone);
+}
+
+static void
+pmap_free_pv(struct pv_entry *pv)
+{
+
+ pv_entry_count--;
+ zfree(pvzone, pv);
+}
+
+/*
+ * We really hope that we don't need overflow entries
+ * before the VM system is initialized!
+ *
+ * XXX: Should really be switched over to the zone allocator.
+ */
+static struct pte_ovfl *
+poalloc()
+{
+ struct po_page *pop;
+ struct pte_ovfl *po;
+ vm_page_t mem;
+ int i;
+
+ if (!pmap_initialized) {
+ panic("poalloc");
+ }
+
+ if (po_nfree == 0) {
+ /*
+ * Since we cannot use maps for potable allocation,
+ * we have to steal some memory from the VM system. XXX
+ */
+ mem = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM);
+ po_pcnt++;
+ pop = (struct po_page *)VM_PAGE_TO_PHYS(mem);
+ pop->pop_pgi.pgi_page = mem;
+ LIST_INIT(&pop->pop_pgi.pgi_freelist);
+ for (i = NPOPPG - 1, po = pop->pop_po + 1; --i >= 0; po++) {
+ LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po,
+ po_list);
+ }
+ po_nfree += pop->pop_pgi.pgi_nfree = NPOPPG - 1;
+ LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
+ po = pop->pop_po;
+ } else {
+ po_nfree--;
+ pop = po_page_freelist.lh_first;
+ if (--pop->pop_pgi.pgi_nfree <= 0) {
+ LIST_REMOVE(pop, pop_pgi.pgi_list);
+ }
+ po = pop->pop_pgi.pgi_freelist.lh_first;
+ LIST_REMOVE(po, po_list);
+ }
+
+ return po;
+}
+
+static void
+pofree(struct pte_ovfl *po, int freepage)
+{
+ struct po_page *pop;
+
+ pop = (struct po_page *)trunc_page((vm_offset_t)po);
+ switch (++pop->pop_pgi.pgi_nfree) {
+ case NPOPPG:
+ if (!freepage) {
+ break;
+ }
+ po_nfree -= NPOPPG - 1;
+ po_pcnt--;
+ LIST_REMOVE(pop, pop_pgi.pgi_list);
+ vm_page_free(pop->pop_pgi.pgi_page);
+ return;
+ case 1:
+ LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
+ default:
+ break;
+ }
+ LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po, po_list);
+ po_nfree++;
+}
+
+/*
+ * This returns whether this is the first mapping of a page.
+ */
+static int
+pmap_enter_pv(int pteidx, vm_offset_t va, vm_offset_t pa)
+{
+ struct pv_entry *pv, *npv;
+ int s, first;
+
+ if (!pmap_initialized) {
+ return 0;
+ }
+
+ s = splimp();
+
+ pv = pa_to_pv(pa);
+ first = pv->pv_idx;
+ if (pv->pv_idx == -1) {
+ /*
+ * No entries yet, use header as the first entry.
+ */
+ pv->pv_va = va;
+ pv->pv_idx = pteidx;
+ pv->pv_next = NULL;
+ } else {
+ /*
+ * There is at least one other VA mapping this page.
+ * Place this entry after the header.
+ */
+ npv = pmap_alloc_pv();
+ npv->pv_va = va;
+ npv->pv_idx = pteidx;
+ npv->pv_next = pv->pv_next;
+ pv->pv_next = npv;
+ }
+ splx(s);
+ return first;
+}
+
+static void
+pmap_remove_pv(int pteidx, vm_offset_t va, vm_offset_t pa, struct pte *pte)
+{
+ struct pv_entry *pv, *npv;
+ char *attr;
+
+ /*
+ * First transfer reference/change bits to cache.
+ */
+ attr = pa_to_attr(pa);
+ if (attr == NULL) {
+ return;
+ }
+ *attr |= (pte->pte_lo & (PTE_REF | PTE_CHG)) >> ATTRSHFT;
+
+ /*
+ * Remove from the PV table.
+ */
+ pv = pa_to_pv(pa);
+
+ /*
+ * If it is the first entry on the list, it is actually
+ * in the header and we must copy the following entry up
+ * to the header. Otherwise we must search the list for
+ * the entry. In either case we free the now unused entry.
+ */
+ if (pteidx == pv->pv_idx && va == pv->pv_va) {
+ npv = pv->pv_next;
+ if (npv) {
+ *pv = *npv;
+ pmap_free_pv(npv);
+ } else {
+ pv->pv_idx = -1;
+ }
+ } else {
+ for (; (npv = pv->pv_next); pv = npv) {
+ if (pteidx == npv->pv_idx && va == npv->pv_va) {
+ break;
+ }
+ }
+ if (npv) {
+ pv->pv_next = npv->pv_next;
+ pmap_free_pv(npv);
+ }
+#ifdef DIAGNOSTIC
+ else {
+		panic("pmap_remove_pv: not on list");
+ }
+#endif
+ }
+}
+
+/*
+ * Insert physical page at pa into the given pmap at virtual address va.
+ */
+void
+pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t pg, vm_prot_t prot,
+ boolean_t wired)
+{
+ sr_t sr;
+ int idx, s;
+ pte_t pte;
+ struct pte_ovfl *po;
+ struct mem_region *mp;
+ vm_offset_t pa;
+
+ pa = VM_PAGE_TO_PHYS(pg) & ~PAGE_MASK;
+
+ /*
+ * Have to remove any existing mapping first.
+ */
+ pmap_remove(pm, va, va + PAGE_SIZE);
+
+ /*
+ * Compute the HTAB index.
+ */
+ idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
+ /*
+ * Construct the PTE.
+ *
+ * Note: Don't set the valid bit for correct operation of tlb update.
+ */
+ pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
+ | ((va & ADDR_PIDX) >> ADDR_API_SHFT);
+ pte.pte_lo = (pa & PTE_RPGN) | PTE_M | PTE_I | PTE_G;
+
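+	/*
+	 * Assume cache-inhibited, guarded device space by default; if the
+	 * page turns out to lie in a known RAM region below, allow
+	 * caching by clearing PTE_I and PTE_G.
+	 */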
+ for (mp = mem; mp->size; mp++) {
+ if (pa >= mp->start && pa < mp->start + mp->size) {
+ pte.pte_lo &= ~(PTE_I | PTE_G);
+ break;
+ }
+ }
+ if (prot & VM_PROT_WRITE) {
+ pte.pte_lo |= PTE_RW;
+ } else {
+ pte.pte_lo |= PTE_RO;
+ }
+
+ /*
+ * Now record mapping for later back-translation.
+ */
+ if (pmap_initialized && (pg->flags & PG_FICTITIOUS) == 0) {
+ if (pmap_enter_pv(idx, va, pa)) {
+ /*
+ * Flush the real memory from the cache.
+ */
+ __syncicache((void *)pa, PAGE_SIZE);
+ }
+ }
+
+ s = splimp();
+ pm->pm_stats.resident_count++;
+ /*
+ * Try to insert directly into HTAB.
+ */
+ if (pte_insert(idx, &pte)) {
+ splx(s);
+ return;
+ }
+
+ /*
+ * Have to allocate overflow entry.
+ *
+	 * Note that we must use real addresses for these.
+ */
+ po = poalloc();
+ po->po_pte = pte;
+ LIST_INSERT_HEAD(potable + idx, po, po_list);
+ splx(s);
+}
+
+void
+pmap_kenter(vm_offset_t va, vm_offset_t pa)
+{
+ struct vm_page pg;
+
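+	/*
+	 * Fake up a vm_page on the stack; pmap_enter() only looks at
+	 * its physical address.
+	 */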
+ pg.phys_addr = pa;
+ pmap_enter(kernel_pmap, va, &pg, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+}
+
+void
+pmap_kremove(vm_offset_t va)
+{
+ pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
+}
+
+/*
+ * Remove the given range of mapping entries.
+ */
+void
+pmap_remove(struct pmap *pm, vm_offset_t va, vm_offset_t endva)
+{
+ int idx, i, s;
+ sr_t sr;
+ pte_t *ptp;
+ struct pte_ovfl *po, *npo;
+
+ s = splimp();
+ while (va < endva) {
+ idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
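+		/*
+		 * A mapping may sit in the primary PTEG, in the secondary
+		 * PTEG (PTE_HID set), or on the overflow list; check all
+		 * three.
+		 */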
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if (ptematch(ptp, sr, va, PTE_VALID)) {
+ pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(va);
+ tlbsync();
+ pm->pm_stats.resident_count--;
+ }
+ }
+ for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
+ ptp++) {
+ if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
+ pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(va);
+ tlbsync();
+ pm->pm_stats.resident_count--;
+ }
+ }
+ for (po = potable[idx].lh_first; po; po = npo) {
+ npo = po->po_list.le_next;
+ if (ptematch(&po->po_pte, sr, va, 0)) {
+ pmap_remove_pv(idx, va, po->po_pte.pte_lo,
+ &po->po_pte);
+ LIST_REMOVE(po, po_list);
+ pofree(po, 1);
+ pm->pm_stats.resident_count--;
+ }
+ }
+ va += PAGE_SIZE;
+ }
+ splx(s);
+}
+
+static pte_t *
+pte_find(struct pmap *pm, vm_offset_t va)
+{
+ int idx, i;
+ sr_t sr;
+ pte_t *ptp;
+ struct pte_ovfl *po;
+
+ idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if (ptematch(ptp, sr, va, PTE_VALID)) {
+ return ptp;
+ }
+ }
+ for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++) {
+ if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
+ return ptp;
+ }
+ }
+ for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
+ if (ptematch(&po->po_pte, sr, va, 0)) {
+ return &po->po_pte;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Get the physical page address for the given pmap/virtual address.
+ */
+vm_offset_t
+pmap_extract(pmap_t pm, vm_offset_t va)
+{
+ pte_t *ptp;
+ int s;
+
+ s = splimp();
+
+ if (!(ptp = pte_find(pm, va))) {
+ splx(s);
+ return (0);
+ }
+ splx(s);
+ return ((ptp->pte_lo & PTE_RPGN) | (va & ADDR_POFF));
+}
+
+/*
+ * Lower the protection on the specified range of this pmap.
+ *
+ * There are only two cases: either the protection is going to 0,
+ * or it is going to read-only.
+ */
+void
+pmap_protect(struct pmap *pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+{
+ pte_t *ptp;
+ int valid, s;
+
+ if (prot & VM_PROT_READ) {
+ s = splimp();
+ while (sva < eva) {
+ ptp = pte_find(pm, sva);
+ if (ptp) {
+ valid = ptp->pte_hi & PTE_VALID;
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(sva);
+ tlbsync();
+ ptp->pte_lo &= ~PTE_PP;
+ ptp->pte_lo |= PTE_RO;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= valid;
+ }
+ sva += PAGE_SIZE;
+ }
+ splx(s);
+ return;
+ }
+ pmap_remove(pm, sva, eva);
+}
+
+boolean_t
+ptemodify(vm_page_t pg, u_int mask, u_int val)
+{
+ vm_offset_t pa;
+ struct pv_entry *pv;
+ pte_t *ptp;
+ struct pte_ovfl *po;
+ int i, s;
+ char *attr;
+ int rv;
+
+ pa = VM_PAGE_TO_PHYS(pg);
+
+ /*
+ * First modify bits in cache.
+ */
+ attr = pa_to_attr(pa);
+ if (attr == NULL) {
+ return FALSE;
+ }
+
+ *attr &= ~mask >> ATTRSHFT;
+ *attr |= val >> ATTRSHFT;
+
+ pv = pa_to_pv(pa);
+ if (pv->pv_idx < 0) {
+ return FALSE;
+ }
+
+ rv = FALSE;
+ s = splimp();
+ for (; pv; pv = pv->pv_next) {
+ for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(pv->pv_va);
+ tlbsync();
+ rv |= ptp->pte_lo & mask;
+ ptp->pte_lo &= ~mask;
+ ptp->pte_lo |= val;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= PTE_VALID;
+ }
+ }
+ for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
+ --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(pv->pv_va);
+ tlbsync();
+ rv |= ptp->pte_lo & mask;
+ ptp->pte_lo &= ~mask;
+ ptp->pte_lo |= val;
+ __asm __volatile ("sync");
+ ptp->pte_hi |= PTE_VALID;
+ }
+ }
+ for (po = potable[pv->pv_idx].lh_first; po;
+ po = po->po_list.le_next) {
+ if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
+				rv |= po->po_pte.pte_lo & mask;
+ po->po_pte.pte_lo &= ~mask;
+ po->po_pte.pte_lo |= val;
+ }
+ }
+ }
+ splx(s);
+ return rv != 0;
+}
+
+int
+ptebits(vm_page_t pg, int bit)
+{
+ struct pv_entry *pv;
+ pte_t *ptp;
+ struct pte_ovfl *po;
+ int i, s, bits;
+ char *attr;
+ vm_offset_t pa;
+
+ bits = 0;
+ pa = VM_PAGE_TO_PHYS(pg);
+
+ /*
+ * First try the cache.
+ */
+ attr = pa_to_attr(pa);
+ if (attr == NULL) {
+ return 0;
+ }
+ bits |= (*attr << ATTRSHFT) & bit;
+ if (bits == bit) {
+ return bits;
+ }
+
+ pv = pa_to_pv(pa);
+ if (pv->pv_idx < 0) {
+ return 0;
+ }
+
+ s = splimp();
+ for (; pv; pv = pv->pv_next) {
+ for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ bits |= ptp->pte_lo & bit;
+ if (bits == bit) {
+ splx(s);
+ return bits;
+ }
+ }
+ }
+ for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
+ --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ bits |= ptp->pte_lo & bit;
+ if (bits == bit) {
+ splx(s);
+ return bits;
+ }
+ }
+ }
+ for (po = potable[pv->pv_idx].lh_first; po;
+ po = po->po_list.le_next) {
+ if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
+ bits |= po->po_pte.pte_lo & bit;
+ if (bits == bit) {
+ splx(s);
+ return bits;
+ }
+ }
+ }
+ }
+ splx(s);
+ return bits;
+}
+
+/*
+ * Lower the protection on the specified physical page.
+ *
+ * There are only two cases: either the protection is going to 0,
+ * or it is going to read-only.
+ */
+void
+pmap_page_protect(vm_page_t m, vm_prot_t prot)
+{
+ vm_offset_t pa;
+ vm_offset_t va;
+ pte_t *ptp;
+ struct pte_ovfl *po, *npo;
+ int i, s, idx;
+ struct pv_entry *pv;
+
+ pa = VM_PAGE_TO_PHYS(m);
+
+ pa &= ~ADDR_POFF;
+ if (prot & VM_PROT_READ) {
+ ptemodify(m, PTE_PP, PTE_RO);
+ return;
+ }
+
+ pv = pa_to_pv(pa);
+ if (pv == NULL) {
+ return;
+ }
+
+ s = splimp();
+ while (pv->pv_idx >= 0) {
+ idx = pv->pv_idx;
+ va = pv->pv_va;
+ for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ pmap_remove_pv(idx, va, pa, ptp);
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(va);
+ tlbsync();
+ goto next;
+ }
+ }
+ for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
+ ptp++) {
+ if ((ptp->pte_hi & PTE_VALID)
+ && (ptp->pte_lo & PTE_RPGN) == pa) {
+ pmap_remove_pv(idx, va, pa, ptp);
+ ptp->pte_hi &= ~PTE_VALID;
+ __asm __volatile ("sync");
+ tlbie(va);
+ tlbsync();
+ goto next;
+ }
+ }
+ for (po = potable[idx].lh_first; po; po = npo) {
+ npo = po->po_list.le_next;
+ if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
+ pmap_remove_pv(idx, va, pa, &po->po_pte);
+ LIST_REMOVE(po, po_list);
+ pofree(po, 1);
+ goto next;
+ }
+ }
+next:
+ continue;
+ }
+ splx(s);
+}
+
+/*
+ * Activate the address space for the specified process. If the process
+ * is the current process, load the new MMU context.
+ */
+void
+pmap_activate(struct proc *p)
+{
+ struct pcb *pcb;
+ pmap_t pmap;
+ pmap_t rpm;
+ int psl, i, ksr, seg;
+
+ pcb = &p->p_addr->u_pcb;
+ pmap = p->p_vmspace->vm_map.pmap;
+
+ /*
+ * XXX Normally performed in cpu_fork().
+ */
+ if (pcb->pcb_pm != pmap) {
+ pcb->pcb_pm = pmap;
+ pcb->pcb_pmreal = (pmap_t)pmap_extract(kernel_pmap,
+ (vm_offset_t)pcb->pcb_pm);
+ }
+
+ if (p == curproc) {
+ /* Disable interrupts while switching. */
+ __asm __volatile("mfmsr %0" : "=r"(psl) :);
+ psl &= ~PSL_EE;
+ __asm __volatile("mtmsr %0" :: "r"(psl));
+
+#if 0 /* XXX */
+ /* Store pointer to new current pmap. */
+ curpm = pcb->pcb_pmreal;
+#endif
+
+ /* Save kernel SR. */
+ __asm __volatile("mfsr %0,14" : "=r"(ksr) :);
+
+ /*
+ * Set new segment registers. We use the pmap's real
+ * address to avoid accessibility problems.
+ */
+ rpm = pcb->pcb_pmreal;
+ for (i = 0; i < 16; i++) {
+ seg = rpm->pm_sr[i];
+ __asm __volatile("mtsrin %0,%1"
+ :: "r"(seg), "r"(i << ADDR_SR_SHFT));
+ }
+
+ /* Restore kernel SR. */
+ __asm __volatile("mtsr 14,%0" :: "r"(ksr));
+
+ /* Interrupts are OK again. */
+ psl |= PSL_EE;
+ __asm __volatile("mtmsr %0" :: "r"(psl));
+ }
+}
+
+/*
+ * Add a list of wired pages to the kva.
+ * This routine is only used for temporary
+ * kernel mappings that do not need to have
+ * page modification or references recorded.
+ * Note that old mappings are simply written
+ * over.  The page *must* be wired.
+ */
+void
+pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vm_offset_t tva = va + i * PAGE_SIZE;
+ pmap_kenter(tva, VM_PAGE_TO_PHYS(m[i]));
+ }
+}
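+
+#if 0
+/*
+ * Illustrative, hypothetical use (for exposition only): double-map n
+ * wired pages at a temporary kernel VA, touch them, then tear the
+ * mapping down again.
+ */
+static void
+qenter_example(vm_offset_t kva, vm_page_t *pages, int n)
+{
+ pmap_qenter(kva, pages, n); /* pages[] must be wired */
+ /* ... access the pages through kva ... */
+ pmap_qremove(kva, n);
+}
+#endif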
+
+/*
+ * this routine jerks page mappings from the
+ * kernel -- it is meant only for temporary mappings.
+ */
+void
+pmap_qremove(vm_offset_t va, int count)
+{
+ vm_offset_t end_va;
+
+ end_va = va + count*PAGE_SIZE;
+
+ while (va < end_va) {
+ unsigned *pte;
+
+ pte = (unsigned *)vtopte(va);
+ *pte = 0;
+ tlbie(va);
+ va += PAGE_SIZE;
+ }
+}
+
+/*
+ * pmap_ts_referenced:
+ *
+ * Return the count of reference bits for a page, clearing all of them.
+ */
+int
+pmap_ts_referenced(vm_page_t m)
+{
+
+ /* XXX: coming soon... */
+ return (0);
+}
+
+/*
+ * this routine returns true if a physical page resides
+ * in the given pmap.
+ */
+boolean_t
+pmap_page_exists(pmap_t pmap, vm_page_t m)
+{
+#if 0 /* XXX: This must go! */
+ register pv_entry_t pv;
+ int s;
+
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ return FALSE;
+
+ s = splvm();
+
+ /*
+ * Not found, check current mappings returning immediately if found.
+ */
+ for (pv = pv_table; pv; pv = pv->pv_next) {
+ if (pv->pv_pmap == pmap) {
+ splx(s);
+ return TRUE;
+ }
+ }
+ splx(s);
+#endif
+ return (FALSE);
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * For now, VM is already on, we only need to map the
+ * specified memory.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
+{
+ vm_offset_t sva, va;
+
+ sva = *virt;
+ va = sva;
+
+ while (start < end) {
+ pmap_kenter(va, start);
+ va += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+
+ *virt = va;
+ return (sva);
+}
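+
+#if 0
+/*
+ * Illustrative, hypothetical use (for exposition only): map the
+ * physical range [start, end) at the next free kernel VA, advancing
+ * a local cursor.  The virtual_avail starting point is assumed.
+ */
+static vm_offset_t
+map_range_example(vm_offset_t start, vm_offset_t end)
+{
+ vm_offset_t cursor = virtual_avail;
+
+ return pmap_map(&cursor, start, end, VM_PROT_READ | VM_PROT_WRITE);
+}
+#endif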
+
+vm_offset_t
+pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
+{
+
+ return (addr);
+}
+
+int
+pmap_mincore(pmap_t pmap, vm_offset_t addr)
+{
+
+ /* XXX: coming soon... */
+ return (0);
+}
+
+void
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+ vm_pindex_t pindex, vm_size_t size, int limit)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_growkernel(vm_offset_t addr)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+/*
+ * Initialize the address space (zone) for the pv_entries. Set a
+ * high water mark so that the system can recover from excessive
+ * numbers of pv entries.
+ */
+void
+pmap_init2()
+{
+ pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size;
+ pv_entry_high_water = 9 * (pv_entry_max / 10);
+ zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
+}
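+
+/*
+ * Worked example with illustrative (not default) values:
+ * PMAP_SHPGPERPROC = 200, maxproc = 512, vm_page_array_size = 65536
+ * gives pv_entry_max = 200 * 512 + 65536 = 167936 and
+ * pv_entry_high_water = 9 * (167936 / 10) = 151137, i.e. about 90%.
+ */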
+
+void
+pmap_swapin_proc(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_swapout_proc(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_new_proc(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable)
+{
+
+ return;
+}
+
+void
+pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_pinit0(pmap_t pmap)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+void
+pmap_dispose_proc(struct proc *p)
+{
+
+ /* XXX: coming soon... */
+ return;
+}
+
+vm_offset_t
+pmap_steal_memory(vm_size_t size)
+{
+ vm_size_t bank_size;
+ vm_offset_t pa;
+
+ size = round_page(size);
+
+ bank_size = phys_avail[1] - phys_avail[0];
+ while (size > bank_size) {
+ int i;
+ for (i = 0; phys_avail[i+2]; i+= 2) {
+ phys_avail[i] = phys_avail[i+2];
+ phys_avail[i+1] = phys_avail[i+3];
+ }
+ phys_avail[i] = 0;
+ phys_avail[i+1] = 0;
+ if (!phys_avail[0])
+ panic("pmap_steal_memory: out of memory");
+ bank_size = phys_avail[1] - phys_avail[0];
+ }
+
+ pa = phys_avail[0];
+ phys_avail[0] += size;
+
+ bzero((caddr_t) pa, size);
+ return pa;
+}
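+
+#if 0
+/*
+ * Illustrative, made-up layout (for exposition only).  phys_avail[]
+ * holds (start, end) pairs of available physical memory terminated by
+ * a zero pair; the loop above discards leading banks that are too
+ * small for the request.
+ */
+static vm_offset_t phys_avail_example[] = {
+ 0x00100000, 0x00200000, /* bank 0: 1MB */
+ 0x00400000, 0x00c00000, /* bank 1: 8MB */
+ 0, 0 /* terminator */
+};
+#endif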
diff --git a/sys/powerpc/powerpc/subyte.c b/sys/powerpc/powerpc/subyte.c
new file mode 100644
index 0000000..0568f52
--- /dev/null
+++ b/sys/powerpc/powerpc/subyte.c
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (C) 1993 Wolfgang Solfrank.
+ * Copyright (C) 1993 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: subyte.c,v 1.2 2000/06/09 14:05:47 kleink Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/systm.h>
+
+/*
+ * Emulate subyte.
+ */
+
+int
+subyte(void *base, int byte)
+{
+
+ if (copyout(&byte, base, 1)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+suibyte(void *base, int byte)
+{
+
+ return subyte(base, byte);
+}
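+
+#if 0
+/*
+ * Illustrative, hypothetical use (for exposition only): store a
+ * status byte at a user-supplied address, failing gracefully on a
+ * bad pointer.
+ */
+static int
+subyte_example(void *user_ptr)
+{
+ if (subyte(user_ptr, 0) == -1)
+ return EFAULT;
+ return 0;
+}
+#endif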
diff --git a/sys/powerpc/powerpc/suswintr.c b/sys/powerpc/powerpc/suswintr.c
new file mode 100644
index 0000000..4a0f5c1
--- /dev/null
+++ b/sys/powerpc/powerpc/suswintr.c
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (C) 1994 Wolfgang Solfrank.
+ * Copyright (C) 1994 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: suswintr.c,v 1.2 2000/06/09 14:05:48 kleink Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/param.h>
+#include <sys/resourcevar.h>
+
+/*
+ * Emulate suswintr
+ *
+ * Simply return fault for all cases
+ */
+int
+suswintr(void *addr, int s)
+{
+
+ return -1;
+}
diff --git a/sys/powerpc/powerpc/suword.c b/sys/powerpc/powerpc/suword.c
new file mode 100644
index 0000000..e0178c5
--- /dev/null
+++ b/sys/powerpc/powerpc/suword.c
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (C) 1993 Wolfgang Solfrank.
+ * Copyright (C) 1993 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: suword.c,v 1.2 2000/06/09 14:05:49 kleink Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/systm.h>
+
+/*
+ * Emulate suword
+ */
+int
+suword(void *addr, long l)
+{
+
+ if (copyout(&l, addr, sizeof(l))) {
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/sys/powerpc/powerpc/swtch.S b/sys/powerpc/powerpc/swtch.S
new file mode 100644
index 0000000..34bdb85
--- /dev/null
+++ b/sys/powerpc/powerpc/swtch.S
@@ -0,0 +1,249 @@
+/* $FreeBSD$ */
+/* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */
+
+/*
+ * Copyright (C) 2001 Benno Rice
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "assym.s"
+
+#include <sys/syscall.h>
+
+#include <machine/trap.h>
+#include <machine/param.h>
+#include <machine/pmap.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+
+/*
+ * Some instructions gas doesn't understand (yet?)
+ */
+#define bdneq bdnzf 2,
+
+/*
+ * No processes are runnable, so loop waiting for one.
+ * Separate label here for accounting purposes.
+ */
+#if 0 /* XXX: I think this is now unneeded. Leaving it in just in case. */
+ASENTRY(Idle)
+ mfmsr 3
+ andi. 3,3,~PSL_EE@l /* disable interrupts while
+ manipulating runque */
+ mtmsr 3
+
+ lis 8,sched_whichqs@ha
+ lwz 9,sched_whichqs@l(8)
+
+ or. 9,9,9
+ bne- .Lsw1 /* at least one queue non-empty */
+
+ ori 3,3,PSL_EE@l /* reenable ints again */
+ mtmsr 3
+ isync
+
+/* Check if we can use power saving mode */
+ lis 8,powersave@ha
+ lwz 9,powersave@l(8)
+
+ or. 9,9,9
+ beq 1f
+
+ sync
+ oris 3,3,PSL_POW@h /* enter power saving mode */
+ mtmsr 3
+ isync
+1:
+ b _ASM_LABEL(Idle)
+#endif /* XXX */
+
+/*
+ * switchexit gets called from cpu_exit to complete the exit procedure.
+ */
+ENTRY(switchexit)
+/* First switch to the idle pcb/kernel stack */
+#if 0 /* XXX */
+ lis 6,idle_u@ha
+ lwz 6,idle_u@l(6)
+ mfsprg 7,0
+ stw 6,GD_CURPCB(7)
+#endif
+ addi 1,6,USPACE-16 /* 16 bytes are reserved at stack top */
+ /*
+ * Schedule the vmspace and stack to be freed (the proc arg is
+ * already in r3).
+ */
+ bl sys_exit
+
+/* Fall through to cpu_switch to actually select another proc */
+ li 3,0 /* indicate exited process */
+
+/*
+ * void cpu_switch(struct proc *p)
+ * Find a runnable process and switch to it.
+ */
+/* XXX noprofile? --thorpej@netbsd.org */
+ENTRY(cpu_switch)
+ mflr 0 /* save lr */
+ stw 0,4(1)
+ stwu 1,-16(1)
+ stw 31,12(1)
+ stw 30,8(1)
+
+ mr 30,3
+ mfsprg 3,0
+ xor 31,31,31
+ stw 31,GD_CURPROC(3) /* Zero to not accumulate cpu time */
+ mfsprg 3,0
+ lwz 31,GD_CURPCB(3)
+
+ xor 3,3,3
+#if 0 /* XXX */
+ bl lcsplx
+#endif
+ stw 3,PCB_SPL(31) /* save spl */
+
+/* Find a new process */
+ bl chooseproc
+
+1:
+ /* just did this resched thing */
+ xor 3,3,3
+ lis 4,want_resched@ha
+ stw 3,want_resched@l(4)
+
+ /* record new process */
+ mfsprg 4,0
+ stw 3,GD_CURPROC(4)
+
+ cmpl 0,31,30 /* is it the same process? */
+ beq switch_return
+
+ or. 30,30,30 /* old process was exiting? */
+ beq switch_exited
+
+ mfsr 10,USER_SR /* save USER_SR for copyin/copyout */
+ mfcr 11 /* save cr */
+ mr 12,2 /* save r2 */
+ stwu 1,-SFRAMELEN(1) /* still running on old stack */
+ stmw 10,8(1)
+ lwz 3,P_ADDR(30)
+ stw 1,PCB_SP(3) /* save SP */
+
+switch_exited:
+ mfmsr 3
+ andi. 3,3,~PSL_EE@l /* disable interrupts while
+ actually switching */
+ mtmsr 3
+
+ /* indicate new pcb */
+ lwz 4,P_ADDR(31)
+ mfsprg 5,0
+ stw 4,GD_CURPCB(5)
+
+#if 0 /* XXX */
+ /* save real pmap pointer for spill fill */
+ lwz 5,PCB_PMR(4)
+ lis 6,curpm@ha
+ stwu 5,curpm@l(6)
+ stwcx. 5,0,6 /* clear possible reservation */
+#endif
+
+ addic. 5,5,64
+ li 6,0
+ mfsr 8,KERNEL_SR /* save kernel SR */
+1:
+ addis 6,6,-0x10000000@ha /* set new procs segment registers */
+ or. 6,6,6 /* This is done from the real
+ address pmap */
+ lwzu 7,-4(5) /* so we don't have to worry */
+ mtsrin 7,6 /* about accessibility */
+ bne 1b
+ mtsr KERNEL_SR,8 /* restore kernel SR */
+ isync
+
+ lwz 1,PCB_SP(4) /* get new procs SP */
+
+ ori 3,3,PSL_EE@l /* interrupts are okay again */
+ mtmsr 3
+
+ lmw 10,8(1) /* get other regs */
+ lwz 1,0(1) /* get saved SP */
+ mr 2,12 /* get saved r2 */
+ mtcr 11 /* get saved cr */
+ isync
+ mtsr USER_SR,10 /* get saved USER_SR */
+ isync
+
+switch_return:
+ mr 30,7 /* save proc pointer */
+ lwz 3,PCB_SPL(4)
+#if 0 /* XXX */
+ bl lcsplx
+#endif
+
+ mr 3,30 /* get curproc for special fork
+ returns */
+
+ lwz 31,12(1)
+ lwz 30,8(1)
+ addi 1,1,16
+ lwz 0,4(1)
+ mtlr 0
+ blr
+
+/*
+ * Fake savectx for the time being.
+ */
+ENTRY(savectx)
+ blr
diff --git a/sys/powerpc/powerpc/syncicache.c b/sys/powerpc/powerpc/syncicache.c
new file mode 100644
index 0000000..f5c93b3
--- /dev/null
+++ b/sys/powerpc/powerpc/syncicache.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 1995-1997, 1999 Wolfgang Solfrank.
+ * Copyright (C) 1995-1997, 1999 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: syncicache.c,v 1.2 1999/05/05 12:36:40 tsubai Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include <sys/param.h>
+#if defined(_KERNEL) || defined(_STANDALONE)
+#include <sys/time.h>
+#include <sys/proc.h>
+#include <vm/vm.h>
+#endif
+#include <sys/sysctl.h>
+
+#include <machine/cpu.h>
+
+#if defined(_KERNEL) || defined(_STANDALONE)
+#ifndef CACHELINESIZE
+#error "Must know the size of a cache line"
+#endif
+#else
+static void getcachelinesize __P((void));
+
+static int _cachelinesize;
+#define CACHELINESIZE _cachelinesize
+
+static void
+getcachelinesize()
+{
+ static int cachemib[] = { CTL_MACHDEP, CPU_CACHELINE };
+ int clen;
+
+ clen = sizeof(_cachelinesize);
+
+ if (sysctl(cachemib, sizeof(cachemib) / sizeof(cachemib[0]),
+ &_cachelinesize, &clen, NULL, 0) < 0 || !_cachelinesize) {
+ abort();
+ }
+}
+#endif
+
+void
+__syncicache(void *from, int len)
+{
+ int l, off;
+ char *p;
+
+#if !defined(_KERNEL) && !defined(_STANDALONE)
+ if (!_cachelinesize)
+ getcachelinesize();
+#endif
+ off = (u_int)from & (CACHELINESIZE - 1);
+ l = len += off;
+ p = (char *)from - off;
+ do {
+ __asm __volatile ("dcbst 0,%0" :: "r"(p));
+ p += CACHELINESIZE;
+ } while ((l -= CACHELINESIZE) > 0);
+ __asm __volatile ("sync");
+ p = (char *)from - off;
+ do {
+ __asm __volatile ("icbi 0,%0" :: "r"(p));
+ p += CACHELINESIZE;
+ } while ((len -= CACHELINESIZE) > 0);
+ __asm __volatile ("isync");
+}
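+
+#if 0
+/*
+ * Illustrative, hypothetical use (for exposition only): after storing
+ * a new instruction (e.g. a breakpoint opcode), flush it from the
+ * data cache and invalidate the instruction cache so the CPU fetches
+ * the updated word.
+ */
+static void
+patch_insn_example(u_int32_t *addr, u_int32_t new_insn)
+{
+ *addr = new_insn;
+ __syncicache(addr, sizeof(*addr));
+}
+#endif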
diff --git a/sys/powerpc/powerpc/trap.c b/sys/powerpc/powerpc/trap.c
new file mode 100644
index 0000000..408ed44
--- /dev/null
+++ b/sys/powerpc/powerpc/trap.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: trap.c,v 1.26 2000/05/27 00:40:40 sommerfeld Exp $
+ */
+
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD$";
+#endif /* not lint */
+
+#include "opt_ddb.h"
+#include "opt_ktrace.h"
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/reboot.h>
+#include <sys/syscall.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/user.h>
+#include <sys/ktrace.h>
+
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/pmap.h>
+
+#include <machine/cpu.h>
+#include <machine/frame.h>
+#include <machine/pcb.h>
+#include <machine/psl.h>
+#include <machine/trap.h>
+
+/* These definitions should probably be somewhere else XXX */
+#define FIRSTARG 3 /* first argument is in reg 3 */
+#define NARGREG 8 /* 8 args are in registers */
+#define MOREARGS(sp) ((caddr_t)((int)(sp) + 8)) /* more args go here */
+
+volatile int astpending;
+volatile int want_resched;
+
+#if 0 /* XXX: not used yet */
+static int fix_unaligned __P((struct proc *p, struct trapframe *frame));
+#endif
+
+void
+trap(struct trapframe *frame)
+{
+#if 0 /* XXX: This code hasn't been reworked yet. */
+ struct proc *p;
+ int type;
+ u_quad_t sticks;
+
+ p = curproc;
+ type = frame->exc;
+
+ if (frame->srr1 & PSL_PR) {
+ type |= EXC_USER;
+ sticks = p->p_sticks;
+ }
+
+ switch (type) {
+ case EXC_TRC|EXC_USER:
+ frame->srr1 &= ~PSL_SE;
+ trapsignal(p, SIGTRAP, EXC_TRC);
+ break;
+ case EXC_DSI:
+ {
+ vm_map_t map;
+ vaddr_t va;
+ int ftype;
+ faultbuf *fb;
+
+ map = kernel_map;
+ va = frame->dar;
+ if ((va >> ADDR_SR_SHFT) == USER_SR) {
+ sr_t user_sr;
+
+ __asm ("mfsr %0, %1"
+ : "=r"(user_sr) : "K"(USER_SR));
+ va &= ADDR_PIDX | ADDR_POFF;
+ va |= user_sr << ADDR_SR_SHFT;
+ map = &p->p_vmspace->vm_map;
+ }
+ if (frame->dsisr & DSISR_STORE)
+ ftype = VM_PROT_READ | VM_PROT_WRITE;
+ else
+ ftype = VM_PROT_READ;
+ if (uvm_fault(map, trunc_page(va), 0, ftype)
+ == KERN_SUCCESS)
+ return;
+ if (fb = p->p_addr->u_pcb.pcb_onfault) {
+ frame->srr0 = (*fb)[0];
+ frame->fixreg[1] = (*fb)[1];
+ frame->fixreg[2] = (*fb)[2];
+ frame->cr = (*fb)[3];
+ bcopy(&(*fb)[4], &frame->fixreg[13],
+ 19 * sizeof(register_t));
+ return;
+ }
+ map = kernel_map;
+ }
+ goto brain_damage;
+ case EXC_DSI|EXC_USER:
+ {
+ int ftype, rv;
+
+ if (frame->dsisr & DSISR_STORE)
+ ftype = VM_PROT_READ | VM_PROT_WRITE;
+ else
+ ftype = VM_PROT_READ;
+ if ((rv = uvm_fault(&p->p_vmspace->vm_map,
+ trunc_page(frame->dar), 0, ftype))
+ == KERN_SUCCESS)
+ break;
+ if (rv == KERN_RESOURCE_SHORTAGE) {
+ printf("UVM: pid %d (%s), uid %d killed: "
+ "out of swap\n",
+ p->p_pid, p->p_comm,
+ p->p_cred && p->p_ucred ?
+ p->p_ucred->cr_uid : -1);
+ trapsignal(p, SIGKILL, EXC_DSI);
+ } else {
+ trapsignal(p, SIGSEGV, EXC_DSI);
+ }
+ }
+ break;
+ case EXC_ISI|EXC_USER:
+ {
+ int ftype;
+
+ ftype = VM_PROT_READ | VM_PROT_EXECUTE;
+ if (uvm_fault(&p->p_vmspace->vm_map,
+ trunc_page(frame->srr0), 0, ftype)
+ == KERN_SUCCESS)
+ break;
+ }
+ trapsignal(p, SIGSEGV, EXC_ISI);
+ break;
+ case EXC_SC|EXC_USER:
+ {
+ struct sysent *callp;
+ size_t argsize;
+ register_t code, error;
+ register_t *params, rval[2];
+ int nsys, n;
+ register_t args[10];
+
+ uvmexp.syscalls++;
+
+ nsys = p->p_emul->e_nsysent;
+ callp = p->p_emul->e_sysent;
+
+ code = frame->fixreg[0];
+ params = frame->fixreg + FIRSTARG;
+
+ switch (code) {
+ case SYS_syscall:
+ /*
+ * code is first argument,
+ * followed by actual args.
+ */
+ code = *params++;
+ break;
+ case SYS___syscall:
+ /*
+ * Like syscall, but code is a quad,
+ * so as to maintain quad alignment
+ * for the rest of the args.
+ */
+ if (callp != sysent)
+ break;
+ params++;
+ code = *params++;
+ break;
+ default:
+ break;
+ }
+ if (code < 0 || code >= nsys)
+ callp += p->p_emul->e_nosys;
+ else
+ callp += code;
+ argsize = callp->sy_argsize;
+ n = NARGREG - (params - (frame->fixreg + FIRSTARG));
+ if (argsize > n * sizeof(register_t)) {
+ bcopy(params, args, n * sizeof(register_t));
+ if (error = copyin(MOREARGS(frame->fixreg[1]),
+ args + n,
+ argsize - n * sizeof(register_t))) {
+#ifdef KTRACE
+ /* Can't get all the arguments! */
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p, code, argsize,
+ args);
+#endif
+ goto syscall_bad;
+ }
+ params = args;
+ }
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p, code, argsize, params);
+#endif
+ rval[0] = 0;
+ rval[1] = frame->fixreg[FIRSTARG + 1];
+
+ switch (error = (*callp->sy_call)(p, params, rval)) {
+ case 0:
+ frame->fixreg[FIRSTARG] = rval[0];
+ frame->fixreg[FIRSTARG + 1] = rval[1];
+ frame->cr &= ~0x10000000;
+ break;
+ case ERESTART:
+ /*
+ * Set user's pc back to redo the system call.
+ */
+ frame->srr0 -= 4;
+ break;
+ case EJUSTRETURN:
+ /* nothing to do */
+ break;
+ default:
+syscall_bad:
+ if (p->p_emul->e_errno)
+ error = p->p_emul->e_errno[error];
+ frame->fixreg[FIRSTARG] = error;
+ frame->cr |= 0x10000000;
+ break;
+ }
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p, code, error, rval[0]);
+#endif
+ }
+ break;
+
+ case EXC_FPU|EXC_USER:
+ if (fpuproc)
+ save_fpu(fpuproc);
+ fpuproc = p;
+ enable_fpu(p);
+ break;
+
+ case EXC_AST|EXC_USER:
+ /* This is just here that we trap */
+ break;
+
+ case EXC_ALI|EXC_USER:
+ if (fix_unaligned(p, frame) != 0)
+ trapsignal(p, SIGBUS, EXC_ALI);
+ else
+ frame->srr0 += 4;
+ break;
+
+ case EXC_PGM|EXC_USER:
+/* XXX temporarily */
+ if (frame->srr1 & 0x00020000)
+ trapsignal(p, SIGTRAP, EXC_PGM);
+ else
+ trapsignal(p, SIGILL, EXC_PGM);
+ break;
+
+ case EXC_MCHK:
+ {
+ faultbuf *fb;
+
+ if (fb = p->p_addr->u_pcb.pcb_onfault) {
+ frame->srr0 = (*fb)[0];
+ frame->fixreg[1] = (*fb)[1];
+ frame->fixreg[2] = (*fb)[2];
+ frame->cr = (*fb)[3];
+ bcopy(&(*fb)[4], &frame->fixreg[13],
+ 19 * sizeof(register_t));
+ return;
+ }
+ }
+ goto brain_damage;
+
+ default:
+brain_damage:
+ printf("trap type %x at %x\n", type, frame->srr0);
+#ifdef DDB
+ Debugger(); /* XXX temporarily */
+#endif
+#ifdef TRAP_PANICWAIT
+ printf("Press a key to panic.\n");
+ cngetc();
+#endif
+ panic("trap");
+ }
+
+ astpending = 0; /* we are about to do it */
+
+ uvmexp.softs++;
+
+ if (p->p_flag & P_OWEUPC) {
+ p->p_flag &= ~P_OWEUPC;
+ ADDUPROF(p);
+ }
+
+ /* take pending signals */
+ {
+ int sig;
+
+ while (sig = CURSIG(p))
+ postsig(sig);
+ }
+
+ p->p_priority = p->p_usrpri;
+ if (want_resched) {
+ int sig;
+ /*
+ * We are being preempted.
+ */
+ preempt(NULL);
+ while (sig = CURSIG(p))
+ postsig(sig);
+ }
+
+ /*
+ * If profiling, charge recent system time to the trapped pc.
+ */
+ if (p->p_flag & P_PROFIL) {
+ extern int psratio;
+
+ addupc_task(p, frame->srr0,
+ (int)(p->p_sticks - sticks) * psratio);
+ }
+ /*
+ * If someone stole the fpu while we were away, disable it
+ */
+ if (p != fpuproc)
+ frame->srr1 &= ~PSL_FP;
+ curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
+#endif
+}
+
+#if 0 /* XXX: child_return not used */
+void
+child_return(void *arg)
+{
+ struct proc *p;
+ struct trapframe *tf;
+
+ p = arg;
+ tf = trapframe(p);
+
+ tf->fixreg[FIRSTARG] = 0;
+ tf->fixreg[FIRSTARG + 1] = 1;
+ tf->cr &= ~0x10000000;
+ tf->srr1 &= ~PSL_FP; /* Disable FPU, as we can't be fpuproc */
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p, SYS_fork, 0, 0);
+#endif
+ /* Profiling? XXX */
+ curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
+}
+#endif
+
+static __inline void
+setusr(int content)
+{
+
+ __asm __volatile ("isync; mtsr %0,%1; isync"
+ :: "n"(USER_SR), "r"(content));
+}
+
+int
+copyin(udaddr, kaddr, len)
+ const void *udaddr;
+ void *kaddr;
+ size_t len;
+{
+ const char *up;
+ char *kp;
+ char *p;
+ size_t l;
+ faultbuf env;
+
+ up = udaddr;
+ kp = kaddr;
+
+#if 0
+ if (setfault(env)) {
+ curpcb->pcb_onfault = 0;
+ return EFAULT;
+ }
+#endif
+ while (len > 0) {
+ p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
+ l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
+ if (l > len)
+ l = len;
+ setusr(curpcb->pcb_pm->pm_sr[(u_int)up >> ADDR_SR_SHFT]);
+ bcopy(p, kp, l);
+ up += l;
+ kp += l;
+ len -= l;
+ }
+ curpcb->pcb_onfault = 0;
+ return 0;
+}
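+
+/*
+ * Illustrative note (hypothetical values, for exposition only).
+ * copyin and copyout work one 256MB segment at a time: setusr()
+ * points the USER_SR segment register at the user segment containing
+ * `up', which makes that segment appear at the fixed USER_ADDR
+ * window.  For example, with up = 0x1234beef the in-segment offset is
+ * up & ~SEGMENT_MASK = 0x0234beef, so the window address is
+ * USER_ADDR + 0x0234beef, and at most SEGMENT_LENGTH - 0x0234beef
+ * bytes can be copied before USER_SR must be re-loaded for the next
+ * segment.
+ */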
+
+int
+copyout(kaddr, udaddr, len)
+ const void *kaddr;
+ void *udaddr;
+ size_t len;
+{
+ const char *kp;
+ char *up;
+ char *p;
+ size_t l;
+ faultbuf env;
+
+ kp = kaddr;
+ up = udaddr;
+
+#if 0
+ if (setfault(env)) {
+ curpcb->pcb_onfault = 0;
+ return EFAULT;
+ }
+#endif
+ while (len > 0) {
+ p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
+ l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
+ if (l > len)
+ l = len;
+ setusr(curpcb->pcb_pm->pm_sr[(u_int)up >> ADDR_SR_SHFT]);
+ bcopy(kp, p, l);
+ up += l;
+ kp += l;
+ len -= l;
+ }
+ curpcb->pcb_onfault = 0;
+ return 0;
+}
+
+#if 0 /* XXX: not used yet */
+/*
+ * kcopy(const void *src, void *dst, size_t len);
+ *
+ * Copy len bytes from src to dst, aborting if we encounter a fatal
+ * page fault.
+ *
+ * kcopy() _must_ save and restore the old fault handler since it is
+ * called by uiomove(), which may be in the path of servicing a non-fatal
+ * page fault.
+ */
+int
+kcopy(const void *src, void *dst, size_t len)
+{
+ faultbuf env, *oldfault;
+
+ oldfault = curpcb->pcb_onfault;
+ if (setfault(env)) {
+ curpcb->pcb_onfault = oldfault;
+ return EFAULT;
+ }
+
+ bcopy(src, dst, len);
+
+ curpcb->pcb_onfault = oldfault;
+ return 0;
+}
+
+int
+badaddr(void *addr, size_t size)
+{
+
+ return badaddr_read(addr, size, NULL);
+}
+
+int
+badaddr_read(void *addr, size_t size, int *rptr)
+{
+ faultbuf env;
+ int x;
+
+ /* Get rid of any stale machine checks that have been waiting. */
+ __asm __volatile ("sync; isync");
+
+ if (setfault(env)) {
+ curpcb->pcb_onfault = 0;
+ __asm __volatile ("sync");
+ return 1;
+ }
+
+ __asm __volatile ("sync");
+
+ switch (size) {
+ case 1:
+ x = *(volatile int8_t *)addr;
+ break;
+ case 2:
+ x = *(volatile int16_t *)addr;
+ break;
+ case 4:
+ x = *(volatile int32_t *)addr;
+ break;
+ default:
+ panic("badaddr: invalid size (%d)", size);
+ }
+
+ /* Make sure we took the machine check, if we caused one. */
+ __asm __volatile ("sync; isync");
+
+ curpcb->pcb_onfault = 0;
+ __asm __volatile ("sync"); /* To be sure. */
+
+ /* Use the value to avoid reorder. */
+ if (rptr)
+ *rptr = x;
+
+ return 0;
+}
+#endif
+
+/*
+ * For now, this only deals with the particular unaligned access case
+ * that gcc tends to generate. Eventually it should handle all of the
+ * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
+ */
+
+#if 0 /* XXX: Not used yet */
+static int
+fix_unaligned(p, frame)
+ struct proc *p;
+ struct trapframe *frame;
+{
+ int indicator;
+
+ indicator = EXC_ALI_OPCODE_INDICATOR(frame->dsisr);
+
+ switch (indicator) {
+ case EXC_ALI_LFD:
+ case EXC_ALI_STFD:
+ {
+ int reg = EXC_ALI_RST(frame->dsisr);
+ double *fpr = &p->p_addr->u_pcb.pcb_fpu.fpr[reg];
+
+ /* Juggle the FPU to ensure that we've initialized
+ * the FPRs, and that their current state is in
+ * the PCB.
+ */
+ if (fpuproc != p) {
+ if (fpuproc)
+ save_fpu(fpuproc);
+ enable_fpu(p);
+ }
+ save_fpu(p);
+
+ if (indicator == EXC_ALI_LFD) {
+ if (copyin((void *)frame->dar, fpr,
+ sizeof(double)) != 0)
+ return -1;
+ enable_fpu(p);
+ } else {
+ if (copyout(fpr, (void *)frame->dar,
+ sizeof(double)) != 0)
+ return -1;
+ }
+ return 0;
+ }
+ break;
+ }
+
+ return -1;
+}
+#endif
+
+void
+userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)
+{
+
+ /* XXX: Coming soon */
+ return;
+}