author    dfr <dfr@FreeBSD.org>    2000-09-29 13:46:07 +0000
committer dfr <dfr@FreeBSD.org>    2000-09-29 13:46:07 +0000
commit    263f9f686317c48283eeb1e362d5f2b85791d899 (patch)
tree      8fe9163c4a062545fc39dae57fe3beec1daf9cdb
parent    fa6af7ab3eed1a53d0df517fc2c374fd886c6e63 (diff)
This is the first snapshot of the FreeBSD/ia64 kernel. This kernel will
not work on any real hardware (or fully work on any simulator). Much more needs to happen before this is actually functional, but it's nice to see the FreeBSD copyright message appear in the ia64 simulator.
-rw-r--r--  sys/amd64/amd64/in_cksum.c | 249
-rw-r--r--  sys/amd64/include/in_cksum.h | 80
-rw-r--r--  sys/conf/Makefile.ia64 | 340
-rw-r--r--  sys/conf/files.ia64 | 113
-rw-r--r--  sys/conf/ldscript.ia64 | 134
-rw-r--r--  sys/conf/options.ia64 | 55
-rw-r--r--  sys/ia64/conf/GENERIC | 165
-rw-r--r--  sys/ia64/conf/GENERIC.hints | 26
-rw-r--r--  sys/ia64/ia64/autoconf.c | 121
-rw-r--r--  sys/ia64/ia64/busdma_machdep.c | 718
-rw-r--r--  sys/ia64/ia64/clock.c | 511
-rw-r--r--  sys/ia64/ia64/clock_if.m | 52
-rw-r--r--  sys/ia64/ia64/elf_machdep.c | 76
-rw-r--r--  sys/ia64/ia64/exception.S | 949
-rw-r--r--  sys/ia64/ia64/exception.s | 949
-rw-r--r--  sys/ia64/ia64/genassym.c | 109
-rw-r--r--  sys/ia64/ia64/genassym.sh | 53
-rw-r--r--  sys/ia64/ia64/in_cksum.c | 249
-rw-r--r--  sys/ia64/ia64/interrupt.c | 187
-rw-r--r--  sys/ia64/ia64/ipl_funcs.c | 222
-rw-r--r--  sys/ia64/ia64/locore.S | 394
-rw-r--r--  sys/ia64/ia64/locore.s | 394
-rw-r--r--  sys/ia64/ia64/machdep.c | 1364
-rw-r--r--  sys/ia64/ia64/mem.c | 296
-rw-r--r--  sys/ia64/ia64/mp_machdep.c | 789
-rw-r--r--  sys/ia64/ia64/pal.S | 71
-rw-r--r--  sys/ia64/ia64/pal.s | 71
-rw-r--r--  sys/ia64/ia64/pmap.c | 2388
-rw-r--r--  sys/ia64/ia64/procfs_machdep.c | 152
-rw-r--r--  sys/ia64/ia64/ssc.c | 263
-rw-r--r--  sys/ia64/ia64/support.S | 662
-rw-r--r--  sys/ia64/ia64/support.s | 662
-rw-r--r--  sys/ia64/ia64/swtch.s | 155
-rw-r--r--  sys/ia64/ia64/synch_machdep.c | 562
-rw-r--r--  sys/ia64/ia64/sys_machdep.c | 79
-rw-r--r--  sys/ia64/ia64/timerreg.h | 110
-rw-r--r--  sys/ia64/ia64/trap.c | 782
-rw-r--r--  sys/ia64/ia64/vm_machdep.c | 494
-rw-r--r--  sys/ia64/include/_limits.h | 96
-rw-r--r--  sys/ia64/include/ansi.h | 119
-rw-r--r--  sys/ia64/include/asm.h | 257
-rw-r--r--  sys/ia64/include/atomic.h | 190
-rw-r--r--  sys/ia64/include/bootinfo.h | 86
-rw-r--r--  sys/ia64/include/bus.h | 1173
-rw-r--r--  sys/ia64/include/bus_memio.h | 31
-rw-r--r--  sys/ia64/include/bus_pio.h | 31
-rw-r--r--  sys/ia64/include/clock.h | 25
-rw-r--r--  sys/ia64/include/clockvar.h | 53
-rw-r--r--  sys/ia64/include/console.h | 10
-rw-r--r--  sys/ia64/include/cpu.h | 174
-rw-r--r--  sys/ia64/include/cpufunc.h | 197
-rw-r--r--  sys/ia64/include/db_machdep.h | 91
-rw-r--r--  sys/ia64/include/efi.h | 71
-rw-r--r--  sys/ia64/include/elf.h | 203
-rw-r--r--  sys/ia64/include/endian.h | 97
-rw-r--r--  sys/ia64/include/exec.h | 35
-rw-r--r--  sys/ia64/include/float.h | 79
-rw-r--r--  sys/ia64/include/floatingpoint.h | 35
-rw-r--r--  sys/ia64/include/fpu.h | 81
-rw-r--r--  sys/ia64/include/frame.h | 114
-rw-r--r--  sys/ia64/include/globaldata.h | 78
-rw-r--r--  sys/ia64/include/globals.h | 63
-rw-r--r--  sys/ia64/include/ia64_cpu.h | 203
-rw-r--r--  sys/ia64/include/ieee.h | 124
-rw-r--r--  sys/ia64/include/ieeefp.h | 29
-rw-r--r--  sys/ia64/include/in_cksum.h | 80
-rw-r--r--  sys/ia64/include/intr.h | 37
-rw-r--r--  sys/ia64/include/intrcnt.h | 79
-rw-r--r--  sys/ia64/include/ioctl_bt848.h | 288
-rw-r--r--  sys/ia64/include/ioctl_fd.h | 126
-rw-r--r--  sys/ia64/include/ioctl_meteor.h | 187
-rw-r--r--  sys/ia64/include/ipl.h | 132
-rw-r--r--  sys/ia64/include/limits.h | 96
-rw-r--r--  sys/ia64/include/lock.h | 58
-rw-r--r--  sys/ia64/include/md_var.h | 57
-rw-r--r--  sys/ia64/include/mouse.h | 336
-rw-r--r--  sys/ia64/include/mutex.h | 563
-rw-r--r--  sys/ia64/include/pal.h | 95
-rw-r--r--  sys/ia64/include/param.h | 163
-rw-r--r--  sys/ia64/include/pcb.h | 68
-rw-r--r--  sys/ia64/include/pcpu.h | 78
-rw-r--r--  sys/ia64/include/pmap.h | 233
-rw-r--r--  sys/ia64/include/proc.h | 49
-rw-r--r--  sys/ia64/include/profile.h | 237
-rw-r--r--  sys/ia64/include/ptrace.h | 53
-rw-r--r--  sys/ia64/include/reg.h | 58
-rw-r--r--  sys/ia64/include/reloc.h | 33
-rw-r--r--  sys/ia64/include/resource.h | 44
-rw-r--r--  sys/ia64/include/setjmp.h | 46
-rw-r--r--  sys/ia64/include/sigframe.h | 42
-rw-r--r--  sys/ia64/include/signal.h | 77
-rw-r--r--  sys/ia64/include/smp.h | 48
-rw-r--r--  sys/ia64/include/stdarg.h | 50
-rw-r--r--  sys/ia64/include/sysarch.h | 49
-rw-r--r--  sys/ia64/include/types.h | 73
-rw-r--r--  sys/ia64/include/ucontext.h | 67
-rw-r--r--  sys/ia64/include/varargs.h | 57
-rw-r--r--  sys/ia64/include/vmparam.h | 161
-rw-r--r--  sys/ia64/isa/isa.c | 171
-rw-r--r--  sys/ia64/isa/isa_dma.c | 512
100 files changed, 23294 insertions, 0 deletions
diff --git a/sys/amd64/amd64/in_cksum.c b/sys/amd64/amd64/in_cksum.c
new file mode 100644
index 0000000..4b7fca2
--- /dev/null
+++ b/sys/amd64/amd64/in_cksum.c
@@ -0,0 +1,249 @@
+/* $FreeBSD$ */
+/* $NetBSD: in_cksum.c,v 1.7 1997/09/02 13:18:15 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1988, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1996
+ * Matt Thomas <matt@3am-software.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers
+ * (Portable Alpha version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
+#define REDUCE32 \
+ { \
+ q_util.q = sum; \
+ sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ }
+#define REDUCE16 \
+ { \
+ q_util.q = sum; \
+ l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ sum = l_util.s[0] + l_util.s[1]; \
+ ADDCARRY(sum); \
+ }
+
+static const u_int32_t in_masks[] = {
+ /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/
+ 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
+ 0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
+ 0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
+ 0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
+};
+
+union l_util {
+ u_int16_t s[2];
+ u_int32_t l;
+};
+union q_util {
+ u_int16_t s[4];
+ u_int32_t l[2];
+ u_int64_t q;
+};
+
+u_int64_t in_cksumdata __P((caddr_t buf, int len));
+
+u_int64_t
+in_cksumdata(buf, len)
+ register caddr_t buf;
+ register int len;
+{
+ const u_int32_t *lw = (u_int32_t *) buf;
+ u_int64_t sum = 0;
+ u_int64_t prefilled;
+ int offset;
+ union q_util q_util;
+
+ if ((3 & (long) lw) == 0 && len == 20) {
+ sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+ REDUCE32;
+ return sum;
+ }
+
+ if ((offset = 3 & (long) lw) != 0) {
+ const u_int32_t *masks = in_masks + (offset << 2);
+ lw = (u_int32_t *) (((long) lw) - offset);
+ sum = *lw++ & masks[len >= 3 ? 3 : len];
+ len -= 4 - offset;
+ if (len <= 0) {
+ REDUCE32;
+ return sum;
+ }
+ }
+#if 0
+ /*
+ * Force to cache line boundary.
+ */
+ offset = 32 - (0x1f & (long) lw);
+ if (offset < 32 && len > offset) {
+ len -= offset;
+ if (4 & offset) {
+ sum += (u_int64_t) lw[0];
+ lw += 1;
+ }
+ if (8 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1];
+ lw += 2;
+ }
+ if (16 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ }
+#endif
+ /*
+	 * Access the next cache line early (prefill) to start its load,
+	 * then add the words of the current cache line.
+	 * Save the prefilled word for the next loop iteration.
+ */
+ prefilled = lw[0];
+ while ((len -= 32) >= 4) {
+ u_int64_t prefilling = lw[8];
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ prefilled = prefilling;
+ }
+ if (len >= 0) {
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ } else {
+ len += 32;
+ }
+ while ((len -= 16) >= 0) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ len += 16;
+ while ((len -= 4) >= 0) {
+ sum += (u_int64_t) *lw++;
+ }
+ len += 4;
+ if (len > 0)
+ sum += (u_int64_t) (in_masks[len] & *lw);
+ REDUCE32;
+ return sum;
+}
+
+u_short
+in_addword(u_short a, u_short b)
+{
+ u_int64_t sum = a + b;
+
+ ADDCARRY(sum);
+ return (sum);
+}
+
+u_short
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+{
+ u_int64_t sum;
+ union q_util q_util;
+ union l_util l_util;
+
+ sum = (u_int64_t) a + b + c;
+ REDUCE16;
+ return (sum);
+}
+
+u_short
+in_cksum_skip(m, len, skip)
+ struct mbuf *m;
+ int len;
+ int skip;
+{
+ u_int64_t sum = 0;
+ int mlen = 0;
+ int clen = 0;
+ caddr_t addr;
+ union q_util q_util;
+ union l_util l_util;
+
+ len -= skip;
+ for (; skip && m; m = m->m_next) {
+ if (m->m_len > skip) {
+ mlen = m->m_len - skip;
+ addr = mtod(m, caddr_t) + skip;
+ goto skip_start;
+ } else {
+ skip -= m->m_len;
+ }
+ }
+
+ for (; m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ mlen = m->m_len;
+ addr = mtod(m, caddr_t);
+skip_start:
+ if (len < mlen)
+ mlen = len;
+ if ((clen ^ (long) addr) & 1)
+ sum += in_cksumdata(addr, mlen) << 8;
+ else
+ sum += in_cksumdata(addr, mlen);
+
+ clen += mlen;
+ len -= mlen;
+ }
+ REDUCE16;
+ return (~sum & 0xffff);
+}
+
+u_int in_cksum_hdr(ip)
+ const struct ip *ip;
+{
+ u_int64_t sum = in_cksumdata((caddr_t) ip, sizeof(struct ip));
+ union q_util q_util;
+ union l_util l_util;
+ REDUCE16;
+ return (~sum & 0xffff);
+}
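
The ADDCARRY/REDUCE32/REDUCE16 macros above fold the 64-bit accumulator built by in_cksumdata() back into a 16-bit ones-complement sum via the q_util/l_util unions. A minimal standalone sketch of the same end-around-carry folding, using a hypothetical fold64() helper that is not part of this commit:

#include <stdint.h>
#include <stdio.h>

/* Fold a 64-bit sum of 16-bit words to 16 bits with end-around carry. */
static uint16_t
fold64(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)sum);
}

int
main(void)
{
	/* 0xffff + 0x0001 wraps around to 0x0001, not 0x0000. */
	printf("0x%04x\n", fold64((uint64_t)0xffff + 0x0001));
	return (0);
}
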
diff --git a/sys/amd64/include/in_cksum.h b/sys/amd64/include/in_cksum.h
new file mode 100644
index 0000000..7a98720
--- /dev/null
+++ b/sys/amd64/include/in_cksum.h
@@ -0,0 +1,80 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
+ * from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define _MACHINE_IN_CKSUM_H_ 1
+
+#include <sys/cdefs.h>
+
+#define in_cksum(m, len) in_cksum_skip(m, len, 0)
+
+/*
+ * It is useful to have an Internet checksum routine which is inlineable
+ * and optimized specifically for the task of computing IP header checksums
+ * in the normal case (where there are no options and the header length is
+ * therefore always exactly five 32-bit words).
+ */
+#ifdef __GNUC__
+
+static __inline void
+in_cksum_update(struct ip *ip)
+{
+ int __tmpsum;
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256;
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16));
+}
+
+#else
+
+#define in_cksum_update(ip) \
+ do { \
+ int __tmpsum; \
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256; \
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16)); \
+ } while(0)
+
+#endif
+
+#ifdef _KERNEL
+u_int in_cksum_hdr(const struct ip *ip);
+u_short in_addword(u_short sum, u_short b);
+u_short in_pseudo(u_int sum, u_int b, u_int c);
+u_short in_cksum_skip(struct mbuf *m, int len, int skip);
+#endif
+
+#endif /* _MACHINE_IN_CKSUM_H_ */
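
The in_cksum_update() inline above is the usual RFC 1624-style incremental fix-up for the case where a forwarding path decrements ip_ttl by one: the TTL occupies the high byte of one 16-bit header word, so the stored checksum must grow by 0x0100 (256), with any carry folded back in. A hypothetical standalone check of that arithmetic against a full recomputation (the sample header words are illustrative, and byte order is ignored here, whereas the kernel macro converts with ntohs()/htons()):

#include <assert.h>
#include <stdint.h>

/* Ones-complement checksum over n 16-bit words (host order). */
static uint16_t
cksum16(const uint16_t *w, int n)
{
	uint32_t sum = 0;

	while (n-- > 0)
		sum += *w++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)~sum);
}

int
main(void)
{
	/* An IPv4 header as ten 16-bit words, checksum field (word 5) zeroed. */
	uint16_t hdr[10] = { 0x4500, 0x0054, 0x1234, 0x4000,
	    0x4001, 0x0000, 0xc0a8, 0x0001, 0xc0a8, 0x0002 };
	uint16_t ck, newck;
	int tmp;

	ck = cksum16(hdr, 10);			/* checksum before the change */
	hdr[4] -= 0x0100;			/* ip_ttl-- (high byte of word 4) */
	tmp = ck + 256;				/* the in_cksum_update() arithmetic */
	newck = (uint16_t)(tmp + (tmp >> 16));
	assert(newck == cksum16(hdr, 10));	/* matches a full recompute */
	return (0);
}
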
diff --git a/sys/conf/Makefile.ia64 b/sys/conf/Makefile.ia64
new file mode 100644
index 0000000..c75c199
--- /dev/null
+++ b/sys/conf/Makefile.ia64
@@ -0,0 +1,340 @@
+# Makefile.ia64 -- with config changes.
+# Copyright 1990 W. Jolitz
+# from: src/sys/conf/Makefile.alpha,v 1.76
+# $FreeBSD$
+#
+# Makefile for FreeBSD
+#
+# This makefile is constructed from a machine description:
+# config machineid
+# Most changes should be made in the machine description
+# /sys/alpha/conf/``machineid''
+# after which you should do
+# config machineid
+# Generic makefile changes should be made in
+# /sys/alpha/conf/Makefile.alpha
+# after which config should be rerun for all machines.
+#
+
+# The Linux cross tools don't understand -fformat-extensions
+CWARNFLAGS= -Wall -Wredundant-decls -Wnested-externs -Wstrict-prototypes \
+ -Wmissing-prototypes -Wpointer-arith -Winline -Wcast-qual \
+ -ansi
+CC= ia64-unknown-linux-gcc -D__FreeBSD__ -U__linux__
+LD= ia64-unknown-linux-ld
+SIZE= ia64-unknown-linux-size
+OBJCOPY= ia64-unknown-linux-objcopy
+MACHINE_ARCH= ia64
+
+# Which version of config(8) is required.
+%VERSREQ= 500003
+
+# Can be overridden by makeoptions or /etc/make.conf
+KERNEL_KO?= kernel
+KERNEL?= kernel
+KODIR?= /boot/${KERNEL}
+STD8X16FONT?= iso
+
+.if !defined(S)
+.if exists(./@/.)
+S= ./@
+.else
+S= ../..
+.endif
+.endif
+M= ${MACHINE_ARCH}
+
+SIZE?= size
+OBJCOPY?= objcopy
+
+COPTFLAGS?=-O
+INCLUDES= -nostdinc -I- -I. -I$S
+# This hack is to allow kernel compiles to succeed on machines w/out srcdist
+.if exists($S/../include)
+INCLUDES+= -I$S/../include
+.else
+INCLUDES+= -I/usr/include
+.endif
+COPTS= ${INCLUDES} ${IDENT} -D_KERNEL -include opt_global.h
+CFLAGS= ${COPTFLAGS} ${CWARNFLAGS} ${DEBUG} ${COPTS}
+
+# XXX LOCORE means "don't declare C stuff" not "for locore.s".
+ASM_CFLAGS= -x assembler-with-cpp -Wa,-x -DLOCORE ${CFLAGS}
+
+# Select the correct set of tools. Can't set OBJFORMAT here because it
+# doesn't get exported into the environment, and if it were exported
+# then it might break building of utilities.
+#FMT= -elf
+CFLAGS+= ${FMT}
+
+DEFINED_PROF= ${PROF}
+.if defined(PROF)
+CFLAGS+= -malign-functions=4
+.if ${PROFLEVEL} >= 2
+IDENT+= -DGPROF4 -DGUPROF
+PROF+= -mprofiler-epilogue
+.endif
+.endif
+
+# Put configuration-specific C flags last (except for ${PROF}) so that they
+# can override the others.
+CFLAGS+= ${CONF_CFLAGS}
+
+NORMAL_C= ${CC} -c ${CFLAGS} ${PROF} ${.IMPSRC}
+NORMAL_C_C= ${CC} -c ${CFLAGS} ${PROF} ${.IMPSRC}
+NORMAL_S= ${CC} -c ${ASM_CFLAGS} ${.IMPSRC}
+PROFILE_C= ${CC} -c ${CFLAGS} ${.IMPSRC}
+
+NORMAL_M= perl5 $S/kern/makeobjops.pl -c $<; \
+ ${CC} -c ${CFLAGS} ${PROF} ${.PREFIX}.c
+
+GEN_CFILES= $S/$M/$M/genassym.c
+# setdef0.c and setdef1.c are intentionally
+# omitted from SYSTEM_CFILES. They include setdefs.h, a header which
+# is generated from all of ${OBJS}. We don't want to have to compile
+# everything just to do a make depend.
+SYSTEM_CFILES= param.c vnode_if.c hints.c config.c
+SYSTEM_SFILES= $S/$M/$M/locore.s
+SYSTEM_DEP= Makefile ${SYSTEM_OBJS}
+SYSTEM_OBJS= locore.o setdef0.o vnode_if.o ${OBJS} param.o hints.o config.o \
+ setdef1.o # hack.So ski can't cope with dynamic relocs
+SYSTEM_LD= @${LD} ${FMT} -Bdynamic -T $S/conf/ldscript.$M \
+ -e locorestart \
+ -export-dynamic -dynamic-linker /red/herring \
+ -o ${.TARGET} -X ${SYSTEM_OBJS} vers.o
+SYSTEM_LD_TAIL= @${OBJCOPY} --strip-symbol gcc2_compiled. ${.TARGET} ; \
+ ${SIZE} ${FMT} ${.TARGET} ; chmod 755 ${.TARGET}
+SYSTEM_DEP+= $S/conf/ldscript.$M
+
+%BEFORE_DEPEND
+
+%OBJS
+
+%CFILES
+
+%SFILES
+
+%MFILES
+
+%CLEAN
+
+.PHONY: all modules
+all: ${KERNEL_KO}
+
+depend: kernel-depend
+clean: kernel-clean
+cleandepend: kernel-cleandepend
+tags: kernel-tags
+install install.debug: kernel-install
+reinstall reinstall.debug: kernel-reinstall
+
+.if !defined(DEBUG)
+FULLKERNEL= ${KERNEL_KO}
+.else
+FULLKERNEL= ${KERNEL_KO}.debug
+${KERNEL_KO}: ${FULLKERNEL}
+ ${OBJCOPY} --strip-debug ${FULLKERNEL} ${KERNEL_KO}
+.endif
+
+${FULLKERNEL}: ${SYSTEM_DEP} vers.o
+ @rm -f ${.TARGET}
+ @echo linking ${.TARGET}
+ ${SYSTEM_LD}
+ ${SYSTEM_LD_TAIL}
+
+.if !exists(.depend)
+${SYSTEM_OBJS}: vnode_if.h ${BEFORE_DEPEND:M*.h} ${MFILES:T:S/.m$/.h/}
+.endif
+
+.for mfile in ${MFILES}
+${mfile:T:S/.m$/.h/}: ${mfile}
+ perl5 $S/kern/makeobjops.pl -h ${mfile}
+.endfor
+
+kernel-clean:
+ rm -f *.o *.so *.So *.ko *.s eddep errs \
+ ${FULLKERNEL} ${KERNEL_KO} linterrs makelinks param.c \
+ setdef[01].c setdefs.h tags \
+ vers.c vnode_if.c vnode_if.h \
+ ${MFILES:T:S/.m$/.c/} ${MFILES:T:S/.m$/.h/} \
+ ${CLEAN}
+
+#lint: /tmp param.c
+# @lint -hbxn -DGENERIC -Dvolatile= ${COPTS} \
+# $S/$M/$M/Locore.c ${CFILES} param.c | \
+# grep -v 'struct/union .* never defined' | \
+# grep -v 'possible pointer alignment problem'
+
+locore.o: $S/$M/$M/locore.s assym.s
+ ${NORMAL_S}
+
+# This is a hack. BFD "optimizes" away dynamic mode if there are no
+# dynamic references. We could probably do a '-Bforcedynamic' mode like
+# in the a.out ld. For now, this works.
+hack.So: Makefile
+ touch hack.c
+ ${CC} ${FMT} -shared -nostdlib hack.c -o hack.So
+ rm -f hack.c
+
+.ORDER: setdefs.h setdef0.c setdef1.c
+
+setdef0.o: setdef0.c setdefs.h
+ ${NORMAL_C}
+
+setdef1.o: setdef1.c setdefs.h
+ ${NORMAL_C}
+
+setdef0.c setdef1.c setdefs.h: Makefile ${OBJS}
+ @./gensetdefs ${OBJS}
+
+# this rule stops ./assym.s in .depend from causing problems
+./assym.s: assym.s
+
+assym.s: $S/$M/$M/genassym.sh genassym.o
+ sh $S/$M/$M/genassym.sh genassym.o > ${.TARGET}
+
+genassym.o: $S/$M/$M/genassym.c
+ ${CC} -c ${CFLAGS} $S/$M/$M/genassym.c
+
+${SYSTEM_OBJS} genassym.o vers.o: opt_global.h
+
+kernel-depend: assym.s param.c vnode_if.h ${BEFORE_DEPEND} \
+ ${CFILES} ${SYSTEM_CFILES} ${GEN_CFILES} ${SFILES} \
+ ${SYSTEM_SFILES} ${MFILES:T:S/.m$/.h/}
+ rm -f .newdep
+ CC="${CC}" mkdep -a -f .newdep ${CFLAGS} ${CFILES} ${SYSTEM_CFILES} ${GEN_CFILES}
+ env MKDEP_CPP="${CC} -E" \
+ mkdep -a -f .newdep ${ASM_CFLAGS} ${SFILES} ${SYSTEM_SFILES}
+ rm -f .depend
+ mv -f .newdep .depend
+
+kernel-cleandepend:
+ rm -f .depend
+
+links:
+ egrep '#if' ${CFILES} | sed -f $S/conf/defines | \
+ sed -e 's/:.*//' -e 's/\.c/.o/' | sort -u > dontlink
+ echo ${CFILES} | tr -s ' ' '\12' | sed 's/\.c/.o/' | \
+ sort -u | comm -23 - dontlink | \
+ sed 's,../.*/\(.*.o\),rm -f \1;ln -s ../GENERIC/\1 \1,' > makelinks
+ sh makelinks && rm -f dontlink
+
+kernel-tags:
+ @[ -f .depend ] || { echo "you must make depend first"; exit 1; }
+ sh $S/conf/systags.sh
+ rm -f tags1
+ sed -e 's, ../, ,' tags > tags1
+
+kernel-install kernel-install.debug:
+.if exists(${DESTDIR}/boot)
+ @if [ ! -f ${DESTDIR}/boot/device.hints ] ; then \
+ echo "You must set up a ${DESTDIR}/boot/device.hints file first." ; \
+ exit 1 ; \
+ fi
+ @if [ x"`grep device.hints ${DESTDIR}/boot/defaults/loader.conf ${DESTDIR}/boot/loader.conf`" = "x" ]; then \
+ echo "You must activate /boot/device.hints in loader.conf." ; \
+ exit 1 ; \
+ fi
+.endif
+ @if [ ! -f ${KERNEL_KO}${.TARGET:S/kernel-install//} ] ; then \
+ echo "You must build a kernel first." ; \
+ exit 1 ; \
+ fi
+.if exists(${DESTDIR}${KODIR})
+.if exists(${DESTDIR}${KODIR}.old)
+ @-chflags -R noschg ${DESTDIR}${KODIR}.old
+ -rm -rf ${DESTDIR}${KODIR}.old
+.endif
+ mv ${DESTDIR}${KODIR} ${DESTDIR}${KODIR}.old
+.endif
+ mkdir -p ${DESTDIR}${KODIR}
+ @if [ -f ${DESTDIR}${KODIR}/${KERNEL_KO} ] ; then \
+ chflags noschg ${DESTDIR}${KODIR}/${KERNEL_KO} ; \
+ fi
+ install -c -m 555 -o root -g wheel -fschg \
+ ${KERNEL_KO}${.TARGET:S/kernel-install//} ${DESTDIR}${KODIR}
+
+kernel-reinstall kernel-reinstall.debug:
+ @if [ -f ${DESTDIR}${KODIR}/${KERNEL_KO} ] ; then \
+ chflags noschg ${DESTDIR}${KODIR}/${KERNEL_KO} ; \
+ fi
+ install -c -m 555 -o root -g wheel -fschg \
+ ${KERNEL_KO}${.TARGET:S/kernel-reinstall//} ${DESTDIR}${KODIR}
+
+.if !defined(MODULES_WITH_WORLD) && !defined(NO_MODULES) && exists($S/modules)
+all: modules
+depend: modules-depend
+clean: modules-clean
+cleandepend: modules-cleandepend
+tags: modules-tags
+install install.debug: modules-install
+reinstall reinstall.debug: modules-reinstall
+.endif
+
+MKMODULESENV= MAKEOBJDIRPREFIX=${.OBJDIR}/modules KMODDIR=${DESTDIR}${KODIR}
+
+modules:
+ @mkdir -p ${.OBJDIR}/modules
+ cd $S/modules && env ${MKMODULESENV} ${MAKE} obj all
+
+modules-depend:
+ @mkdir -p ${.OBJDIR}/modules
+ cd $S/modules && env ${MKMODULESENV} ${MAKE} obj depend
+
+modules-clean:
+ cd $S/modules && env ${MKMODULESENV} ${MAKE} clean
+
+modules-cleandepend:
+ cd $S/modules && env ${MKMODULESENV} ${MAKE} cleandepend
+
+modules-cleandir:
+ cd $S/modules && env ${MKMODULESENV} ${MAKE} cleandir
+
+modules-tags:
+ cd $S/modules && env ${MKMODULESENV} ${MAKE} tags
+
+modules-install modules-install.debug:
+ cd $S/modules && env ${MKMODULESENV} ${MAKE} install
+
+modules-reinstall modules-reinstall.debug:
+ cd $S/modules && env ${MKMODULESENV} ${MAKE} install
+
+config.o:
+ ${NORMAL_C}
+
+param.c: $S/conf/param.c
+ -rm -f param.c
+ cp $S/conf/param.c .
+
+param.o:
+ ${NORMAL_C}
+
+vers.c: $S/conf/newvers.sh $S/sys/param.h ${SYSTEM_DEP}
+ sh $S/conf/newvers.sh ${KERN_IDENT} ${IDENT}
+
+# XXX strictly, everything depends on Makefile because changes to ${PROF}
+# only appear there, but we don't handle that.
+vers.o:
+ ${NORMAL_C}
+
+hints.o: hints.c
+ ${NORMAL_C}
+
+vnode_if.c: $S/kern/vnode_if.pl $S/kern/vnode_if.src
+ perl5 $S/kern/vnode_if.pl -c $S/kern/vnode_if.src
+
+vnode_if.h: $S/kern/vnode_if.pl $S/kern/vnode_if.src
+ perl5 $S/kern/vnode_if.pl -h $S/kern/vnode_if.src
+
+vnode_if.o:
+ ${NORMAL_C}
+
+.if exists($S/../share/mk)
+.include "$S/../share/mk/bsd.kern.mk"
+.else
+.include <bsd.kern.mk>
+.endif
+
+%RULES
+
+# DO NOT DELETE THIS LINE -- make depend uses it
diff --git a/sys/conf/files.ia64 b/sys/conf/files.ia64
new file mode 100644
index 0000000..a2b3e67
--- /dev/null
+++ b/sys/conf/files.ia64
@@ -0,0 +1,113 @@
+# This file tells config what files go into building a kernel;
+# files marked standard are always included.
+#
+# $FreeBSD$
+#
+# The long compile-with and dependency lines are required because of
+# limitations in config: backslash-newline doesn't work in strings, and
+# dependency lines other than the first are silently ignored.
+#
+#
+font8x16.o optional std8x16font \
+ compile-with "uudecode < /usr/share/syscons/fonts/${STD8X16FONT}-8x16.fnt && file2c 'unsigned char font_16[16*256] = {' '};' < ${STD8X16FONT}-8x16 > font8x16.c && ${CC} -c ${CFLAGS} font8x16.c" \
+ no-implicit-rule before-depend \
+ clean "${STD8X16FONT}-8x16 font8x16.c"
+# make sure apm.h gets made correctly
+apm.h standard \
+ compile-with "echo '#define NAPM 0' > apm.h" \
+ no-obj no-implicit-rule before-depend
+#
+atkbdmap.h optional atkbd_dflt_keymap \
+ compile-with "/usr/sbin/kbdcontrol -L ${ATKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > atkbdmap.h" \
+ no-obj no-implicit-rule before-depend \
+ clean "atkbdmap.h"
+#
+ia64/ia64/ia64-gdbstub.c optional ddb
+ia64/ia64/autoconf.c standard
+ia64/ia64/busdma_machdep.c standard
+ia64/ia64/clock.c standard
+ia64/ia64/clock_if.m standard
+ia64/ia64/db_disasm.c optional ddb
+ia64/ia64/db_interface.c optional ddb
+ia64/ia64/db_trace.c optional ddb
+ia64/ia64/elf_machdep.c standard
+ia64/ia64/exception.s standard
+ia64/ia64/in_cksum.c optional inet
+ia64/ia64/interrupt.c standard
+ia64/ia64/ipl_funcs.c standard
+# locore.s needs to be handled in Makefile to put it first. Otherwise it's
+# now normal.
+# ia64/ia64/locore.s standard
+ia64/ia64/machdep.c standard
+ia64/ia64/mem.c standard
+ia64/ia64/mountroot.c optional slice
+ia64/ia64/mp_machdep.c standard
+ia64/ia64/pal.s standard
+ia64/ia64/perfmon.c optional perfmon profiling-routine
+ia64/ia64/perfmon.c optional perfmon
+ia64/ia64/pmap.c standard
+ia64/ia64/procfs_machdep.c standard
+ia64/ia64/support.s standard
+ia64/ia64/ssc.c standard
+ia64/ia64/swtch.s standard
+ia64/ia64/sys_machdep.c standard
+ia64/ia64/synch_machdep.c standard
+ia64/ia64/trap.c standard
+ia64/ia64/userconfig.c optional userconfig
+ia64/ia64/vm_machdep.c standard
+ia64/isa/isa.c optional isa
+ia64/isa/isa_dma.c optional isa
+dev/advansys/adv_isa.c optional adv isa
+dev/aic/aic_isa.c optional aic isa
+dev/ata/ata-all.c count ata
+dev/ata/ata-disk.c count atadisk
+dev/ata/ata-dma.c optional ata
+dev/ata/atapi-all.c count atapicd
+dev/ata/atapi-all.c count atapifd
+dev/ata/atapi-all.c count atapist
+dev/ata/atapi-cd.c optional atapicd
+dev/ata/atapi-fd.c optional atapifd
+dev/ata/atapi-tape.c optional atapist
+dev/fb/fb.c optional fb
+dev/fb/fb.c optional vga
+dev/fb/splash.c count splash
+dev/fb/vga.c optional vga
+dev/kbd/atkbd.c optional atkbd
+dev/kbd/atkbdc.c count atkbdc
+dev/kbd/kbd.c optional atkbd
+dev/kbd/kbd.c optional kbd
+dev/kbd/kbd.c optional sc
+dev/kbd/kbd.c optional ukbd
+dev/syscons/schistory.c count sc
+dev/syscons/scmouse.c optional sc
+dev/syscons/scterm.c optional sc
+dev/syscons/scterm-dumb.c optional sc
+dev/syscons/scterm-sc.c optional sc
+dev/syscons/scvgarndr.c optional sc vga
+dev/syscons/scvidctl.c optional sc
+dev/syscons/scvtb.c optional sc
+dev/syscons/syscons.c optional sc
+dev/syscons/sysmouse.c optional sc
+isa/atkbd_isa.c optional atkbd
+isa/atkbdc_isa.c optional atkbdc
+isa/fd.c optional fdc
+isa/ppc.c optional ppc
+isa/psm.c optional psm
+isa/sio.c count sio
+isa/syscons_isa.c optional sc
+isa/vga_isa.c optional vga
+kern/subr_diskmbr.c standard
+libkern/ia64/htonl.S standard
+libkern/ia64/htons.S standard
+libkern/ia64/ntohl.S standard
+libkern/ia64/ntohs.S standard
+libkern/ia64/__divsi3.s standard
+libkern/ia64/__modsi3.s standard
+libkern/ia64/__udivsi3.s standard
+libkern/ia64/__umodsi3.s standard
+libkern/ia64/__divdi3.s standard
+libkern/ia64/__moddi3.s standard
+libkern/ia64/__udivdi3.s standard
+libkern/ia64/__umoddi3.s standard
+libkern/bcmp.c standard
+libkern/ffs.c standard
diff --git a/sys/conf/ldscript.ia64 b/sys/conf/ldscript.ia64
new file mode 100644
index 0000000..e018d3c
--- /dev/null
+++ b/sys/conf/ldscript.ia64
@@ -0,0 +1,134 @@
+/* $FreeBSD$ */
+OUTPUT_FORMAT("elf64-ia64-little", "elf64-ia64-little", "elf64-ia64-little")
+OUTPUT_ARCH(ia64)
+ENTRY(__start)
+SEARCH_DIR(/usr/lib);
+kernel_text = 0xe000000000500000;
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0xe000000000500000;
+ .interp : { *(.interp) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.text :
+ { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+ .rela.text :
+ { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+ .rel.data :
+ { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+ .rela.data :
+ { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+ .rel.rodata :
+ { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+ .rela.rodata :
+ { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.bss : { *(.rel.bss) }
+ .rela.bss : { *(.rela.bss) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .init : { *(.init) } =0x47ff041f
+ .text :
+ {
+ *(.text)
+ *(.stub)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.gnu.linkonce.t*)
+ } =0x47ff041f
+ _etext = .;
+ PROVIDE (etext = .);
+ .fini : { *(.fini) } =0x47ff041f
+ .rodata : { *(.rodata) *(.gnu.linkonce.r*) }
+ .rodata1 : { *(.rodata1) }
+ .reginfo : { *(.reginfo) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = .;
+ .data :
+ {
+ *(.data)
+ *(.gnu.linkonce.d*)
+ CONSTRUCTORS
+ }
+ .data1 : { *(.data1) }
+ .ctors :
+ {
+ *(.ctors)
+ }
+ .dtors :
+ {
+ *(.dtors)
+ }
+ .plt : { *(.plt) }
+ /* gp points at .got+(1<<21) */
+ __gp = ALIGN(8) + 0x200000;
+ .got : { *(.got.plt) *(.got) }
+ .dynamic : { *(.dynamic) }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata : { *(.sdata) }
+ _edata = .;
+ PROVIDE (edata = .);
+ __bss_start = .;
+ .sbss : { *(.sbss) *(.scommon) }
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ }
+ . = ALIGN(64 / 8);
+ _end = . ;
+ PROVIDE (end = .);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* These must appear regardless of . */
+}
+
diff --git a/sys/conf/options.ia64 b/sys/conf/options.ia64
new file mode 100644
index 0000000..2d92667
--- /dev/null
+++ b/sys/conf/options.ia64
@@ -0,0 +1,55 @@
+# $FreeBSD$
+
+ITANIUM opt_global.h
+
+PPC_PROBE_CHIPSET opt_ppc.h
+PPC_DEBUG opt_ppc.h
+
+SHOW_BUSYBUFS
+PANIC_REBOOT_WAIT_TIME opt_panic.h
+
+MAXCONS opt_syscons.h
+SC_ALT_MOUSE_IMAGE opt_syscons.h
+SC_DEBUG_LEVEL opt_syscons.h
+SC_DFLT_FONT opt_syscons.h
+SC_DISABLE_DDB opt_syscons.h
+SC_DISABLE_REBOOT opt_syscons.h
+SC_HISTORY_SIZE opt_syscons.h
+SC_KERNEL_CONS_ATTR opt_syscons.h
+SC_KERNEL_CONS_REV_ATTR opt_syscons.h
+SC_MOUSE_CHAR opt_syscons.h
+SC_NO_CUTPASTE opt_syscons.h
+SC_NO_FONT_LOADING opt_syscons.h
+SC_NO_HISTORY opt_syscons.h
+SC_NO_SYSMOUSE opt_syscons.h
+SC_NORM_ATTR opt_syscons.h
+SC_NORM_REV_ATTR opt_syscons.h
+SC_PIXEL_MODE opt_syscons.h
+SC_RENDER_DEBUG opt_syscons.h
+SC_TWOBUTTON_MOUSE opt_syscons.h
+
+VGA_ALT_SEQACCESS opt_vga.h
+VGA_DEBUG opt_vga.h
+VGA_NO_FONT_LOADING opt_vga.h
+VGA_NO_MODE_CHANGE opt_vga.h
+VGA_SLOW_IOACCESS opt_vga.h
+VGA_WIDTH90 opt_vga.h
+
+PSM_HOOKRESUME opt_psm.h
+PSM_RESETAFTERSUSPEND opt_psm.h
+PSM_DEBUG opt_psm.h
+
+# Atkbd options
+ATKBD_DFLT_KEYMAP opt_atkbd.h
+
+# Kbd options
+KBD_DISABLE_KEYMAP_LOAD opt_kbd.h
+KBD_INSTALL_CDEV opt_kbd.h
+KBD_MAXRETRY opt_kbd.h
+KBD_MAXWAIT opt_kbd.h
+KBD_RESETDELAY opt_kbd.h
+KBDIO_DEBUG opt_kbd.h
+
+# Clock options
+CLK_USE_I8254_CALIBRATION opt_clock.h
+TIMER_FREQ opt_clock.h
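
Each entry above maps a kernel option to the opt_*.h header that config(8) generates for it, and sources consume the option by including that header rather than relying on command-line defines. A hedged sketch of the consuming side, with an illustrative fallback (1193182 Hz is the conventional i8254 input frequency):

#include "opt_clock.h"		/* generated by config(8); may define TIMER_FREQ */

#ifndef TIMER_FREQ
#define	TIMER_FREQ	1193182	/* default i8254 input frequency in Hz */
#endif

static int timer_freq = TIMER_FREQ;
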
diff --git a/sys/ia64/conf/GENERIC b/sys/ia64/conf/GENERIC
new file mode 100644
index 0000000..c5f9724
--- /dev/null
+++ b/sys/ia64/conf/GENERIC
@@ -0,0 +1,165 @@
+#
+# GENERIC -- Generic kernel configuration file for FreeBSD/alpha
+#
+# For more information on this file, please read the handbook section on
+# Kernel Configuration Files:
+#
+# http://www.FreeBSD.org/handbook/kernelconfig-config.html
+#
+# The handbook is also available locally in /usr/share/doc/handbook
+# if you've installed the doc distribution, otherwise always see the
+# FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the
+# latest information.
+#
+# An exhaustive list of options and more detailed explanations of the
+# device lines is also present in the ../../i386/conf/NOTES file.
+# If you are in doubt as to the purpose or necessity of a line, check first
+# in NOTES. Please note that this is the i386 NOTES, but it still contains
+# valuable info for alpha too.
+#
+# For hardware specific information check HARDWARE.TXT
+#
+# $FreeBSD$
+
+machine ia64
+cpu ITANIUM
+ident GENERIC
+maxusers 32
+
+#To statically compile in device wiring instead of /boot/device.hints
+#hints "GENERIC.hints"
+
+#makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols
+
+options INET #InterNETworking
+options INET6 #IPv6 communications protocols
+options FFS #Berkeley Fast Filesystem
+options FFS_ROOT #FFS usable as root device [keep this!]
+options SOFTUPDATES #Enable FFS soft updates support
+options MFS #Memory Filesystem
+#options DEVFS #Device Filesystem
+options MD_ROOT #MD is a potential root device
+options NFS #Network Filesystem
+options NFS_ROOT #NFS usable as root device
+options MSDOSFS #MSDOS Filesystem
+options CD9660 #ISO 9660 Filesystem
+options CD9660_ROOT #CD-ROM usable as root device
+options PROCFS #Process filesystem
+options COMPAT_43 #Compatible with BSD 4.3 [KEEP THIS!]
+options SCSI_DELAY=15000 #Delay (in ms) before probing SCSI
+options UCONSOLE #Allow users to grab the console
+options KTRACE #ktrace(1) syscall trace support
+options SYSVSHM #SYSV-style shared memory
+options SYSVMSG #SYSV-style message queues
+options SYSVSEM #SYSV-style semaphores
+options 	P1003_1B		#Posix P1003_1B real-time extensions
+options _KPOSIX_PRIORITY_SCHEDULING
+
+# Standard busses
+device isa
+device pci
+options 	COMPAT_OLDPCI	# PCI compatibility shims
+
+# Floppy drives
+device fdc
+
+# ATA and ATAPI devices
+device ata
+device atadisk # ATA disk drives
+device atapicd # ATAPI CDROM drives
+device atapifd # ATAPI floppy drives
+device atapist # ATAPI tape drives
+
+# SCSI Controllers
+device ahc # AHA2940 and onboard AIC7xxx devices
+#device esp # 53C94 & friends, not CAM-ified
+device isp # Qlogic family
+#device ncr # NCR/Symbios Logic
+device sym # NCR/Symbios Logic (newer chipsets + those of `ncr')
+
+# SCSI peripherals
+device scbus # SCSI bus (required)
+device da # Direct Access (disks)
+device sa # Sequential Access (tape etc)
+device cd # CD
+device pass # Passthrough device (direct SCSI access)
+
+# RAID controllers
+device amr # AMI MegaRAID
+device mlx # Mylex DAC960 family
+
+# atkbdc0 controls both the keyboard and the PS/2 mouse
+device atkbdc 1
+device atkbd
+device psm
+
+#device vga
+
+# splash screen/screen saver
+device splash
+
+# syscons is the default console driver, resembling an SCO console
+#device sc 1
+
+# real time clock
+device mcclock
+
+# Serial (COM) ports
+device sio
+
+# Parallel port
+device ppc
+device ppbus # Parallel port bus (required)
+device lpt # Printer
+device plip # TCP/IP over parallel
+device ppi # Parallel port interface device
+#device vpo # Requires scbus and da
+
+# PCI Ethernet NICs.
+device de # DEC/Intel DC21x4x (``Tulip'')
+device fxp # Intel EtherExpress PRO/100B (82557, 82558)
+device wx # Intel Gigabit Ethernet Card (``Wiseman'')
+
+# PCI Ethernet NICs that use the common MII bus controller code.
+device miibus # MII bus support
+device dc # DEC/Intel 21143 and workalikes
+device rl # RealTek 8129/8139
+device sf # Adaptec AIC-6915 (``Starfire'')
+device sis # Silicon Integrated Systems SiS 900/SiS 7016
+device ste # Sundance ST201 (D-Link DFE-550TX)
+device tl # Texas Instruments ThunderLAN
+device tx # SMC 9432TX (83c170 ``EPIC'')
+device vr # VIA Rhine, Rhine II
+device wb # Winbond W89C840F
+device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'')
+
+# Pseudo devices - the number indicates how many units to allocate.
+device random # Entropy device
+device loop # Network loopback
+device ether # Ethernet support
+device sl # Kernel SLIP
+device ppp 1 # Kernel PPP
+device tun # Packet tunnel.
+device pty # Pseudo-ttys (telnet etc)
+device md # Memory "disks"
+device gif 4 # IPv6 and IPv4 tunneling
+device faith 1 # IPv6-to-IPv4 relaying/(translation)
+
+# The `bpf' device enables the Berkeley Packet Filter.
+# Be aware of the administrative consequences of enabling this!
+device bpf #Berkeley packet filter
+
+# USB support
+device uhci # UHCI PCI->USB interface
+device ohci # OHCI PCI->USB interface
+device usb # USB Bus (required)
+device ugen # Generic
+device uhid # "Human Interface Devices"
+device ukbd # Keyboard
+device ulpt # Printer
+device umass # Disks/Mass storage - Requires scbus and da0
+device ums # Mouse
+# USB Ethernet
+device aue # ADMtek USB ethernet
+device cue # CATC USB ethernet
+device kue # Kawasaki LSI USB ethernet
diff --git a/sys/ia64/conf/GENERIC.hints b/sys/ia64/conf/GENERIC.hints
new file mode 100644
index 0000000..a007536
--- /dev/null
+++ b/sys/ia64/conf/GENERIC.hints
@@ -0,0 +1,26 @@
+# $FreeBSD$
+hint.fdc.0.at="isa"
+hint.fdc.0.port="0x3F0"
+hint.fdc.0.irq="6"
+hint.fdc.0.drq="2"
+hint.fd.0.at="fdc0"
+hint.fd.0.drive="0"
+hint.atkbdc.0.at="isa"
+hint.atkbdc.0.port="0x060"
+hint.atkbd.0.at="atkbdc"
+hint.atkbd.0.irq="1"
+hint.psm.0.at="atkbdc"
+hint.psm.0.irq="12"
+hint.vga.0.at="isa"
+hint.sc.0.at="isa"
+hint.mcclock.0.at="isa"
+hint.mcclock.0.port="0x70"
+hint.sio.0.at="isa"
+hint.sio.0.port="0x3F8"
+hint.sio.0.irq="4"
+hint.sio.1.at="isa"
+hint.sio.1.port="0x2F8"
+hint.sio.1.irq="3"
+hint.sio.1.flags="0x50"
+hint.ppc.0.at="isa"
+hint.ppc.0.irq="7"
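
These hints supply the static device wiring that would otherwise come from /boot/device.hints, and drivers look them up through the resource_*_value() accessors. A hedged sketch of reading one of the entries above, with a hypothetical wrapper function:

#include <sys/param.h>
#include <sys/bus.h>

/* Return hint.sio.1.flags if present (0x50 with the hints above). */
static int
sio1_hinted_flags(void)
{
	int flags;

	if (resource_int_value("sio", 1, "flags", &flags) == 0)
		return (flags);
	return (0);
}
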
diff --git a/sys/ia64/ia64/autoconf.c b/sys/ia64/ia64/autoconf.c
new file mode 100644
index 0000000..66230b7
--- /dev/null
+++ b/sys/ia64/ia64/autoconf.c
@@ -0,0 +1,121 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_bootp.h"
+#include "opt_ffs.h"
+#include "opt_cd9660.h"
+#include "opt_nfs.h"
+#include "opt_nfsroot.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/disklabel.h>
+#include <sys/diskslice.h> /* for BASE_SLICE, MAX_SLICES */
+#include <sys/reboot.h>
+#include <sys/kernel.h>
+#include <sys/mount.h>
+#include <sys/sysctl.h>
+#include <sys/bus.h>
+#include <sys/devicestat.h>
+#include <sys/cons.h>
+
+#include <machine/ipl.h>
+#include <machine/md_var.h>
+#include <machine/bootinfo.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/cam_debug.h>
+
+static void configure __P((void *));
+SYSINIT(configure, SI_SUB_CONFIGURE, SI_ORDER_THIRD, configure, NULL)
+
+#include "isa.h"
+#if NISA > 0
+#include <isa/isavar.h>
+device_t isa_bus_device = 0;
+#endif
+
+extern int nfs_diskless_valid;
+
+dev_t rootdev = NODEV;
+dev_t dumpdev = NODEV;
+
+/*
+ * Determine i/o configuration for a machine.
+ */
+static void
+configure(void *dummy)
+{
+ device_add_child(root_bus, "nexus", 0);
+
+ root_bus_configure();
+
+ /*
+ * Probe ISA devices after everything.
+ */
+#if NISA > 0
+ if (isa_bus_device)
+ isa_probe_children(isa_bus_device);
+#endif
+
+ cninit_finish();
+
+ /*
+ * Now we're ready to handle (pending) interrupts.
+ * XXX this is slightly misplaced.
+ */
+ spl0();
+
+ cold = 0;
+}
+
+/*
+ * Do legacy root filesystem discovery. This isn't really
+ * needed on the Alpha, which has always used the loader.
+ */
+void
+cpu_rootconf()
+{
+ int order = 0;
+#if defined(NFS) && defined(NFS_ROOT)
+#if !defined(BOOTP_NFSROOT)
+ if (nfs_diskless_valid)
+#endif
+ rootdevnames[order++] = "nfs:";
+#endif
+
+#if defined(FFS) && defined(FFS_ROOT)
+ rootdevnames[order++] = "ufs:da0a";
+#endif
+}
+SYSINIT(cpu_rootconf, SI_SUB_ROOT_CONF, SI_ORDER_FIRST, cpu_rootconf, NULL)
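
Nothing calls configure() or cpu_rootconf() directly; the SYSINIT() entries register them so the boot sequence invokes them at SI_SUB_CONFIGURE and SI_SUB_ROOT_CONF. A hedged sketch of the same registration pattern for a hypothetical late-boot hook:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

/* Hypothetical example: log once after device configuration has finished. */
static void
example_post_configure(void *dummy)
{
	printf("autoconfiguration complete, cold=%d\n", cold);
}
SYSINIT(example_post_configure, SI_SUB_CONFIGURE, SI_ORDER_ANY,
    example_post_configure, NULL)
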
diff --git a/sys/ia64/ia64/busdma_machdep.c b/sys/ia64/ia64/busdma_machdep.c
new file mode 100644
index 0000000..6696c58
--- /dev/null
+++ b/sys/ia64/ia64/busdma_machdep.c
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#include <machine/bus.h>
+#include <machine/md_var.h>
+
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#define MAX_BPAGES 128
+
+struct bus_dma_tag {
+ bus_dma_tag_t parent;
+ bus_size_t alignment;
+ bus_size_t boundary;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
+ bus_dma_filter_t *filter;
+ void *filterarg;
+ bus_size_t maxsize;
+ u_int nsegments;
+ bus_size_t maxsegsz;
+ int flags;
+ int ref_count;
+ int map_count;
+};
+
+struct bounce_page {
+ vm_offset_t vaddr; /* kva of bounce buffer */
+ bus_addr_t busaddr; /* Physical address */
+ vm_offset_t datavaddr; /* kva of client data */
+ bus_size_t datacount; /* client data count */
+ STAILQ_ENTRY(bounce_page) links;
+};
+
+int busdma_swi_pending;
+
+static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
+static int free_bpages;
+static int reserved_bpages;
+static int active_bpages;
+static int total_bpages;
+static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;
+
+struct bus_dmamap {
+ struct bp_list bpages;
+ int pagesneeded;
+ int pagesreserved;
+ bus_dma_tag_t dmat;
+ void *buf; /* unmapped buffer pointer */
+ bus_size_t buflen; /* unmapped buffer length */
+ vm_offset_t busaddress; /* address in bus space */
+ bus_dmamap_callback_t *callback;
+ void *callback_arg;
+ void *sgmaphandle; /* handle into sgmap */
+ STAILQ_ENTRY(bus_dmamap) links;
+};
+
+static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
+static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
+static struct bus_dmamap nobounce_dmamap;
+
+static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
+static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
+static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_offset_t vaddr, bus_size_t size);
+static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
+static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
+
+static __inline int
+run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
+{
+ int retval;
+
+ retval = 0;
+ do {
+ if (paddr > dmat->lowaddr
+ && paddr <= dmat->highaddr
+ && (dmat->filter == NULL
+ || (*dmat->filter)(dmat->filterarg, paddr) != 0))
+ retval = 1;
+
+ dmat = dmat->parent;
+ } while (retval == 0 && dmat != NULL);
+ return (retval);
+}
+
+#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
+/*
+ * Allocate a device specific dma_tag.
+ */
+int
+bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_size_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_dma_filter_t *filter,
+ void *filterarg, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
+{
+ bus_dma_tag_t newtag;
+ int error = 0;
+
+ /* Return a NULL tag on failure */
+ *dmat = NULL;
+
+ newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
+ if (newtag == NULL)
+ return (ENOMEM);
+
+ newtag->parent = parent;
+ newtag->alignment = alignment;
+ newtag->boundary = boundary;
+ newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
+ newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
+ newtag->filter = filter;
+ newtag->filterarg = filterarg;
+ newtag->maxsize = maxsize;
+ newtag->nsegments = nsegments;
+ newtag->maxsegsz = maxsegsz;
+ newtag->flags = flags;
+ newtag->ref_count = 1; /* Count ourself */
+ newtag->map_count = 0;
+
+ /* Take into account any restrictions imposed by our parent tag */
+ if (parent != NULL) {
+ newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
+ newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
+ /*
+ * XXX Not really correct??? Probably need to honor boundary
+		 * all the way up the inheritance chain.
+ */
+ newtag->boundary = MAX(parent->boundary, newtag->boundary);
+ if (newtag->filter == NULL) {
+ /*
+ * Short circuit looking at our parent directly
+ * since we have encapsulated all of its information
+ */
+ newtag->filter = parent->filter;
+ newtag->filterarg = parent->filterarg;
+ newtag->parent = parent->parent;
+ }
+ if (newtag->parent != NULL) {
+ parent->ref_count++;
+ }
+ }
+
+ if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
+ /* Must bounce */
+
+ if (lowaddr > bounce_lowaddr) {
+ /*
+ * Go through the pool and kill any pages
+ * that don't reside below lowaddr.
+ */
+ panic("bus_dma_tag_create: page reallocation "
+ "not implemented");
+ }
+ if (ptoa(total_bpages) < maxsize) {
+ int pages;
+
+ pages = atop(maxsize) - total_bpages;
+
+ /* Add pages to our bounce pool */
+ if (alloc_bounce_pages(newtag, pages) < pages)
+ error = ENOMEM;
+ }
+ /* Performed initial allocation */
+ newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
+ }
+
+ if (error != 0) {
+ free(newtag, M_DEVBUF);
+ } else {
+ *dmat = newtag;
+ }
+ return (error);
+}
+
+int
+bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+ if (dmat != NULL) {
+
+ if (dmat->map_count != 0)
+ return (EBUSY);
+
+ while (dmat != NULL) {
+ bus_dma_tag_t parent;
+
+ parent = dmat->parent;
+ dmat->ref_count--;
+ if (dmat->ref_count == 0) {
+ free(dmat, M_DEVBUF);
+ }
+ dmat = parent;
+ }
+ }
+ return (0);
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+ int error;
+
+ error = 0;
+
+ if (dmat->flags & BUS_DMA_ISA) {
+ bus_dmamap_t map;
+ map = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
+ M_NOWAIT);
+ if (map == NULL) {
+ return (ENOMEM);
+ } else {
+ bzero(map, sizeof(*map));
+#if 0
+ map->busaddress =
+ sgmap_alloc_region(chipset.sgmap,
+ dmat->maxsize,
+ dmat->boundary,
+ &map->sgmaphandle);
+#endif
+ dmat->map_count++;
+ *mapp = map;
+ return (0);
+ }
+ }
+
+ if (dmat->lowaddr < ptoa(Maxmem)) {
+ /* Must bounce */
+ int maxpages;
+
+ *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
+ M_NOWAIT);
+ if (*mapp == NULL) {
+ return (ENOMEM);
+ } else {
+ /* Initialize the new map */
+ bzero(*mapp, sizeof(**mapp));
+ STAILQ_INIT(&((*mapp)->bpages));
+ }
+ /*
+ * Attempt to add pages to our pool on a per-instance
+ * basis up to a sane limit.
+ */
+ maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
+ if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
+ || (dmat->map_count > 0
+ && total_bpages < maxpages)) {
+ int pages;
+
+ if (dmat->lowaddr > bounce_lowaddr) {
+ /*
+ * Go through the pool and kill any pages
+ * that don't reside below lowaddr.
+ */
+ panic("bus_dmamap_create: page reallocation "
+ "not implemented");
+ }
+ pages = atop(dmat->maxsize);
+ pages = MIN(maxpages - total_bpages, pages);
+ error = alloc_bounce_pages(dmat, pages);
+
+ if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
+ if (error == 0)
+ dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
+ } else {
+ error = 0;
+ }
+ }
+ } else {
+ *mapp = &nobounce_dmamap;
+ }
+ if (error == 0)
+ dmat->map_count++;
+ return (error);
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ if (dmat->flags & BUS_DMA_ISA) {
+#if 0
+ sgmap_free_region(chipset.sgmap, map->sgmaphandle);
+#endif
+ }
+
+ if (map != NULL) {
+ if (STAILQ_FIRST(&map->bpages) != NULL)
+ return (EBUSY);
+ free(map, M_DEVBUF);
+ }
+ dmat->map_count--;
+ return (0);
+}
+
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ */
+int
+bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp)
+{
+ /* If we succeed, no mapping/bouncing will be required */
+ *mapp = &nobounce_dmamap;
+
+ if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
+ *vaddr = malloc(dmat->maxsize, M_DEVBUF,
+ (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
+ } else {
+ /*
+ * XXX Use Contigmalloc until it is merged into this facility
+ * and handles multi-seg allocations. Nobody is doing
+ * multi-seg allocations yet though.
+ */
+ *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
+ (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
+ 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
+ dmat->boundary);
+ }
+ if (*vaddr == NULL)
+ return (ENOMEM);
+ return (0);
+}
+
+/*
+ * Free a piece of memory and its associated dmamap that were allocated
+ * via bus_dmamem_alloc.
+ */
+void
+bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+ /*
+ * dmamem does not need to be bounced, so the map should be
+ * NULL
+ */
+ if (map != &nobounce_dmamap)
+ panic("bus_dmamem_free: Invalid map freed\n");
+ free(vaddr, M_DEVBUF);
+}
+
+#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)
+
+/*
+ * Map the buffer buf into bus space using the dmamap map.
+ */
+
+vm_offset_t alpha_XXX_dmamap_or = 1024UL*1024UL*1024UL; /*XXX */
+
+int
+bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, bus_dmamap_callback_t *callback,
+ void *callback_arg, int flags)
+{
+ vm_offset_t vaddr;
+ vm_offset_t paddr;
+#ifdef __GNUC__
+ bus_dma_segment_t dm_segments[dmat->nsegments];
+#else
+ bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
+#endif
+ bus_dma_segment_t *sg;
+ int seg;
+ int error;
+ vm_offset_t nextpaddr;
+
+ error = 0;
+
+ if (dmat->flags & BUS_DMA_ISA) {
+ /*
+ * For ISA dma, we use the chipset's scatter-gather
+		 * map to map the transfer into the ISA reachable range
+ * of the bus address space.
+ */
+ vaddr = trunc_page((vm_offset_t) buf);
+ dm_segments[0].ds_addr =
+ map->busaddress + (vm_offset_t) buf - vaddr;
+ dm_segments[0].ds_len = buflen;
+ buflen = round_page((vm_offset_t) buf + buflen) - vaddr;
+#if 0
+ sgmap_load_region(chipset.sgmap,
+ map->busaddress,
+ vaddr,
+ buflen);
+#endif
+ map->buflen = buflen;
+ (*callback)(callback_arg, dm_segments, 1, error);
+
+ return (0);
+ }
+
+ /*
+ * If we are being called during a callback, pagesneeded will
+ * be non-zero, so we can avoid doing the work twice.
+ */
+ if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
+ vm_offset_t vendaddr;
+
+ /*
+ * Count the number of bounce pages
+ * needed in order to complete this transfer
+ */
+ vaddr = trunc_page(buf);
+ vendaddr = (vm_offset_t)buf + buflen;
+
+ while (vaddr < vendaddr) {
+ paddr = pmap_kextract(vaddr);
+ if (run_filter(dmat, paddr) != 0) {
+
+ map->pagesneeded++;
+ }
+ vaddr += PAGE_SIZE;
+ }
+ }
+
+ /* Reserve Necessary Bounce Pages */
+ if (map->pagesneeded != 0) {
+ int s;
+
+ s = splhigh();
+ if (reserve_bounce_pages(dmat, map) != 0) {
+
+ /* Queue us for resources */
+ map->dmat = dmat;
+ map->buf = buf;
+ map->buflen = buflen;
+ map->callback = callback;
+ map->callback_arg = callback_arg;
+
+ STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
+ splx(s);
+
+ return (EINPROGRESS);
+ }
+ splx(s);
+ }
+
+ vaddr = (vm_offset_t)buf;
+ sg = &dm_segments[0];
+ seg = 1;
+ sg->ds_len = 0;
+
+ nextpaddr = 0;
+
+ do {
+ bus_size_t size;
+
+ paddr = pmap_kextract(vaddr);
+ size = PAGE_SIZE - (paddr & PAGE_MASK);
+ if (size > buflen)
+ size = buflen;
+
+ if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
+ paddr = add_bounce_page(dmat, map, vaddr, size);
+ }
+
+ if (sg->ds_len == 0) {
+ sg->ds_addr = paddr + alpha_XXX_dmamap_or;
+ sg->ds_len = size;
+ } else if (paddr == nextpaddr) {
+ sg->ds_len += size;
+ } else {
+ /* Go to the next segment */
+ sg++;
+ seg++;
+ if (seg > dmat->nsegments)
+ break;
+ sg->ds_addr = paddr + alpha_XXX_dmamap_or;
+ sg->ds_len = size;
+ }
+ vaddr += size;
+ nextpaddr = paddr + size;
+ buflen -= size;
+
+ } while (buflen > 0);
+
+ if (buflen != 0) {
+ printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
+ buflen);
+ error = EFBIG;
+ }
+
+ (*callback)(callback_arg, dm_segments, seg, error);
+
+ return (0);
+}
+
+/*
+ * Release the mapping held by map.
+ */
+void
+_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ struct bounce_page *bpage;
+
+ if (dmat->flags & BUS_DMA_ISA) {
+#if 0
+ sgmap_unload_region(chipset.sgmap,
+ map->busaddress,
+ map->buflen);
+#endif
+ return;
+ }
+
+ while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+ STAILQ_REMOVE_HEAD(&map->bpages, links);
+ free_bounce_page(dmat, bpage);
+ }
+}
+
+void
+_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+ struct bounce_page *bpage;
+
+ if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+
+ /*
+ * Handle data bouncing. We might also
+ * want to add support for invalidating
+ * the caches on broken hardware
+ */
+ switch (op) {
+ case BUS_DMASYNC_PREWRITE:
+ while (bpage != NULL) {
+ bcopy((void *)bpage->datavaddr,
+ (void *)bpage->vaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ break;
+
+ case BUS_DMASYNC_POSTREAD:
+ while (bpage != NULL) {
+ bcopy((void *)bpage->vaddr,
+ (void *)bpage->datavaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ break;
+ case BUS_DMASYNC_PREREAD:
+ case BUS_DMASYNC_POSTWRITE:
+ /* No-ops */
+ break;
+ }
+ }
+}
+
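A hedged usage note on the sync operations above: a driver brackets each transfer so that PREWRITE pushes CPU-written data into the bounce pages before the device reads them, and POSTREAD pulls device-written data back afterwards. A short sketch, reusing the hypothetical sc from the earlier example and assuming the public bus_dmamap_sync() wrapper resolves to the routine above when bouncing is in use:

    /* CPU filled sc->buf; copy it through any bounce pages first. */
    bus_dmamap_sync(sc->tag, sc->map, BUS_DMASYNC_PREWRITE);
    /* ... start the memory-to-device transfer ... */

    /* ... the device-to-memory transfer has completed ... */
    bus_dmamap_sync(sc->tag, sc->map, BUS_DMASYNC_POSTREAD);
    /* sc->buf now holds what the device wrote. */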
+static int
+alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
+{
+ int count;
+
+ count = 0;
+ if (total_bpages == 0) {
+ STAILQ_INIT(&bounce_page_list);
+ STAILQ_INIT(&bounce_map_waitinglist);
+ STAILQ_INIT(&bounce_map_callbacklist);
+ }
+
+ while (numpages > 0) {
+ struct bounce_page *bpage;
+ int s;
+
+ bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
+ M_NOWAIT);
+
+ if (bpage == NULL)
+ break;
+ bzero(bpage, sizeof(*bpage));
+ bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
+ M_NOWAIT, 0ul,
+ dmat->lowaddr,
+ PAGE_SIZE,
+ 0);
+ if (bpage->vaddr == NULL) {
+ free(bpage, M_DEVBUF);
+ break;
+ }
+ bpage->busaddr = pmap_kextract(bpage->vaddr);
+ s = splhigh();
+ STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
+ total_bpages++;
+ free_bpages++;
+ splx(s);
+ count++;
+ numpages--;
+ }
+ return (count);
+}
+
+static int
+reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ int pages;
+
+ pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
+ free_bpages -= pages;
+ reserved_bpages += pages;
+ map->pagesreserved += pages;
+ pages = map->pagesneeded - map->pagesreserved;
+
+ return (pages);
+}
+
+static vm_offset_t
+add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
+ bus_size_t size)
+{
+ int s;
+ struct bounce_page *bpage;
+
+ if (map->pagesneeded == 0)
+ panic("add_bounce_page: map doesn't need any pages");
+ map->pagesneeded--;
+
+ if (map->pagesreserved == 0)
+ panic("add_bounce_page: map doesn't need any pages");
+ map->pagesreserved--;
+
+ s = splhigh();
+ bpage = STAILQ_FIRST(&bounce_page_list);
+ if (bpage == NULL)
+ panic("add_bounce_page: free page list is empty");
+
+ STAILQ_REMOVE_HEAD(&bounce_page_list, links);
+ reserved_bpages--;
+ active_bpages++;
+ splx(s);
+
+ bpage->datavaddr = vaddr;
+ bpage->datacount = size;
+ STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
+ return (bpage->busaddr);
+}
+
+static void
+free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
+{
+ int s;
+ struct bus_dmamap *map;
+
+ bpage->datavaddr = 0;
+ bpage->datacount = 0;
+
+ s = splhigh();
+ STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
+ free_bpages++;
+ active_bpages--;
+ if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
+ if (reserve_bounce_pages(map->dmat, map) == 0) {
+ STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
+ STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
+ map, links);
+ busdma_swi_pending = 1;
+ setsoftvm();
+ }
+ }
+ splx(s);
+}
+
+void
+busdma_swi(void)
+{
+ int s;
+ struct bus_dmamap *map;
+
+ s = splhigh();
+ while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
+ STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
+ splx(s);
+ bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
+ map->callback, map->callback_arg, /*flags*/0);
+ s = splhigh();
+ }
+ splx(s);
+}
diff --git a/sys/ia64/ia64/clock.c b/sys/ia64/ia64/clock.c
new file mode 100644
index 0000000..06a7f0c
--- /dev/null
+++ b/sys/ia64/ia64/clock.c
@@ -0,0 +1,511 @@
+/* $FreeBSD$ */
+/* $NetBSD: clock.c,v 1.20 1998/01/31 10:32:47 ross Exp $ */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: clock.c 1.18 91/01/21
+ *
+ * @(#)clock.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/timetc.h>
+
+#include <machine/clock.h>
+#include <machine/clockvar.h>
+#include <isa/isareg.h>
+#include <ia64/ia64/timerreg.h>
+
+#define SECMIN ((unsigned)60) /* seconds per minute */
+#define SECHOUR ((unsigned)(60*SECMIN)) /* seconds per hour */
+#define SECDAY ((unsigned)(24*SECHOUR)) /* seconds per day */
+#define SECYR ((unsigned)(365*SECDAY)) /* seconds per common year */
+
+/*
+ * 32-bit time_t's can't reach leap years before 1904 or after 2036, so we
+ * can use a simple formula for leap years.
+ */
+#define LEAPYEAR(y) (((y) % 4) == 0)
+
+device_t clockdev;
+int clockinitted;
+int tickfix;
+int tickfixinterval;
+int adjkerntz; /* local offset from GMT in seconds */
+int disable_rtc_set; /* disable resettodr() if != 0 */
+int wall_cmos_clock; /* wall CMOS clock assumed if != 0 */
+static int beeping = 0;
+
+extern int cycles_per_sec;
+
+static timecounter_get_t ia64_get_timecount;
+
+static struct timecounter ia64_timecounter = {
+ ia64_get_timecount, /* get_timecount */
+ 0, /* no poll_pps */
+ ~0u, /* counter_mask */
+ 0, /* frequency */
+ "alpha" /* name */
+};
+
+SYSCTL_OPAQUE(_debug, OID_AUTO, ia64_timecounter, CTLFLAG_RD,
+ &ia64_timecounter, sizeof(ia64_timecounter), "S,timecounter", "");
+
+/* Values for timerX_state: */
+#define RELEASED 0
+#define RELEASE_PENDING 1
+#define ACQUIRED 2
+#define ACQUIRE_PENDING 3
+
+/* static u_char timer0_state; */
+static u_char timer2_state;
+
+/*
+ * Algorithm for missed clock ticks from Linux/alpha.
+ */
+
+/*
+ * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting
+ * by 48 gives us 16 bits for HZ while keeping the accuracy good even
+ * for large CPU clock rates.
+ */
+#define FIX_SHIFT 48
+
+static u_int64_t scaled_ticks_per_cycle;
+static u_int32_t max_cycles_per_tick;
+static u_int32_t last_time;
+
+static void handleclock(void* arg);
+static u_int32_t calibrate_clocks(u_int32_t firmware_freq);
+
+void
+clockattach(device_t dev)
+{
+
+ /*
+ * Just bookkeeping.
+ */
+ if (clockdev)
+ panic("clockattach: multiple clocks");
+ clockdev = dev;
+ cycles_per_sec = calibrate_clocks(cycles_per_sec);
+#ifdef EVCNT_COUNTERS
+ evcnt_attach(dev, "intr", &clock_intr_evcnt);
+#endif
+}
+
+/*
+ * Machine-dependent clock routines.
+ *
+ * Startrtclock restarts the real-time clock, which provides
+ * hardclock interrupts to kern_clock.c.
+ *
+ * Inittodr initializes the time of day hardware which provides
+ * date functions. Its primary function is to use some file
+ * system information in case the hardware clock lost state.
+ *
+ * Resettodr restores the time of day hardware after a time change.
+ */
+
+/*
+ * Start the real-time and statistics clocks. Leave stathz 0 since there
+ * are no other timers available.
+ */
+void
+cpu_initclocks()
+{
+ u_int32_t freq;
+
+ if (clockdev == NULL)
+ panic("cpu_initclocks: no clock attached");
+
+ tick = 1000000 / hz; /* number of microseconds between interrupts */
+ tickfix = 1000000 - (hz * tick);
+ if (tickfix) {
+ int ftp;
+
+ ftp = min(ffs(tickfix), ffs(hz));
+ tickfix >>= (ftp - 1);
+ tickfixinterval = hz >> (ftp - 1);
+ }
+
+ /*
+ * Establish the clock interrupt; it's a special case.
+ *
+ * We establish the clock interrupt this late because if
+ * we do it at clock attach time, we may have never been at
+ * spl0() since taking over the system. Some versions of
+ * PALcode save a clock interrupt, which would get delivered
+ * when we spl0() in autoconf.c. If we established the clock
+ * interrupt handler earlier, that interrupt would go to
+ * hardclock, which would then fall over because p->p_stats
+ * isn't set at that time.
+ */
+ freq = cycles_per_sec;
+ last_time = ia64_read_itc();
+ scaled_ticks_per_cycle = ((u_int64_t)hz << FIX_SHIFT) / freq;
+ max_cycles_per_tick = 2*freq / hz;
+
+ ia64_timecounter.tc_frequency = freq;
+ tc_init(&ia64_timecounter);
+
+ stathz = 128;
+
+ /*
+ * Get the clock started.
+ */
+ CLOCK_INIT(clockdev);
+}
+
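To make the tickfix computation above concrete, a worked example with hz = 128 (hz = 100 divides 1000000 evenly and needs no correction):

    tick            = 1000000 / 128          = 7812 us   (short by 64 us per second)
    tickfix         = 1000000 - 128 * 7812   = 64
    ftp             = min(ffs(64), ffs(128)) = min(7, 8) = 7
    tickfix         = 64 >> 6                = 1
    tickfixinterval = 128 >> 6               = 2

so one extra microsecond is credited every second tick, recovering the missing 64 us over each second's 128 ticks.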
+static u_int32_t
+calibrate_clocks(u_int32_t firmware_freq)
+{
+ u_int32_t start_pcc, stop_pcc;
+ int sec, start_sec;
+
+ if (bootverbose)
+ printf("Calibrating clock(s) ... ");
+
+ /* Read the mc146818A seconds counter. */
+ if (CLOCK_GETSECS(clockdev, &sec))
+ goto fail;
+
+ /* Wait for the mc146818A seconds counter to change. */
+ start_sec = sec;
+ for (;;) {
+ if (CLOCK_GETSECS(clockdev, &sec))
+ goto fail;
+ if (sec != start_sec)
+ break;
+ }
+
+ /* Start keeping track of the PCC. */
+ start_pcc = ia64_read_itc();
+
+ /*
+ * Wait for the mc146818A seconds counter to change.
+ */
+ start_sec = sec;
+ for (;;) {
+ if (CLOCK_GETSECS(clockdev, &sec))
+ goto fail;
+ if (sec != start_sec)
+ break;
+ }
+
+ /*
+ * Read the PCC again to work out frequency.
+ */
+ stop_pcc = ia64_read_itc();
+
+ if (bootverbose) {
+ printf("PCC clock: %u Hz (firmware %u Hz)\n",
+ stop_pcc - start_pcc, firmware_freq);
+ }
+ return (stop_pcc - start_pcc);
+
+fail:
+ if (bootverbose)
+ printf("failed, using firmware default of %u Hz\n",
+ firmware_freq);
+ return (firmware_freq);
+}
+
+static void
+handleclock(void* arg)
+{
+ u_int32_t now = ia64_read_itc();
+ u_int32_t delta = now - last_time;
+ last_time = now;
+
+ if (delta > max_cycles_per_tick) {
+ int i, missed_ticks;
+ missed_ticks = (delta * scaled_ticks_per_cycle) >> FIX_SHIFT;
+ for (i = 0; i < missed_ticks; i++)
+ hardclock(arg);
+ }
+ hardclock(arg);
+ setdelayed();
+}
+
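As an illustration of the fixed-point conversion in handleclock(), with made-up example numbers (an 800 MHz ITC and hz = 1024 are assumptions, not values from this code):

    scaled_ticks_per_cycle = (1024 << 48) / 800000000 = 360287970
    max_cycles_per_tick    = 2 * 800000000 / 1024     = 1562500

    delta = 2400000 cycles (about 3 ms)
    missed_ticks = (2400000 * 360287970) >> 48        = 3

Any delta of more than roughly two tick periods takes the slow path, and the backlog is converted to whole ticks with one multiply and one shift.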
+/*
+ * We assume newhz is either stathz or profhz, and that neither will
+ * change after being set up above. Could recalculate intervals here
+ * but that would be a drag.
+ */
+void
+setstatclockrate(newhz)
+ int newhz;
+{
+
+ /* nothing we can do */
+}
+
+/*
+ * This code is defunct after 2099.
+ * Will Unix still be here then??
+ */
+static short dayyr[12] = {
+ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334
+};
+
+/*
+ * Initialize the time of day register, based on the time base which is, e.g.
+ * from a filesystem. Base provides the time to within six months,
+ * and the time of year clock (if any) provides the rest.
+ */
+void
+inittodr(base)
+ time_t base;
+{
+ register int days, yr;
+ struct clocktime ct;
+ time_t deltat;
+ int badbase;
+ int s;
+ struct timespec ts;
+
+ if (base < 5*SECYR) {
+ printf("WARNING: preposterous time in file system");
+ /* read the system clock anyway */
+ base = 6*SECYR + 186*SECDAY + SECDAY/2;
+ badbase = 1;
+ } else
+ badbase = 0;
+
+ CLOCK_GET(clockdev, base, &ct);
+ clockinitted = 1;
+
+ /* simple sanity checks */
+ if (ct.year < 70 || ct.mon < 1 || ct.mon > 12 || ct.day < 1 ||
+ ct.day > 31 || ct.hour > 23 || ct.min > 59 || ct.sec > 59) {
+ /*
+ * Believe the time in the file system for lack of
+ * anything better, resetting the TODR.
+ */
+ s = splclock();
+ ts.tv_sec = base;
+ ts.tv_nsec = 0;
+ tc_setclock(&ts);
+ splx(s);
+ if (!badbase) {
+ printf("WARNING: preposterous clock chip time\n");
+ resettodr();
+ }
+ goto bad;
+ }
+ days = 0;
+ for (yr = 70; yr < ct.year; yr++)
+ days += LEAPYEAR(yr) ? 366 : 365;
+ days += dayyr[ct.mon - 1] + ct.day - 1;
+ if (LEAPYEAR(yr) && ct.mon > 2)
+ days++;
+ /* now have days since Jan 1, 1970; the rest is easy... */
+ s = splclock();
+ ts.tv_sec =
+ days * SECDAY + ct.hour * SECHOUR + ct.min * SECMIN + ct.sec;
+ if (wall_cmos_clock)
+ ts.tv_sec += adjkerntz;
+ ts.tv_nsec = 0;
+ tc_setclock(&ts);
+ splx(s);
+
+ if (!badbase) {
+ /*
+ * See if we gained/lost two or more days;
+ * if so, assume something is amiss.
+ */
+ deltat = ts.tv_sec - base;
+ if (deltat < 0)
+ deltat = -deltat;
+ if (deltat < 2 * SECDAY)
+ return;
+ printf("WARNING: clock %s %d days",
+ ts.tv_sec < base ? "lost" : "gained", deltat / SECDAY);
+ }
+bad:
+ printf(" -- CHECK AND RESET THE DATE!\n");
+}
+
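A worked example of the conversion above, using 2000-09-29 as an illustrative date (ct.year = 100, ct.mon = 9, ct.day = 29):

    years 70..99: 23 * 365 + 7 * 366  = 10957 days
    dayyr[8] + 29 - 1                 =   271 days
    2000 is a leap year and mon > 2   ->   +1 day
    total                             = 11229 days since 1970-01-01

and 11229 * 86400 = 970185600, the expected midnight UTC timestamp, before the hour, minute, second and adjkerntz terms are added.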
+/*
+ * Reset the TODR based on the time value; used when the TODR
+ * has a preposterous value and also when the time is reset
+ * by the stime system call. Also called when the TODR goes past
+ * TODRZERO + 100*(SECYEAR+2*SECDAY) (e.g. on Jan 2 just after midnight)
+ * to wrap the TODR around.
+ */
+void
+resettodr()
+{
+ register int t, t2, s;
+ struct clocktime ct;
+ unsigned long tm;
+
+ if (disable_rtc_set)
+ return;
+
+ s = splclock();
+ tm = time_second;
+ splx(s);
+
+ if (!clockinitted)
+ return;
+
+ /* Calculate local time to put in RTC */
+ tm -= (wall_cmos_clock ? adjkerntz : 0);
+
+ /* compute the day of week. */
+ t2 = tm / SECDAY;
+ ct.dow = (t2 + 4) % 7; /* 1/1/1970 was thursday */
+
+ /* compute the year */
+ ct.year = 69;
+ t = t2; /* XXX ? */
+ while (t2 >= 0) { /* whittle off years */
+ t = t2;
+ ct.year++;
+ t2 -= LEAPYEAR(ct.year) ? 366 : 365;
+ }
+
+ /* t = month + day; separate */
+ t2 = LEAPYEAR(ct.year);
+ for (ct.mon = 1; ct.mon < 12; ct.mon++)
+ if (t < dayyr[ct.mon] + (t2 && ct.mon > 1))
+ break;
+
+ ct.day = t - dayyr[ct.mon - 1] + 1;
+ if (t2 && ct.mon > 2)
+ ct.day--;
+
+ /* the rest is easy */
+ t = tm % SECDAY;
+ ct.hour = t / SECHOUR;
+ t %= 3600;
+ ct.min = t / SECMIN;
+ ct.sec = t % SECMIN;
+
+ CLOCK_SET(clockdev, &ct);
+}
+
+static unsigned
+ia64_get_timecount(struct timecounter* tc)
+{
+ return ia64_read_itc();
+}
+
+int
+acquire_timer2(int mode)
+{
+
+ if (timer2_state != RELEASED)
+ return (-1);
+ timer2_state = ACQUIRED;
+
+ /*
+ * This access to the timer registers is as atomic as possible
+ * because it is a single instruction. We could do better if we
+ * knew the rate. Use of splclock() limits glitches to 10-100us,
+ * and this is probably good enough for timer2, so we aren't as
+ * careful with it as with timer0.
+ */
+ outb(TIMER_MODE, TIMER_SEL2 | (mode & 0x3f));
+
+ return (0);
+}
+
+int
+release_timer2()
+{
+
+ if (timer2_state != ACQUIRED)
+ return (-1);
+ timer2_state = RELEASED;
+ outb(TIMER_MODE, TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT);
+ return (0);
+}
+
+static void
+sysbeepstop(void *chan)
+{
+ outb(IO_PPI, inb(IO_PPI)&0xFC); /* disable counter2 output to speaker */
+ release_timer2();
+ beeping = 0;
+}
+
+/*
+ * Frequency of all three count-down timers; (TIMER_FREQ/freq) is the
+ * appropriate count to generate a frequency of freq hz.
+ */
+#ifndef TIMER_FREQ
+#define TIMER_FREQ 1193182
+#endif
+#define TIMER_DIV(x) ((TIMER_FREQ+(x)/2)/(x))
+
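As a quick check of the rounding in TIMER_DIV() above, a 440 Hz beep would use

    TIMER_DIV(440) = (1193182 + 220) / 440 = 2712

and the counter then runs at 1193182 / 2712, which is 440 Hz to within about 0.01%; sysbeep() below writes the low and high bytes of that divisor to TIMER_CNTR2.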
+int
+sysbeep(int pitch, int period)
+{
+ int x = splhigh();
+
+ if (acquire_timer2(TIMER_SQWAVE|TIMER_16BIT))
+ if (!beeping) {
+ /* Something else owns it. */
+ splx(x);
+ return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
+ }
+
+ if (pitch) pitch = TIMER_DIV(pitch);
+
+ outb(TIMER_CNTR2, pitch);
+ outb(TIMER_CNTR2, (pitch>>8));
+ if (!beeping) {
+ /* enable counter2 output to speaker */
+ if (pitch) outb(IO_PPI, inb(IO_PPI) | 3);
+ beeping = period;
+ timeout(sysbeepstop, (void *)NULL, period);
+ }
+ splx(x);
+ return (0);
+}
+
diff --git a/sys/ia64/ia64/clock_if.m b/sys/ia64/ia64/clock_if.m
new file mode 100644
index 0000000..9665135
--- /dev/null
+++ b/sys/ia64/ia64/clock_if.m
@@ -0,0 +1,52 @@
+#
+# Copyright (c) 1998 Doug Rabson
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $FreeBSD$
+#
+
+#include <sys/bus.h>
+#include <machine/clockvar.h>
+
+INTERFACE clock;
+
+METHOD void init {
+ device_t dev;
+};
+
+METHOD void get {
+ device_t dev;
+ time_t base;
+ struct clocktime *ct;
+};
+
+METHOD void set {
+ device_t dev;
+ struct clocktime *ct;
+};
+
+METHOD int getsecs {
+ device_t dev;
+ int *secp;
+};
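For illustration, a hardware clock driver plugs into this interface through its newbus method table. The fakertc_* names below are hypothetical, and the clock_init/clock_get/clock_set/clock_getsecs method names are assumed to be the ones generated from this file:

    /* Hypothetical RTC driver, a sketch only. */
    #include <sys/param.h>
    #include <sys/bus.h>
    #include <machine/clockvar.h>

    #include "clock_if.h"                   /* generated from clock_if.m */

    static void     fakertc_init(device_t);
    static void     fakertc_get(device_t, time_t, struct clocktime *);
    static void     fakertc_set(device_t, struct clocktime *);
    static int      fakertc_getsecs(device_t, int *);

    static device_method_t fakertc_methods[] = {
            DEVMETHOD(clock_init,           fakertc_init),
            DEVMETHOD(clock_get,            fakertc_get),
            DEVMETHOD(clock_set,            fakertc_set),
            DEVMETHOD(clock_getsecs,        fakertc_getsecs),
            { 0, 0 }
    };

clock.c then reaches whichever device registered itself with clockattach() through the CLOCK_INIT(), CLOCK_GET(), CLOCK_SET() and CLOCK_GETSECS() wrappers provided by the same generated header.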
diff --git a/sys/ia64/ia64/elf_machdep.c b/sys/ia64/ia64/elf_machdep.c
new file mode 100644
index 0000000..62302d9
--- /dev/null
+++ b/sys/ia64/ia64/elf_machdep.c
@@ -0,0 +1,76 @@
+/*-
+ * Copyright 1996-1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/namei.h>
+#include <sys/fcntl.h>
+#include <sys/vnode.h>
+#include <sys/linker.h>
+#include <machine/elf.h>
+
+/* Process one elf relocation with addend. */
+int
+elf_reloc(linker_file_t lf, const void *data, int type, const char *sym)
+{
+ Elf_Addr relocbase = (Elf_Addr) lf->address;
+ Elf_Addr *where;
+ Elf_Addr addr;
+ Elf_Addr addend;
+ Elf_Word rtype;
+ const Elf_Rel *rel;
+ const Elf_Rela *rela;
+
+ switch (type) {
+ case ELF_RELOC_REL:
+ rel = (const Elf_Rel *)data;
+ where = (Elf_Addr *) (relocbase + rel->r_offset);
+ addend = *where;
+ rtype = ELF_R_TYPE(rel->r_info);
+ break;
+ case ELF_RELOC_RELA:
+ rela = (const Elf_Rela *)data;
+ where = (Elf_Addr *) (relocbase + rela->r_offset);
+ addend = rela->r_addend;
+ rtype = ELF_R_TYPE(rela->r_info);
+ break;
+ default:
+ panic("elf_reloc: unknown relocation mode %d\n", type);
+ }
+
+ switch (rtype) {
+
+ default:
+ printf("kldload: unexpected relocation type %d\n",
+ (int) rtype);
+ return -1;
+ }
+ return(0);
+}
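The switch above does not yet accept any relocation types, so every relocation falls through to the error path. Purely as a hedged sketch of the shape a handled case would eventually take (R_IA64_DIR64LSB is used only as a plausible type name, and resolving sym through linker_file_lookup_symbol() is an assumption, not something this file does yet):

        case R_IA64_DIR64LSB:   /* hypothetical: 64-bit direct, little-endian */
                addr = (Elf_Addr)linker_file_lookup_symbol(lf, sym, 1);
                if (addr == 0)
                        return -1;
                *where = addr + addend;
                break;

which is also what the currently unused addr variable above is declared for.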
diff --git a/sys/ia64/ia64/exception.S b/sys/ia64/ia64/exception.S
new file mode 100644
index 0000000..0f06ef0
--- /dev/null
+++ b/sys/ia64/ia64/exception.S
@@ -0,0 +1,949 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asm.h>
+#include <machine/pmap.h>
+#include <assym.s>
+
+/*
+ * ar.k7 = curproc
+ * ar.k6 = ksp
+ */
+
+/*
+ * Call exception_save_regs to preserve the interrupted state in a
+ * trapframe and call trap() with the value of _n_ as an argument. We
+ * arrange for trap() to return to exception_return which will restore
+ * the interrupted state before executing an rfi to resume it.
+ */
+#define TRAP(_n_) \
+ mov r16=b0; \
+ br.call.sptk.few b0=exception_save_regs; \
+ alloc r16=ar.pfs,0,0,2,0; \
+ movl r17=exception_return; \
+ mov out0=_n_; \
+ mov out1=sp;; \
+ mov rp=r17; \
+ br.call.sptk.few b6=trap
+
+/*
+ * The IA64 Interrupt Vector Table (IVT) contains 20 slots with 64
+ * bundles per vector and 48 slots with 16 bundles per vector.  At 16
+ * bytes per bundle that is 20*1024 + 48*256 = 32768 bytes, matching
+ * the 32KB alignment of the table below.
+ */
+
+ .section .text.ivt,"ax"
+
+ .align 32768
+ .global ia64_vector_table
+ia64_vector_table:
+
+/* 0x0000: VHPT Translation vector */
+
+ TRAP(0)
+ .align 1024
+
+/* 0x0400: Instruction TLB vector */
+
+ mov r16=cr.ifa
+ mov r17=pr
+ ;;
+ thash r18=r16
+ ttag r19=r16
+ ;;
+ add r20=24,r18 // collision chain
+ ;;
+ ld8 r20=[r20] // first entry
+ ;;
+ rsm psr.dt // turn off data translations
+ ;;
+ srlz.d // serialize
+ ;;
+1: cmp.eq p1,p2=r0,r20 // done?
+(p1) br.cond.spnt.few 9f // bail if done
+ ;;
+ add r21=16,r20 // tag location
+ ;;
+ ld8 r21=[r21] // read tag
+ ;;
+ cmp.eq p1,p2=r21,r19 // compare tags
+(p2) br.cond.sptk.few 2f // if not, read next in chain
+ ;;
+ ld8 r21=[r20],8 // read pte
+ ;;
+ ld8 r22=[r20] // read rest of pte
+ ;;
+ dep r18=0,r18,61,3 // convert vhpt ptr to physical
+ ;;
+ add r20=16,r18 // address of tag
+ ;;
+ ld8.acq r23=[r20] // read old tag
+ movl r24=(1<<63) // ti bit
+ ;;
+ or r23=r23,r24 // set ti bit
+ ;;
+ st8.rel [r20]=r23 // store old tag + ti
+ ;;
+ mf // make sure everyone sees
+ ;;
+ st8 [r18]=r21,8 // store pte
+ ;;
+ st8 [r18]=r22,8
+ ;;
+ st8.rel [r18]=r19 // store new tag
+ ;;
+ mov pr=r17,0x1ffff // restore predicates
+ ;;
+ rfi // walker will retry the access
+
+2: add r20=24,r20 // next in chain
+ ;;
+ ld8 r20=[r20] // read chain
+ br.cond.sptk.few 1b // loop
+
+9: mov pr=r17,0x1ffff // restore predicates
+ TRAP(1) // die horribly
+
+ .align 1024
+
+/* 0x0800: Data TLB vector */
+
+ mov r16=cr.ifa
+ mov r17=pr
+ ;;
+ thash r18=r16
+ ttag r19=r16
+ ;;
+ add r20=24,r18 // collision chain
+ ;;
+ ld8 r20=[r20] // first entry
+ ;;
+ rsm psr.dt // turn off data translations
+ ;;
+ srlz.d // serialize
+ ;;
+1: cmp.eq p1,p2=r0,r20 // done?
+(p1) br.cond.spnt.few 9f // bail if done
+ ;;
+ add r21=16,r20 // tag location
+ ;;
+ ld8 r21=[r21] // read tag
+ ;;
+ cmp.eq p1,p2=r21,r19 // compare tags
+(p2) br.cond.sptk.few 2f // if not, read next in chain
+ ;;
+ ld8 r21=[r20],8 // read pte
+ ;;
+ ld8 r22=[r20] // read rest of pte
+ ;;
+ dep r18=0,r18,61,3 // convert vhpt ptr to physical
+ ;;
+ add r20=16,r18 // address of tag
+ ;;
+ ld8.acq r23=[r20] // read old tag
+ movl r24=(1<<63) // ti bit
+ ;;
+ or r23=r23,r24 // set ti bit
+ ;;
+ st8.rel [r20]=r23 // store old tag + ti
+ ;;
+ mf // make sure everyone sees
+ ;;
+ st8 [r18]=r21,8 // store pte
+ ;;
+ st8 [r18]=r22,8
+ ;;
+ st8.rel [r18]=r19 // store new tag
+ ;;
+ mov pr=r17,0x1ffff // restore predicates
+ ;;
+ rfi // walker will retry the access
+
+2: add r20=24,r20 // next in chain
+ ;;
+ ld8 r20=[r20] // read chain
+ br.cond.sptk.few 1b // loop
+
+9: mov pr=r17,0x1ffff // restore predicates
+ TRAP(2) // die horribly
+
+ .align 1024
+
+/* 0x0c00: Alternate Instruction TLB vector */
+
+ mov r16=cr.ifa // where did it happen
+ ;;
+ mov r18=pr // save predicates
+ ;;
+ extr.u r17=r16,61,3 // get region number
+ ;;
+ cmp.eq p1,p2=7,r17 // RR7->p1, RR6->p2
+ ;;
+(p1) movl r17=PTE_P+PTE_MA_WC+PTE_A+PTE_D+PTE_PL_KERN+PTE_AR_RX
+(p2) movl r17=PTE_P+PTE_MA_UC+PTE_A+PTE_D+PTE_PL_KERN+PTE_AR_RX
+ ;;
+ dep r16=0,r16,50,14 // clear bits above PPN
+ ;;
+ dep r16=r17,r17,0,12 // put pte bits in 0..11
+ ;;
+ itc.i r16
+ mov pr=r18,0x1ffff // restore predicates
+ ;;
+ rfi
+
+ .align 1024
+
+/* 0x1000: Alternate Data TLB vector */
+
+ mov r16=cr.ifa // where did it happen
+ mov r18=pr // save predicates
+ ;;
+ extr.u r17=r16,61,3 // get region number
+ ;;
+ cmp.eq p1,p2=7,r17 // RR7->p1, RR6->p2
+ ;;
+(p1) movl r17=PTE_P+PTE_MA_WC+PTE_A+PTE_D+PTE_PL_KERN+PTE_AR_RW
+(p2) movl r17=PTE_P+PTE_MA_UC+PTE_A+PTE_D+PTE_PL_KERN+PTE_AR_RW
+ ;;
+ dep r16=0,r16,50,14 // clear bits above PPN
+ ;;
+ dep r16=r17,r17,0,12 // put pte bits in 0..11
+ ;;
+ itc.d r16
+ mov pr=r18,0x1ffff // restore predicates
+ ;;
+ rfi
+
+ .align 1024
+
+/* 0x1400: Data Nested TLB vector */
+
+ TRAP(5)
+ .align 1024
+
+/* 0x1800: Instruction Key Miss vector */
+
+ TRAP(6)
+ .align 1024
+
+/* 0x1c00: Data Key Miss vector */
+
+ TRAP(7)
+ .align 1024
+
+/* 0x2000: Dirty-Bit vector */
+
+ TRAP(8)
+ .align 1024
+
+/* 0x2400: Instruction Access-Bit vector */
+
+ TRAP(9)
+ .align 1024
+
+/* 0x2800: Data Access-Bit vector */
+
+ TRAP(10)
+ .align 1024
+
+/* 0x2c00: Break Instruction vector */
+
+ TRAP(11)
+ .align 1024
+
+/* 0x3000: External Interrupt vector */
+
+ TRAP(12)
+ .align 1024
+
+/* 0x3400: Reserved */
+
+ TRAP(13)
+ .align 1024
+
+/* 0x3800: Reserved */
+
+ TRAP(14)
+ .align 1024
+
+/* 0x3c00: Reserved */
+
+ TRAP(15)
+ .align 1024
+
+/* 0x4000: Reserved */
+
+ TRAP(16)
+ .align 1024
+
+/* 0x4400: Reserved */
+
+ TRAP(17)
+ .align 1024
+
+/* 0x4800: Reserved */
+
+ TRAP(18)
+ .align 1024
+
+/* 0x4c00: Reserved */
+
+ TRAP(19)
+ .align 1024
+
+/* 0x5000: Page Not Present vector */
+
+ TRAP(20)
+ .align 256
+
+/* 0x5100: Key Permission vector */
+
+ TRAP(21)
+ .align 256
+
+/* 0x5200: Instruction Access Rights vector */
+
+ TRAP(22)
+ .align 256
+
+/* 0x5300: Data Access Rights vector */
+
+ TRAP(23)
+ .align 256
+
+/* 0x5400: General Exception vector */
+
+ TRAP(24)
+ .align 256
+
+/* 0x5500: Disabled FP-Register vector */
+
+ TRAP(25)
+ .align 256
+
+/* 0x5600: NaT Consumption vector */
+
+ TRAP(26)
+ .align 256
+
+/* 0x5700: Speculation vector */
+
+ TRAP(27)
+ .align 256
+
+/* 0x5800: Reserved */
+
+ TRAP(28)
+ .align 256
+
+/* 0x5900: Debug vector */
+
+ TRAP(29)
+ .align 256
+
+/* 0x5a00: Unaligned Reference vector */
+
+ TRAP(30)
+ .align 256
+
+/* 0x5b00: Unsupported Data Reference vector */
+
+ TRAP(31)
+ .align 256
+
+/* 0x5c00: Floating-point Fault vector */
+
+ TRAP(32)
+ .align 256
+
+/* 0x5d00: Floating-point Trap vector */
+
+ TRAP(33)
+ .align 256
+
+/* 0x5e00: Lower-Privilege Transfer Trap vector */
+
+ TRAP(34)
+ .align 256
+
+/* 0x5f00: Taken Branch Trap vector */
+
+ TRAP(35)
+ .align 256
+
+/* 0x6000: Single Step Trap vector */
+
+ TRAP(36)
+ .align 256
+
+/* 0x6100: Reserved */
+
+ TRAP(37)
+ .align 256
+
+/* 0x6200: Reserved */
+
+ TRAP(38)
+ .align 256
+
+/* 0x6300: Reserved */
+
+ TRAP(39)
+ .align 256
+
+/* 0x6400: Reserved */
+
+ TRAP(40)
+ .align 256
+
+/* 0x6500: Reserved */
+
+ TRAP(41)
+ .align 256
+
+/* 0x6600: Reserved */
+
+ TRAP(42)
+ .align 256
+
+/* 0x6700: Reserved */
+
+ TRAP(43)
+ .align 256
+
+/* 0x6800: Reserved */
+
+ TRAP(44)
+ .align 256
+
+/* 0x6900: IA-32 Exception vector */
+
+ TRAP(45)
+ .align 256
+
+/* 0x6a00: IA-32 Intercept vector */
+
+ TRAP(46)
+ .align 256
+
+/* 0x6b00: IA-32 Interrupt vector */
+
+ TRAP(47)
+ .align 256
+
+/* 0x6c00: Reserved */
+
+ TRAP(48)
+ .align 256
+
+/* 0x6d00: Reserved */
+
+ TRAP(49)
+ .align 256
+
+/* 0x6e00: Reserved */
+
+ TRAP(50)
+ .align 256
+
+/* 0x6f00: Reserved */
+
+ TRAP(51)
+ .align 256
+
+/* 0x7000: Reserved */
+
+ TRAP(52)
+ .align 256
+
+/* 0x7100: Reserved */
+
+ TRAP(53)
+ .align 256
+
+/* 0x7200: Reserved */
+
+ TRAP(54)
+ .align 256
+
+/* 0x7300: Reserved */
+
+ TRAP(55)
+ .align 256
+
+/* 0x7400: Reserved */
+
+ TRAP(56)
+ .align 256
+
+/* 0x7500: Reserved */
+
+ TRAP(57)
+ .align 256
+
+/* 0x7600: Reserved */
+
+ TRAP(58)
+ .align 256
+
+/* 0x7700: Reserved */
+
+ TRAP(59)
+ .align 256
+
+/* 0x7800: Reserved */
+
+ TRAP(60)
+ .align 256
+
+/* 0x7900: Reserved */
+
+ TRAP(61)
+ .align 256
+
+/* 0x7a00: Reserved */
+
+ TRAP(62)
+ .align 256
+
+/* 0x7b00: Reserved */
+
+ TRAP(63)
+ .align 256
+
+/* 0x7c00: Reserved */
+
+ TRAP(64)
+ .align 256
+
+/* 0x7d00: Reserved */
+
+ TRAP(65)
+ .align 256
+
+/* 0x7e00: Reserved */
+
+ TRAP(66)
+ .align 256
+
+/* 0x7f00: Reserved */
+
+ TRAP(67)
+ .align 256
+
+ .section .data.vhpt,"aw"
+
+ .global ia64_vhpt
+
+ .align 32768
+ia64_vhpt: .quad 0
+ .align 32768
+
+ .text
+
+#define rIIP r31
+#define rIPSR r30
+#define rISR r29
+#define rIFA r28
+#define rPR r27
+#define rSP r26
+#define rIFS r25
+#define rR1 r24
+#define rR2 r23
+#define rBSPSTORE r22
+#define rRNAT r21
+#define rBSP r27 /* overlay rPR */
+#define rRSC r20
+#define rPFS r19
+#define rB0 r31 /* overlay rIIP */
+
+/*
+ * exception_return: restore interrupted state
+ *
+ * Arguments:
+ * sp trapframe pointer
+ *
+ */
+LEAF(exception_return, 0)
+
+ rsm psr.ic|psr.dt // disable interrupt collection and vm
+ ;;
+ srlz.d
+ dep r3=0,sp,61,3 // physical address
+ ;;
+ add r1=SIZEOF_TRAPFRAME-16,r3 // r1=&tf_f[FRAME_F15]
+ add r2=SIZEOF_TRAPFRAME-32,r3 // r2=&tf_f[FRAME_F14]
+ ;;
+ ldf.fill f15=[r1],-32 // r1=&tf_f[FRAME_F13]
+ ldf.fill f14=[r2],-32 // r2=&tf_f[FRAME_F12]
+ ;;
+ ldf.fill f13=[r1],-32 // r1=&tf_f[FRAME_F11]
+ ldf.fill f12=[r2],-32 // r2=&tf_f[FRAME_F10]
+ ;;
+ ldf.fill f11=[r1],-32 // r1=&tf_f[FRAME_F9]
+ ldf.fill f10=[r2],-32 // r2=&tf_f[FRAME_F8]
+ ;;
+ ldf.fill f9=[r1],-32 // r1=&tf_f[FRAME_F7]
+ ldf.fill f8=[r2],-32 // r2=&tf_f[FRAME_F6]
+ ;;
+ ldf.fill f7=[r1],-24 // r1=&tf_r[FRAME_R31]
+ ldf.fill f6=[r2],-24 // r2=&tf_r[FRAME_R30]
+ ;;
+ ld8.fill r31=[r1],-16 // r1=&tf_r[FRAME_R29]
+ ld8.fill r30=[r2],-16 // r2=&tf_r[FRAME_R28]
+ ;;
+ ld8.fill r29=[r1],-16 // r1=&tf_r[FRAME_R27]
+ ld8.fill r28=[r2],-16 // r2=&tf_r[FRAME_R26]
+ ;;
+ ld8.fill r27=[r1],-16 // r1=&tf_r[FRAME_R25]
+ ld8.fill r26=[r2],-16 // r2=&tf_r[FRAME_R24]
+ ;;
+ ld8.fill r25=[r1],-16 // r1=&tf_r[FRAME_R23]
+ ld8.fill r24=[r2],-16 // r2=&tf_r[FRAME_R22]
+ ;;
+ ld8.fill r23=[r1],-16 // r1=&tf_r[FRAME_R21]
+ ld8.fill r22=[r2],-16 // r2=&tf_r[FRAME_R20]
+ ;;
+ ld8.fill r21=[r1],-16 // r1=&tf_r[FRAME_R19]
+ ld8.fill r20=[r2],-16 // r2=&tf_r[FRAME_R18]
+ ;;
+ ld8.fill r19=[r1],-16 // r1=&tf_r[FRAME_R17]
+ ld8.fill r18=[r2],-16 // r2=&tf_r[FRAME_R16]
+ ;;
+ ld8.fill r17=[r1],-16 // r1=&tf_r[FRAME_R15]
+ ld8.fill r16=[r2],-16 // r2=&tf_r[FRAME_R14]
+ ;;
+ bsw.0 // switch to bank 0
+ ;;
+ ld8.fill r15=[r1],-16 // r1=&tf_r[FRAME_R13]
+ ld8.fill r14=[r2],-16 // r2=&tf_r[FRAME_R12]
+ ;;
+ ld8.fill r13=[r1],-16 // r1=&tf_r[FRAME_R11]
+ ld8.fill r12=[r2],-16 // r2=&tf_r[FRAME_R10]
+ ;;
+ ld8.fill r11=[r1],-16 // r1=&tf_r[FRAME_R9]
+ ld8.fill r10=[r2],-16 // r2=&tf_r[FRAME_R8]
+ ;;
+ ld8.fill r9=[r1],-16 // r1=&tf_r[FRAME_R7]
+ ld8.fill r8=[r2],-16 // r2=&tf_r[FRAME_R6]
+ ;;
+ ld8.fill r7=[r1],-16 // r1=&tf_r[FRAME_R5]
+ ld8.fill r6=[r2],-16 // r2=&tf_r[FRAME_R4]
+ ;;
+ ld8.fill r5=[r1],-16 // r1=&tf_r[FRAME_R3]
+ ld8.fill r4=[r2],-16 // r2=&tf_r[FRAME_R2]
+ ;;
+ ld8.fill r3=[r1],-16 // r1=&tf_r[FRAME_R1]
+ ld8.fill rR2=[r2],-16 // r2=&tf_b[7]
+ ;;
+ ld8.fill rR1=[r1],-16 // r1=&tf_b[6]
+ ld8 r16=[r2],-16 // r2=&tf_b[5]
+ ;;
+ mov b7=r16
+ ld8 r18=[r1],-16 // r1=&tf_b[4]
+ ld8 r19=[r2],-16 // r2=&tf_b[3]
+ ;;
+ mov b6=r18
+ mov b5=r19
+ ld8 r16=[r1],-16 // r1=&tf_b[2]
+ ld8 r17=[r2],-16 // r2=&tf_b[1]
+ ;;
+ mov b4=r16
+ mov b3=r17
+ ld8 r18=[r1],-16 // r1=&tf_b[0]
+ ld8 r19=[r2],-16 // r2=&tf_ar_fpsr
+ ;;
+ mov b2=r18
+ mov b1=r19
+ ld8 r16=[r1],-16 // r1=&tf_ar_ccv
+ ld8 r17=[r2],-16 // r2=&tf_ar_unat
+ ;;
+ mov b0=r16
+ mov ar.fpsr=r17
+ ld8 r18=[r1],-16 // r1=&tf_ar_bsp
+ ld8 r19=[r2],-16 // r2=&tf_ar_rnat
+ ;;
+ mov ar.ccv=r18
+ mov ar.unat=r19
+ ld8 rBSP=[r1],-16 // r1=&tf_ar_bspstore
+ ld8 rRNAT=[r2],-16 // r2=&tf_cr_ifs
+ ;;
+ ld8 rBSPSTORE=[r1],-16 // r1=&tf_cr_pfs
+ ld8 rIFS=[r2],-16 // r2=&tf_ar_rsc
+ ;;
+ ld8 rPFS=[r1],-16 // r1=&tf_pr
+ ld8 rRSC=[r2],-16 // r2=&tf_cr_ifa
+ ;;
+ ld8 rPR=[r1],-16 // r1=&tf_cr_isr
+ ld8 rIFA=[r2],-16 // r2=&tf_cr_ipsr
+ ;;
+ ld8 rIIP=[r1]
+ ld8 rIPSR=[r2]
+ ;;
+ extr.u r16=rIPSR,32,2 // extract ipsr.cpl
+ ;;
+ cmp.eq p1,p2=r0,r16 // test for kernel mode
+ ;;
+(p2) br.cond.dpnt.few 1f // don't switch bs if not user
+ ;;
+ sub r16=rBSP,rBSPSTORE // how many bytes to load?
+ ;;
+ shl r16=r16,16 // value for ar.rsc
+ ;;
+ mov ar.rsc=r16 // setup for loadrs
+ ;;
+ loadrs // load user regs from kernel bs
+ ;;
+ mov ar.bspstore=rBSPSTORE
+ ;;
+ mov ar.rnat=rRNAT
+
+1: mov r1=rR1
+ mov r2=rR2
+ mov cr.ifs=rIFS
+ mov ar.rsc=rRSC
+ mov pr=rPR,0x1ffff
+ mov cr.ifa=rIFA
+ mov cr.iip=rIIP
+ mov cr.ipsr=rIPSR
+ ;;
+ rfi
+
+ END(exception_return)
+
+
+/*
+ * exception_save_regs: save interrupted state
+ *
+ * Arguments:
+ * b0 return address
+ * r16 saved b0
+ *
+ * Return:
+ * sp kernel stack pointer
+ */
+LEAF(exception_save_regs, 0)
+ rsm psr.dt // turn off data translations
+ ;;
+ srlz.d // serialize
+ ;;
+ mov rIIP=cr.iip
+ mov rIPSR=cr.ipsr
+ mov rISR=cr.isr
+ mov rIFA=cr.ifa
+ mov rPR=pr
+ ;;
+ extr.u r17=rIPSR,32,2 // extract ipsr.cpl
+ ;;
+ cmp.eq p1,p2=r0,r17 // test for kernel mode
+ ;;
+ mov rSP=sp // save sp
+ ;;
+(p2) mov sp=ar.k6 // and switch to kernel stack
+ mov r16=SIZEOF_TRAPFRAME
+ ;;
+ sub sp=sp,r16 // reserve trapframe
+ ;;
+ mov rR1=r1
+ mov rR2=r2
+ ;;
+ dep r1=0,sp,61,3 // r1=&tf_cr_iip
+ ;;
+ add r2=8,r1 // r2=&tf_cr_ipsr
+ ;;
+ st8 [r1]=rIIP,16 // r1=&tf_cr_isr
+ st8 [r2]=rIPSR,16 // r2=&tf_cr_ifa
+ ;;
+ st8 [r1]=rISR,16 // r1=&tf_pr
+ st8 [r2]=rIFA,16 // r2=&tf_ar_rsc
+ ;;
+ st8 [r1]=rPR,16 // r1=&tf_cr_pfs
+
+ mov rB0=r16
+ mov rRSC=ar.rsc
+ mov rPFS=ar.pfs
+ cover
+(p2) mov r16=ar.k7 // curproc
+ mov rIFS=cr.ifs
+ ;;
+(p2) add r16=P_ADDR,r16 // &curproc->p_addr
+ mov ar.rsc=0
+ ;;
+(p2) ld8 r16=[r16] // curproc->p_addr
+ mov rBSPSTORE=ar.bspstore
+ ;;
+(p2) add r16=SIZEOF_USER,r16 // kernel backing store
+ mov rRNAT=ar.rnat
+ mov rBSP=ar.bsp
+ ;;
+(p2) mov ar.bspstore=r16 // switch bspstore
+ st8 [r2]=rRSC,16 // r2=&tf_cr_ifs
+ ;;
+ st8 [r1]=rPFS,16 // r1=&tf_ar_bspstore
+ st8 [r2]=rIFS,16 // r2=&tf_ar_rnat
+ ;;
+ st8 [r1]=rBSPSTORE,16 // r1=&tf_ar_bsp
+ st8 [r2]=rRNAT,16 // r2=&tf_ar_unat
+ ;;
+ st8 [r1]=rBSP,16 // r1=&tf_ar_ccv
+ mov ar.rsc=3 // switch RSE back on
+ mov r16=ar.unat
+ ;;
+ mov r17=ar.ccv
+ st8 [r2]=r16,16 // r2=&tf_ar_fpsr
+ mov r18=ar.fpsr
+ ;;
+ st8 [r1]=r17,16 // r1=&tf_b[0]
+ st8 [r2]=r18,16 // r2=&tf_b[1]
+ mov r17=b1
+ ;;
+ st8 [r1]=rB0,16 // r1=&tf_b[2]
+ mov r18=b2
+ st8 [r2]=r17,16 // r2=&tf_b[3]
+ ;;
+ mov r17=b3
+ st8 [r1]=r18,16 // r1=&tf_b[4]
+ ;;
+ mov r18=b4
+ st8 [r2]=r17,16 // r2=&tf_b[5]
+ ;;
+ mov r17=b5
+ st8 [r1]=r18,16 // r1=&tf_b[6]
+ ;;
+ mov r18=b6
+ st8 [r2]=r17,16 // r2=&tf_b[7]
+ ;;
+ mov r17=b7
+ st8 [r1]=r18,16 // r1=&tf_r[FRAME_R1]
+ ;;
+ st8 [r2]=r17,16 // r2=&tf_r[FRAME_R2]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=rR1,16 // r1=&tf_r[FRAME_R3]
+ .mem.offset 8,0
+ st8.spill [r2]=rR2,16 // r2=&tf_r[FRAME_R4]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r3,16 // r1=&tf_r[FRAME_R5]
+ .mem.offset 8,0
+ st8.spill [r2]=r4,16 // r2=&tf_r[FRAME_R6]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r5,16 // r1=&tf_r[FRAME_R7]
+ .mem.offset 8,0
+ st8.spill [r2]=r6,16 // r2=&tf_r[FRAME_R8]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r7,16 // r1=&tf_r[FRAME_R9]
+ .mem.offset 8,0
+ st8.spill [r2]=r8,16 // r2=&tf_r[FRAME_R10]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r9,16 // r1=&tf_r[FRAME_R11]
+ .mem.offset 8,0
+ st8.spill [r2]=r10,16 // r2=&tf_r[FRAME_SP]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r11,16 // r1=&tf_r[FRAME_R13]
+ .mem.offset 8,0
+ st8.spill [r2]=rSP,16 // r2=&tf_r[FRAME_R14]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r13,16 // r1=&tf_r[FRAME_R15]
+ .mem.offset 8,0
+ st8.spill [r2]=r14,16 // r2=&tf_r[FRAME_R16]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r15,16 // r1=&tf_r[FRAME_R17]
+ ;;
+ bsw.1 // switch to bank 1
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r16,16 // r2=&tf_r[FRAME_R18]
+ .mem.offset 0,0
+ st8.spill [r1]=r17,16 // r1=&tf_r[FRAME_R19]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r18,16 // r2=&tf_r[FRAME_R20]
+ .mem.offset 0,0
+ st8.spill [r1]=r19,16 // r1=&tf_r[FRAME_R21]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r20,16 // r2=&tf_r[FRAME_R22]
+ .mem.offset 0,0
+ st8.spill [r1]=r21,16 // r1=&tf_r[FRAME_R23]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r22,16 // r2=&tf_r[FRAME_R24]
+ .mem.offset 0,0
+ st8.spill [r1]=r23,16 // r1=&tf_r[FRAME_R25]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r24,16 // r2=&tf_r[FRAME_R26]
+ .mem.offset 0,0
+ st8.spill [r1]=r25,16 // r1=&tf_r[FRAME_R27]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r26,16 // r2=&tf_r[FRAME_R28]
+ .mem.offset 0,0
+ st8.spill [r1]=r27,16 // r1=&tf_r[FRAME_R29]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r28,16 // r2=&tf_r[FRAME_R30]
+ .mem.offset 0,0
+ st8.spill [r1]=r29,16 // r1=&tf_r[FRAME_R31]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r30,24 // r2=&tf_f[FRAME_F6]
+ .mem.offset 0,0
+ st8.spill [r1]=r31,32 // r1=&tf_f[FRAME_F7]
+ ;;
+ stf.spill [r2]=f6,32 // r2=&tf_f[FRAME_F8]
+ stf.spill [r1]=f7,32 // r1=&tf_f[FRAME_F9]
+ ;;
+ stf.spill [r2]=f8,32 // r2=&tf_f[FRAME_F10]
+ stf.spill [r1]=f9,32 // r1=&tf_f[FRAME_F11]
+ ;;
+ stf.spill [r2]=f10,32 // r2=&tf_f[FRAME_F12]
+ stf.spill [r1]=f11,32 // r1=&tf_f[FRAME_F13]
+ ;;
+ stf.spill [r2]=f12,32 // r2=&tf_f[FRAME_F14]
+ stf.spill [r1]=f13,32 // r1=&tf_f[FRAME_F15]
+ ;;
+ stf.spill [r2]=f14 //
+ stf.spill [r1]=f15 //
+ ;;
+ movl r1=__gp // kernel globals
+ ssm psr.ic|psr.dt // enable interrupts & translation
+ ;;
+ srlz.d // serialize
+
+ br.ret.sptk.few b0
+
+ END(exception_save_regs)
+
+
diff --git a/sys/ia64/ia64/exception.s b/sys/ia64/ia64/exception.s
new file mode 100644
index 0000000..0f06ef0
--- /dev/null
+++ b/sys/ia64/ia64/exception.s
@@ -0,0 +1,949 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asm.h>
+#include <machine/pmap.h>
+#include <assym.s>
+
+/*
+ * ar.k7 = curproc
+ * ar.k6 = ksp
+ */
+
+/*
+ * Call exception_save_regs to preserve the interrupted state in a
+ * trapframe and call trap() with the value of _n_ as an argument. We
+ * arrange for trap() to return to exception_return which will restore
+ * the interrupted state before executing an rfi to resume it.
+ */
+#define TRAP(_n_) \
+ mov r16=b0; \
+ br.call.sptk.few b0=exception_save_regs; \
+ alloc r16=ar.pfs,0,0,2,0; \
+ movl r17=exception_return; \
+ mov out0=_n_; \
+ mov out1=sp;; \
+ mov rp=r17; \
+ br.call.sptk.few b6=trap
+
+/*
+ * The IA64 Interrupt Vector Table (IVT) contains 20 slots with 64
+ * bundles per vector and 48 slots with 16 bundles per vector.
+ */
+
+ .section .text.ivt,"ax"
+
+ .align 32768
+ .global ia64_vector_table
+ia64_vector_table:
+
+/* 0x0000: VHPT Translation vector */
+
+ TRAP(0)
+ .align 1024
+
+/* 0x0400: Instruction TLB vector */
+
+ mov r16=cr.ifa
+ mov r17=pr
+ ;;
+ thash r18=r16
+ ttag r19=r16
+ ;;
+ add r20=24,r18 // collision chain
+ ;;
+ ld8 r20=[r20] // first entry
+ ;;
+ rsm psr.dt // turn off data translations
+ ;;
+ srlz.d // serialize
+ ;;
+1: cmp.eq p1,p2=r0,r20 // done?
+(p1) br.cond.spnt.few 9f // bail if done
+ ;;
+ add r21=16,r20 // tag location
+ ;;
+ ld8 r21=[r21] // read tag
+ ;;
+ cmp.eq p1,p2=r21,r19 // compare tags
+(p2) br.cond.sptk.few 2f // if not, read next in chain
+ ;;
+ ld8 r21=[r20],8 // read pte
+ ;;
+ ld8 r22=[r20] // read rest of pte
+ ;;
+ dep r18=0,r18,61,3 // convert vhpt ptr to physical
+ ;;
+ add r20=16,r18 // address of tag
+ ;;
+ ld8.acq r23=[r20] // read old tag
+ movl r24=(1<<63) // ti bit
+ ;;
+ or r23=r23,r24 // set ti bit
+ ;;
+ st8.rel [r20]=r23 // store old tag + ti
+ ;;
+ mf // make sure everyone sees
+ ;;
+ st8 [r18]=r21,8 // store pte
+ ;;
+ st8 [r18]=r22,8
+ ;;
+ st8.rel [r18]=r19 // store new tag
+ ;;
+ mov pr=r17,0x1ffff // restore predicates
+ ;;
+ rfi // walker will retry the access
+
+2: add r20=24,r20 // next in chain
+ ;;
+ ld8 r20=[r20] // read chain
+ br.cond.sptk.few 1b // loop
+
+9: mov pr=r17,0x1ffff // restore predicates
+ TRAP(1) // die horribly
+
+ .align 1024
+
+/* 0x0800: Data TLB vector */
+
+ mov r16=cr.ifa
+ mov r17=pr
+ ;;
+ thash r18=r16
+ ttag r19=r16
+ ;;
+ add r20=24,r18 // collision chain
+ ;;
+ ld8 r20=[r20] // first entry
+ ;;
+ rsm psr.dt // turn off data translations
+ ;;
+ srlz.d // serialize
+ ;;
+1: cmp.eq p1,p2=r0,r20 // done?
+(p1) br.cond.spnt.few 9f // bail if done
+ ;;
+ add r21=16,r20 // tag location
+ ;;
+ ld8 r21=[r21] // read tag
+ ;;
+ cmp.eq p1,p2=r21,r19 // compare tags
+(p2) br.cond.sptk.few 2f // if not, read next in chain
+ ;;
+ ld8 r21=[r20],8 // read pte
+ ;;
+ ld8 r22=[r20] // read rest of pte
+ ;;
+ dep r18=0,r18,61,3 // convert vhpt ptr to physical
+ ;;
+ add r20=16,r18 // address of tag
+ ;;
+ ld8.acq r23=[r20] // read old tag
+ movl r24=(1<<63) // ti bit
+ ;;
+ or r23=r23,r24 // set ti bit
+ ;;
+ st8.rel [r20]=r23 // store old tag + ti
+ ;;
+ mf // make sure everyone sees
+ ;;
+ st8 [r18]=r21,8 // store pte
+ ;;
+ st8 [r18]=r22,8
+ ;;
+ st8.rel [r18]=r19 // store new tag
+ ;;
+ mov pr=r17,0x1ffff // restore predicates
+ ;;
+ rfi // walker will retry the access
+
+2: add r20=24,r20 // next in chain
+ ;;
+ ld8 r20=[r20] // read chain
+ br.cond.sptk.few 1b // loop
+
+9: mov pr=r17,0x1ffff // restore predicates
+ TRAP(2) // die horribly
+
+ .align 1024
+
+/* 0x0c00: Alternate Instruction TLB vector */
+
+ mov r16=cr.ifa // where did it happen
+ ;;
+ mov r18=pr // save predicates
+ ;;
+ extr.u r17=r16,61,3 // get region number
+ ;;
+ cmp.eq p1,p2=7,r17 // RR7->p1, RR6->p2
+ ;;
+(p1) movl r17=PTE_P+PTE_MA_WC+PTE_A+PTE_D+PTE_PL_KERN+PTE_AR_RX
+(p2) movl r17=PTE_P+PTE_MA_UC+PTE_A+PTE_D+PTE_PL_KERN+PTE_AR_RX
+ ;;
+ dep r16=0,r16,50,14 // clear bits above PPN
+ ;;
+ dep r16=r17,r17,0,12 // put pte bits in 0..11
+ ;;
+ itc.i r16
+ mov pr=r18,0x1ffff // restore predicates
+ ;;
+ rfi
+
+ .align 1024
+
+/* 0x1000: Alternate Data TLB vector */
+
+ mov r16=cr.ifa // where did it happen
+ mov r18=pr // save predicates
+ ;;
+ extr.u r17=r16,61,3 // get region number
+ ;;
+ cmp.eq p1,p2=7,r17 // RR7->p1, RR6->p2
+ ;;
+(p1) movl r17=PTE_P+PTE_MA_WC+PTE_A+PTE_D+PTE_PL_KERN+PTE_AR_RW
+(p2) movl r17=PTE_P+PTE_MA_UC+PTE_A+PTE_D+PTE_PL_KERN+PTE_AR_RW
+ ;;
+ dep r16=0,r16,50,14 // clear bits above PPN
+ ;;
+ dep r16=r17,r17,0,12 // put pte bits in 0..11
+ ;;
+ itc.d r16
+ mov pr=r18,0x1ffff // restore predicates
+ ;;
+ rfi
+
+ .align 1024
+
+/* 0x1400: Data Nested TLB vector */
+
+ TRAP(5)
+ .align 1024
+
+/* 0x1800: Instruction Key Miss vector */
+
+ TRAP(6)
+ .align 1024
+
+/* 0x1c00: Data Key Miss vector */
+
+ TRAP(7)
+ .align 1024
+
+/* 0x2000: Dirty-Bit vector */
+
+ TRAP(8)
+ .align 1024
+
+/* 0x2400: Instruction Access-Bit vector */
+
+ TRAP(9)
+ .align 1024
+
+/* 0x2800: Data Access-Bit vector */
+
+ TRAP(10)
+ .align 1024
+
+/* 0x2c00: Break Instruction vector */
+
+ TRAP(11)
+ .align 1024
+
+/* 0x3000: External Interrupt vector */
+
+ TRAP(12)
+ .align 1024
+
+/* 0x3400: Reserved */
+
+ TRAP(13)
+ .align 1024
+
+/* 0x3800: Reserved */
+
+ TRAP(14)
+ .align 1024
+
+/* 0x3c00: Reserved */
+
+ TRAP(15)
+ .align 1024
+
+/* 0x4000: Reserved */
+
+ TRAP(16)
+ .align 1024
+
+/* 0x4400: Reserved */
+
+ TRAP(17)
+ .align 1024
+
+/* 0x4800: Reserved */
+
+ TRAP(18)
+ .align 1024
+
+/* 0x4c00: Reserved */
+
+ TRAP(19)
+ .align 1024
+
+/* 0x5000: Page Not Present vector */
+
+ TRAP(20)
+ .align 256
+
+/* 0x5100: Key Permission vector */
+
+ TRAP(21)
+ .align 256
+
+/* 0x5200: Instruction Access Rights vector */
+
+ TRAP(22)
+ .align 256
+
+/* 0x5300: Data Access Rights vector */
+
+ TRAP(23)
+ .align 256
+
+/* 0x5400: General Exception vector */
+
+ TRAP(24)
+ .align 256
+
+/* 0x5500: Disabled FP-Register vector */
+
+ TRAP(25)
+ .align 256
+
+/* 0x5600: NaT Consumption vector */
+
+ TRAP(26)
+ .align 256
+
+/* 0x5700: Speculation vector */
+
+ TRAP(27)
+ .align 256
+
+/* 0x5800: Reserved */
+
+ TRAP(28)
+ .align 256
+
+/* 0x5900: Debug vector */
+
+ TRAP(29)
+ .align 256
+
+/* 0x5a00: Unaligned Reference vector */
+
+ TRAP(30)
+ .align 256
+
+/* 0x5b00: Unsupported Data Reference vector */
+
+ TRAP(31)
+ .align 256
+
+/* 0x5c00: Floating-point Fault vector */
+
+ TRAP(32)
+ .align 256
+
+/* 0x5d00: Floating-point Trap vector */
+
+ TRAP(33)
+ .align 256
+
+/* 0x5e00: Lower-Privilege Transfer Trap vector */
+
+ TRAP(34)
+ .align 256
+
+/* 0x5f00: Taken Branch Trap vector */
+
+ TRAP(35)
+ .align 256
+
+/* 0x6000: Single Step Trap vector */
+
+ TRAP(36)
+ .align 256
+
+/* 0x6100: Reserved */
+
+ TRAP(37)
+ .align 256
+
+/* 0x6200: Reserved */
+
+ TRAP(38)
+ .align 256
+
+/* 0x6300: Reserved */
+
+ TRAP(39)
+ .align 256
+
+/* 0x6400: Reserved */
+
+ TRAP(40)
+ .align 256
+
+/* 0x6500: Reserved */
+
+ TRAP(41)
+ .align 256
+
+/* 0x6600: Reserved */
+
+ TRAP(42)
+ .align 256
+
+/* 0x6700: Reserved */
+
+ TRAP(43)
+ .align 256
+
+/* 0x6800: Reserved */
+
+ TRAP(44)
+ .align 256
+
+/* 0x6900: IA-32 Exception vector */
+
+ TRAP(45)
+ .align 256
+
+/* 0x6a00: IA-32 Intercept vector */
+
+ TRAP(46)
+ .align 256
+
+/* 0x6b00: IA-32 Interrupt vector */
+
+ TRAP(47)
+ .align 256
+
+/* 0x6c00: Reserved */
+
+ TRAP(48)
+ .align 256
+
+/* 0x6d00: Reserved */
+
+ TRAP(49)
+ .align 256
+
+/* 0x6e00: Reserved */
+
+ TRAP(50)
+ .align 256
+
+/* 0x6f00: Reserved */
+
+ TRAP(51)
+ .align 256
+
+/* 0x7000: Reserved */
+
+ TRAP(52)
+ .align 256
+
+/* 0x7100: Reserved */
+
+ TRAP(53)
+ .align 256
+
+/* 0x7200: Reserved */
+
+ TRAP(54)
+ .align 256
+
+/* 0x7300: Reserved */
+
+ TRAP(55)
+ .align 256
+
+/* 0x7400: Reserved */
+
+ TRAP(56)
+ .align 256
+
+/* 0x7500: Reserved */
+
+ TRAP(57)
+ .align 256
+
+/* 0x7600: Reserved */
+
+ TRAP(58)
+ .align 256
+
+/* 0x7700: Reserved */
+
+ TRAP(59)
+ .align 256
+
+/* 0x7800: Reserved */
+
+ TRAP(60)
+ .align 256
+
+/* 0x7900: Reserved */
+
+ TRAP(61)
+ .align 256
+
+/* 0x7a00: Reserved */
+
+ TRAP(62)
+ .align 256
+
+/* 0x7b00: Reserved */
+
+ TRAP(63)
+ .align 256
+
+/* 0x7c00: Reserved */
+
+ TRAP(64)
+ .align 256
+
+/* 0x7d00: Reserved */
+
+ TRAP(65)
+ .align 256
+
+/* 0x7e00: Reserved */
+
+ TRAP(66)
+ .align 256
+
+/* 0x7f00: Reserved */
+
+ TRAP(67)
+ .align 256
+
+ .section .data.vhpt,"aw"
+
+ .global ia64_vhpt
+
+ .align 32768
+ia64_vhpt: .quad 0
+ .align 32768
+
+ .text
+
+#define rIIP r31
+#define rIPSR r30
+#define rISR r29
+#define rIFA r28
+#define rPR r27
+#define rSP r26
+#define rIFS r25
+#define rR1 r24
+#define rR2 r23
+#define rBSPSTORE r22
+#define rRNAT r21
+#define rBSP r27 /* overlay rPR */
+#define rRSC r20
+#define rPFS r19
+#define rB0 r31 /* overlay rIIP */
+
+/*
+ * exception_return: restore interrupted state
+ *
+ * Arguments:
+ * sp trapframe pointer
+ *
+ */
+LEAF(exception_return, 0)
+
+ rsm psr.ic|psr.dt // disable interrupt collection and vm
+ ;;
+ srlz.d
+ dep r3=0,sp,61,3 // physical address
+ ;;
+ add r1=SIZEOF_TRAPFRAME-16,r3 // r1=&tf_f[FRAME_F15]
+ add r2=SIZEOF_TRAPFRAME-32,r3 // r2=&tf_f[FRAME_F14]
+ ;;
+ ldf.fill f15=[r1],-32 // r1=&tf_f[FRAME_F13]
+ ldf.fill f14=[r2],-32 // r2=&tf_f[FRAME_F12]
+ ;;
+ ldf.fill f13=[r1],-32 // r1=&tf_f[FRAME_F11]
+ ldf.fill f12=[r2],-32 // r2=&tf_f[FRAME_F10]
+ ;;
+ ldf.fill f11=[r1],-32 // r1=&tf_f[FRAME_F9]
+ ldf.fill f10=[r2],-32 // r2=&tf_f[FRAME_F8]
+ ;;
+ ldf.fill f9=[r1],-32 // r1=&tf_f[FRAME_F7]
+ ldf.fill f8=[r2],-32 // r2=&tf_f[FRAME_F6]
+ ;;
+ ldf.fill f7=[r1],-24 // r1=&tf_r[FRAME_R31]
+ ldf.fill f6=[r2],-24 // r2=&tf_r[FRAME_R30]
+ ;;
+ ld8.fill r31=[r1],-16 // r1=&tf_r[FRAME_R29]
+ ld8.fill r30=[r2],-16 // r2=&tf_r[FRAME_R28]
+ ;;
+ ld8.fill r29=[r1],-16 // r1=&tf_r[FRAME_R27]
+ ld8.fill r28=[r2],-16 // r2=&tf_r[FRAME_R26]
+ ;;
+ ld8.fill r27=[r1],-16 // r1=&tf_r[FRAME_R25]
+ ld8.fill r26=[r2],-16 // r2=&tf_r[FRAME_R24]
+ ;;
+ ld8.fill r25=[r1],-16 // r1=&tf_r[FRAME_R23]
+ ld8.fill r24=[r2],-16 // r2=&tf_r[FRAME_R22]
+ ;;
+ ld8.fill r23=[r1],-16 // r1=&tf_r[FRAME_R21]
+ ld8.fill r22=[r2],-16 // r2=&tf_r[FRAME_R20]
+ ;;
+ ld8.fill r21=[r1],-16 // r1=&tf_r[FRAME_R19]
+ ld8.fill r20=[r2],-16 // r2=&tf_r[FRAME_R18]
+ ;;
+ ld8.fill r19=[r1],-16 // r1=&tf_r[FRAME_R17]
+ ld8.fill r18=[r2],-16 // r2=&tf_r[FRAME_R16]
+ ;;
+ ld8.fill r17=[r1],-16 // r1=&tf_r[FRAME_R15]
+ ld8.fill r16=[r2],-16 // r2=&tf_r[FRAME_R14]
+ ;;
+ bsw.0 // switch to bank 0
+ ;;
+ ld8.fill r15=[r1],-16 // r1=&tf_r[FRAME_R13]
+ ld8.fill r14=[r2],-16 // r2=&tf_r[FRAME_R12]
+ ;;
+ ld8.fill r13=[r1],-16 // r1=&tf_r[FRAME_R11]
+ ld8.fill r12=[r2],-16 // r2=&tf_r[FRAME_R10]
+ ;;
+ ld8.fill r11=[r1],-16 // r1=&tf_r[FRAME_R9]
+ ld8.fill r10=[r2],-16 // r2=&tf_r[FRAME_R8]
+ ;;
+ ld8.fill r9=[r1],-16 // r1=&tf_r[FRAME_R7]
+ ld8.fill r8=[r2],-16 // r2=&tf_r[FRAME_R6]
+ ;;
+ ld8.fill r7=[r1],-16 // r1=&tf_r[FRAME_R5]
+ ld8.fill r6=[r2],-16 // r2=&tf_r[FRAME_R4]
+ ;;
+ ld8.fill r5=[r1],-16 // r1=&tf_r[FRAME_R3]
+ ld8.fill r4=[r2],-16 // r2=&tf_r[FRAME_R2]
+ ;;
+ ld8.fill r3=[r1],-16 // r1=&tf_r[FRAME_R1]
+ ld8.fill rR2=[r2],-16 // r2=&tf_b[7]
+ ;;
+ ld8.fill rR1=[r1],-16 // r1=&tf_b[6]
+ ld8 r16=[r2],-16 // r2=&tf_b[5]
+ ;;
+ mov b7=r16
+ ld8 r18=[r1],-16 // r1=&tf_b[4]
+ ld8 r19=[r2],-16 // r2=&tf_b[3]
+ ;;
+ mov b6=r18
+ mov b5=r19
+ ld8 r16=[r1],-16 // r1=&tf_b[2]
+ ld8 r17=[r2],-16 // r2=&tf_b[1]
+ ;;
+ mov b4=r16
+ mov b3=r17
+ ld8 r18=[r1],-16 // r1=&tf_b[0]
+ ld8 r19=[r2],-16 // r2=&tf_ar_fpsr
+ ;;
+ mov b2=r18
+ mov b1=r19
+ ld8 r16=[r1],-16 // r1=&tf_ar_ccv
+ ld8 r17=[r2],-16 // r2=&tf_ar_unat
+ ;;
+ mov b0=r16
+ mov ar.fpsr=r17
+ ld8 r18=[r1],-16 // r1=&tf_ar_bsp
+ ld8 r19=[r2],-16 // r2=&tf_ar_rnat
+ ;;
+ mov ar.ccv=r18
+ mov ar.unat=r19
+ ld8 rBSP=[r1],-16 // r1=&tf_ar_bspstore
+ ld8 rRNAT=[r2],-16 // r2=&tf_cr_ifs
+ ;;
+ ld8 rBSPSTORE=[r1],-16 // r1=&tf_cr_pfs
+ ld8 rIFS=[r2],-16 // r2=&tf_ar_rsc
+ ;;
+ ld8 rPFS=[r1],-16 // r1=&tf_pr
+ ld8 rRSC=[r2],-16 // r2=&tf_cr_ifa
+ ;;
+ ld8 rPR=[r1],-16 // r1=&tf_cr_isr
+ ld8 rIFA=[r2],-16 // r2=&tf_cr_ipsr
+ ;;
+ ld8 rIIP=[r1]
+ ld8 rIPSR=[r2]
+ ;;
+ extr.u r16=rIPSR,32,2 // extract ipsr.cpl
+ ;;
+ cmp.eq p1,p2=r0,r17 // test for kernel mode
+ ;;
+(p2) br.cond.dpnt.few 1f // don't switch bs if not user
+ ;;
+ sub r16=rBSP,rBSPSTORE // how many bytes to load?
+ ;;
+ shl r16=r16,16 // value for ar.rsc
+ ;;
+ mov ar.rsc=r16 // setup for loadrs
+ ;;
+ loadrs // load user regs from kernel bs
+ ;;
+ mov ar.bspstore=rBSPSTORE
+ ;;
+ mov ar.rnat=rRNAT
+
+1: mov r1=rR1
+ mov r2=rR2
+ mov cr.ifs=rIFS
+ mov ar.rsc=rRSC
+ mov pr=rPR,0x1ffff
+ mov cr.ifa=rIFA
+ mov cr.iip=rIIP
+ mov cr.ipsr=rIPSR
+ ;;
+ rfi
+
+ END(exception_return)
+
+
+/*
+ * exception_save_regs: save interrupted state
+ *
+ * Arguments:
+ * b0 return address
+ * r16 saved b0
+ *
+ * Return:
+ * sp kernel stack pointer
+ */
+LEAF(exception_save_regs, 0)
+ rsm psr.dt // turn off data translations
+ ;;
+ srlz.d // serialize
+ ;;
+ mov rIIP=cr.iip
+ mov rIPSR=cr.ipsr
+ mov rISR=cr.isr
+ mov rIFA=cr.ifa
+ mov rPR=pr
+ ;;
+ extr.u r17=rIPSR,32,2 // extract ipsr.cpl
+ ;;
+ cmp.eq p1,p2=r0,r17 // test for kernel mode
+ ;;
+ mov rSP=sp // save sp
+ ;;
+(p2) mov sp=ar.k6 // and switch to kernel stack
+ mov r16=SIZEOF_TRAPFRAME
+ ;;
+ sub sp=sp,r16 // reserve trapframe
+ ;;
+ mov rR1=r1
+ mov rR2=r2
+ ;;
+ dep r1=0,sp,61,3 // r1=&tf_cr_iip
+ ;;
+ add r2=8,r1 // r2=&tf_cr_ipsr
+ ;;
+ st8 [r1]=rIIP,16 // r1=&tf_cr_isr
+ st8 [r2]=rIPSR,16 // r2=&tf_cr_ifa
+ ;;
+ st8 [r1]=rISR,16 // r1=&tf_pr
+ st8 [r2]=rIFA,16 // r2=&tf_ar_rsc
+ ;;
+ st8 [r1]=rPR,16 // r1=&tf_cr_pfs
+
+ mov rB0=r16
+ mov rRSC=ar.rsc
+ mov rPFS=ar.pfs
+ cover
+(p2) mov r16=ar.k7 // curproc
+ mov rIFS=cr.ifs
+ ;;
+(p2) add r16=P_ADDR,r16 // &curproc->p_addr
+ mov ar.rsc=0
+ ;;
+(p2) ld8 r16=[r16] // curproc->p_addr
+ mov rBSPSTORE=ar.bspstore
+ ;;
+(p2) add r16=SIZEOF_USER,r16 // kernel backing store
+ mov rRNAT=ar.rnat
+ mov rBSP=ar.bsp
+ ;;
+(p2) mov ar.bspstore=r16 // switch bspstore
+ st8 [r2]=rRSC,16 // r2=&tf_cr_ifs
+ ;;
+ st8 [r1]=rPFS,16 // r1=&tf_ar_bspstore
+ st8 [r2]=rIFS,16 // r2=&tf_ar_rnat
+ ;;
+ st8 [r1]=rBSPSTORE,16 // r1=&tf_ar_bsp
+ st8 [r2]=rRNAT,16 // r2=&tf_ar_unat
+ ;;
+ st8 [r1]=rBSP,16 // r1=&tf_ar_ccv
+ mov ar.rsc=3 // switch RSE back on
+ mov r16=ar.unat
+ ;;
+ mov r17=ar.ccv
+ st8 [r2]=r16,16 // r2=&tf_ar_fpsr
+ mov r18=ar.fpsr
+ ;;
+ st8 [r1]=r17,16 // r1=&tf_b[0]
+ st8 [r2]=r18,16 // r2=&tf_b[1]
+ mov r17=b1
+ ;;
+ st8 [r1]=rB0,16 // r1=&tf_b[2]
+ mov r18=b2
+ st8 [r2]=r17,16 // r2=&tf_b[3]
+ ;;
+ mov r17=b3
+ st8 [r1]=r18,16 // r1=&tf_b[4]
+ ;;
+ mov r18=b4
+ st8 [r2]=r17,16 // r2=&tf_b[5]
+ ;;
+ mov r17=b5
+ st8 [r1]=r18,16 // r1=&tf_b[6]
+ ;;
+ mov r18=b6
+ st8 [r2]=r17,16 // r2=&tf_b[7]
+ ;;
+ mov r17=b7
+ st8 [r1]=r18,16 // r1=&tf_r[FRAME_R1]
+ ;;
+ st8 [r2]=r17,16 // r2=&tf_r[FRAME_R2]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=rR1,16 // r1=&tf_r[FRAME_R3]
+ .mem.offset 8,0
+ st8.spill [r2]=rR2,16 // r2=&tf_r[FRAME_R4]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r3,16 // r1=&tf_r[FRAME_R5]
+ .mem.offset 8,0
+ st8.spill [r2]=r4,16 // r2=&tf_r[FRAME_R6]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r5,16 // r1=&tf_r[FRAME_R7]
+ .mem.offset 8,0
+ st8.spill [r2]=r6,16 // r2=&tf_r[FRAME_R8]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r7,16 // r1=&tf_r[FRAME_R9]
+ .mem.offset 8,0
+ st8.spill [r2]=r8,16 // r2=&tf_r[FRAME_R10]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r9,16 // r1=&tf_r[FRAME_R11]
+ .mem.offset 8,0
+ st8.spill [r2]=r10,16 // r2=&tf_r[FRAME_SP]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r11,16 // r1=&tf_r[FRAME_R13]
+ .mem.offset 8,0
+ st8.spill [r2]=rSP,16 // r2=&tf_r[FRAME_R14]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r13,16 // r1=&tf_r[FRAME_R15]
+ .mem.offset 8,0
+ st8.spill [r2]=r14,16 // r2=&tf_r[FRAME_R16]
+ ;;
+ .mem.offset 0,0
+ st8.spill [r1]=r15,16 // r1=&tf_r[FRAME_R17]
+ ;;
+ bsw.1 // switch to bank 1
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r16,16 // r2=&tf_r[FRAME_R18]
+ .mem.offset 0,0
+ st8.spill [r1]=r17,16 // r1=&tf_r[FRAME_R19]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r18,16 // r2=&tf_r[FRAME_R20]
+ .mem.offset 0,0
+ st8.spill [r1]=r19,16 // r1=&tf_r[FRAME_R21]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r20,16 // r2=&tf_r[FRAME_R22]
+ .mem.offset 0,0
+ st8.spill [r1]=r21,16 // r1=&tf_r[FRAME_R23]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r22,16 // r2=&tf_r[FRAME_R24]
+ .mem.offset 0,0
+ st8.spill [r1]=r23,16 // r1=&tf_r[FRAME_R25]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r24,16 // r2=&tf_r[FRAME_R26]
+ .mem.offset 0,0
+ st8.spill [r1]=r25,16 // r1=&tf_r[FRAME_R27]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r26,16 // r2=&tf_r[FRAME_R28]
+ .mem.offset 0,0
+ st8.spill [r1]=r27,16 // r1=&tf_r[FRAME_R29]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r28,16 // r2=&tf_r[FRAME_R30]
+ .mem.offset 0,0
+ st8.spill [r1]=r29,16 // r1=&tf_r[FRAME_R31]
+ ;;
+ .mem.offset 8,0
+ st8.spill [r2]=r30,24 // r2=&tf_f[FRAME_F6]
+ .mem.offset 0,0
+ st8.spill [r1]=r31,32 // r1=&tf_f[FRAME_F7]
+ ;;
+ stf.spill [r2]=f6,32 // r2=&tf_f[FRAME_F8]
+ stf.spill [r1]=f7,32 // r1=&tf_f[FRAME_F9]
+ ;;
+ stf.spill [r2]=f8,32 // r2=&tf_f[FRAME_F10]
+ stf.spill [r1]=f9,32 // r1=&tf_f[FRAME_F11]
+ ;;
+ stf.spill [r2]=f10,32 // r2=&tf_f[FRAME_F12]
+ stf.spill [r1]=f11,32 // r1=&tf_f[FRAME_F13]
+ ;;
+ stf.spill [r2]=f12,32 // r2=&tf_f[FRAME_F14]
+ stf.spill [r1]=f13,32 // r1=&tf_f[FRAME_F15]
+ ;;
+ stf.spill [r2]=f14 //
+ stf.spill [r1]=f15 //
+ ;;
+ movl r1=__gp // kernel globals
+ ssm psr.ic|psr.dt // enable interrupts & translation
+ ;;
+ srlz.d // serialize
+
+ br.ret.sptk.few b0
+
+ END(exception_save_regs)
+
+
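
Both exception_return and exception_save_regs decide whether to switch register backing stores by pulling psr.cpl out of the saved ipsr with extr.u (cpl is the 2-bit field at bit 32; cpl 0 is kernel, anything else is user). A rough C sketch of that test, for illustration only -- the EX_* and ex_* names below are invented and not part of this commit:

#include <sys/types.h>

#define	EX_PSR_CPL_SHIFT	32	/* psr.cpl lives at bits 32-33 */
#define	EX_PSR_CPL_MASK		0x3

/* Return non-zero when the saved ipsr describes a user-mode interrupt. */
static __inline int
ex_ipsr_is_user(u_int64_t ipsr)
{
	return (((ipsr >> EX_PSR_CPL_SHIFT) & EX_PSR_CPL_MASK) != 0);
}
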
diff --git a/sys/ia64/ia64/genassym.c b/sys/ia64/ia64/genassym.c
new file mode 100644
index 0000000..e94a2ab
--- /dev/null
+++ b/sys/ia64/ia64/genassym.c
@@ -0,0 +1,109 @@
+/*-
+ * Copyright (c) 1982, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
+ * $FreeBSD$
+ */
+
+#include <stddef.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/assym.h>
+#include <sys/proc.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/errno.h>
+#include <sys/proc.h>
+#include <sys/mount.h>
+#include <sys/socket.h>
+#include <sys/resource.h>
+#include <sys/resourcevar.h>
+#include <sys/ucontext.h>
+#include <machine/frame.h>
+#include <machine/mutex.h>
+#include <sys/vmmeter.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <sys/user.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <nfs/nfsv2.h>
+#include <nfs/rpcv2.h>
+#include <nfs/nfs.h>
+#include <nfs/nfsdiskless.h>
+
+ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock));
+ASSYM(MTX_RECURSE, offsetof(struct mtx, mtx_recurse));
+ASSYM(MTX_SAVEPSR, offsetof(struct mtx, mtx_savepsr));
+ASSYM(MTX_UNOWNED, MTX_UNOWNED);
+
+ASSYM(P_ADDR, offsetof(struct proc, p_addr));
+ASSYM(P_MD_FLAGS, offsetof(struct proc, p_md.md_flags));
+ASSYM(P_MD_PCBPADDR, offsetof(struct proc, p_md.md_pcbpaddr));
+
+ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
+
+ASSYM(SIZEOF_USER, sizeof(struct user));
+
+ASSYM(U_PCB_R4, offsetof(struct user, u_pcb.pcb_r4));
+ASSYM(U_PCB_R5, offsetof(struct user, u_pcb.pcb_r5));
+ASSYM(U_PCB_R6, offsetof(struct user, u_pcb.pcb_r6));
+ASSYM(U_PCB_R7, offsetof(struct user, u_pcb.pcb_r7));
+
+ASSYM(U_PCB_F2, offsetof(struct user, u_pcb.pcb_f2));
+ASSYM(U_PCB_F3, offsetof(struct user, u_pcb.pcb_f3));
+ASSYM(U_PCB_F4, offsetof(struct user, u_pcb.pcb_f4));
+ASSYM(U_PCB_F5, offsetof(struct user, u_pcb.pcb_f5));
+
+ASSYM(U_PCB_OLD_UNAT, offsetof(struct user, u_pcb.pcb_old_unat));
+ASSYM(U_PCB_SP, offsetof(struct user, u_pcb.pcb_sp));
+ASSYM(U_PCB_PFS, offsetof(struct user, u_pcb.pcb_pfs));
+ASSYM(U_PCB_BSPSTORE, offsetof(struct user, u_pcb.pcb_bspstore));
+
+ASSYM(U_PCB_UNAT, offsetof(struct user, u_pcb.pcb_unat));
+ASSYM(U_PCB_RNAT, offsetof(struct user, u_pcb.pcb_rnat));
+ASSYM(U_PCB_PR, offsetof(struct user, u_pcb.pcb_pr));
+ASSYM(U_PCB_IIP, offsetof(struct user, u_pcb.pcb_iip));
+
+ASSYM(UC_MCONTEXT_MC_AR_BSP, offsetof(ucontext_t, uc_mcontext.mc_ar_bsp));
+ASSYM(UC_MCONTEXT_MC_AR_RNAT, offsetof(ucontext_t, uc_mcontext.mc_ar_rnat));
+
+ASSYM(EFAULT, EFAULT);
+ASSYM(ENAMETOOLONG, ENAMETOOLONG);
+
+ASSYM(SIZEOF_TRAPFRAME, sizeof(struct trapframe));
diff --git a/sys/ia64/ia64/genassym.sh b/sys/ia64/ia64/genassym.sh
new file mode 100644
index 0000000..c8fd249
--- /dev/null
+++ b/sys/ia64/ia64/genassym.sh
@@ -0,0 +1,53 @@
+#!/bin/sh
+# $FreeBSD$
+
+# Grrr, this should use stdin and stdout, but is encrufted for compatibility.
+
+usage() {
+ echo "usage: genassym [-o outfile] objfile"
+ exit 1
+}
+
+outfile=/dev/stdout
+while getopts "o:" option
+do
+ case "$option" in
+ o) outfile="$OPTARG";;
+ *) usage;;
+ esac
+done
+shift $(($OPTIND - 1))
+case $# in
+1) ;;
+*) usage;;
+esac
+
+ia64-unknown-linux-nm "$1" | awk '
+/ C .*sign$/ {
+ sign = substr($1, length($1) - 3, 4)
+ sub("^0*", "", sign)
+ if (sign != "")
+ sign = "-"
+}
+/ C .*w0$/ {
+ w0 = substr($1, length($1) - 3, 4)
+}
+/ C .*w1$/ {
+ w1 = substr($1, length($1) - 3, 4)
+}
+/ C .*w2$/ {
+ w2 = substr($1, length($1) - 3, 4)
+}
+/ C .*w3$/ {
+ w3 = substr($1, length($1) - 3, 4)
+ w = w3 w2 w1 w0
+ sub("^0*", "", w)
+ if (w == "")
+ w = "0"
+ sub("w3$", "", $3)
+ # This still has minor problems representing INT_MIN, etc. E.g.,
+ # with 32-bit 2''s complement ints, this prints -0x80000000, which
+ # has the wrong type (unsigned int).
+ printf("#define\t%s\t%s0x%s\n", $3, sign, w)
+}
+' 3>"$outfile" >&3 3>&-
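
genassym.c and genassym.sh cooperate to produce assym.s, the header pulled into the assembly sources: each ASSYM() entry becomes a set of symbols in the compiled object file, and the nm | awk pipeline above turns them back into #define lines (the script is run as "genassym.sh [-o outfile] objfile", per its usage message). For illustration only, with made-up numbers, the generated output has this shape:

/* Illustrative assym.s output; the values below are invented, the real
 * ones depend on the ia64 structure layouts. */
#define	P_ADDR	0x90
#define	SIZEOF_TRAPFRAME	0x2d0
#define	SIZEOF_USER	0x1000
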
diff --git a/sys/ia64/ia64/in_cksum.c b/sys/ia64/ia64/in_cksum.c
new file mode 100644
index 0000000..4b7fca2
--- /dev/null
+++ b/sys/ia64/ia64/in_cksum.c
@@ -0,0 +1,249 @@
+/* $FreeBSD$ */
+/* $NetBSD: in_cksum.c,v 1.7 1997/09/02 13:18:15 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1988, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1996
+ * Matt Thomas <matt@3am-software.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers
+ * (Portable Alpha version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
+#define REDUCE32 \
+ { \
+ q_util.q = sum; \
+ sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ }
+#define REDUCE16 \
+ { \
+ q_util.q = sum; \
+ l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ sum = l_util.s[0] + l_util.s[1]; \
+ ADDCARRY(sum); \
+ }
+
+static const u_int32_t in_masks[] = {
+ /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/
+ 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
+ 0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
+ 0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
+ 0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
+};
+
+union l_util {
+ u_int16_t s[2];
+ u_int32_t l;
+};
+union q_util {
+ u_int16_t s[4];
+ u_int32_t l[2];
+ u_int64_t q;
+};
+
+u_int64_t in_cksumdata __P((caddr_t buf, int len));
+
+u_int64_t
+in_cksumdata(buf, len)
+ register caddr_t buf;
+ register int len;
+{
+ const u_int32_t *lw = (u_int32_t *) buf;
+ u_int64_t sum = 0;
+ u_int64_t prefilled;
+ int offset;
+ union q_util q_util;
+
+ if ((3 & (long) lw) == 0 && len == 20) {
+ sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+ REDUCE32;
+ return sum;
+ }
+
+ if ((offset = 3 & (long) lw) != 0) {
+ const u_int32_t *masks = in_masks + (offset << 2);
+ lw = (u_int32_t *) (((long) lw) - offset);
+ sum = *lw++ & masks[len >= 3 ? 3 : len];
+ len -= 4 - offset;
+ if (len <= 0) {
+ REDUCE32;
+ return sum;
+ }
+ }
+#if 0
+ /*
+ * Force to cache line boundary.
+ */
+ offset = 32 - (0x1f & (long) lw);
+ if (offset < 32 && len > offset) {
+ len -= offset;
+ if (4 & offset) {
+ sum += (u_int64_t) lw[0];
+ lw += 1;
+ }
+ if (8 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1];
+ lw += 2;
+ }
+ if (16 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ }
+#endif
+	/*
+	 * Touch the next cache line ("prefilling") to start its load,
+	 * then add up the current cache line and carry the prefetched
+	 * word over to the next loop iteration.
+	 */
+ prefilled = lw[0];
+ while ((len -= 32) >= 4) {
+ u_int64_t prefilling = lw[8];
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ prefilled = prefilling;
+ }
+ if (len >= 0) {
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ } else {
+ len += 32;
+ }
+ while ((len -= 16) >= 0) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ len += 16;
+ while ((len -= 4) >= 0) {
+ sum += (u_int64_t) *lw++;
+ }
+ len += 4;
+ if (len > 0)
+ sum += (u_int64_t) (in_masks[len] & *lw);
+ REDUCE32;
+ return sum;
+}
+
+u_short
+in_addword(u_short a, u_short b)
+{
+ u_int64_t sum = a + b;
+
+ ADDCARRY(sum);
+ return (sum);
+}
+
+u_short
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+{
+ u_int64_t sum;
+ union q_util q_util;
+ union l_util l_util;
+
+ sum = (u_int64_t) a + b + c;
+ REDUCE16;
+ return (sum);
+}
+
+u_short
+in_cksum_skip(m, len, skip)
+ struct mbuf *m;
+ int len;
+ int skip;
+{
+ u_int64_t sum = 0;
+ int mlen = 0;
+ int clen = 0;
+ caddr_t addr;
+ union q_util q_util;
+ union l_util l_util;
+
+ len -= skip;
+ for (; skip && m; m = m->m_next) {
+ if (m->m_len > skip) {
+ mlen = m->m_len - skip;
+ addr = mtod(m, caddr_t) + skip;
+ goto skip_start;
+ } else {
+ skip -= m->m_len;
+ }
+ }
+
+ for (; m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ mlen = m->m_len;
+ addr = mtod(m, caddr_t);
+skip_start:
+ if (len < mlen)
+ mlen = len;
+ if ((clen ^ (long) addr) & 1)
+ sum += in_cksumdata(addr, mlen) << 8;
+ else
+ sum += in_cksumdata(addr, mlen);
+
+ clen += mlen;
+ len -= mlen;
+ }
+ REDUCE16;
+ return (~sum & 0xffff);
+}
+
+u_int in_cksum_hdr(ip)
+ const struct ip *ip;
+{
+ u_int64_t sum = in_cksumdata((caddr_t) ip, sizeof(struct ip));
+ union q_util q_util;
+ union l_util l_util;
+ REDUCE16;
+ return (~sum & 0xffff);
+}
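
The file above provides the standard in_cksum interfaces for ia64: in_cksumdata() accumulates 32-bit words into a 64-bit sum and REDUCE32/REDUCE16 fold the carries back into 16 bits. A hedged usage sketch, not part of this file (the example_* names are invented): in_cksum_hdr() both generates and verifies an IPv4 header checksum, because a header carrying a correct checksum sums to zero.

/*
 * Sketch only: typical callers of in_cksum_hdr() as defined above.
 * Assumes a header without IP options, which is all in_cksum_hdr()
 * handles (it checksums exactly sizeof(struct ip) bytes).
 */
#include <sys/param.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <machine/in_cksum.h>

static void
example_ip_set_cksum(struct ip *ip)
{
	ip->ip_sum = 0;			/* field must be zero while summing */
	ip->ip_sum = in_cksum_hdr(ip);
}

static int
example_ip_cksum_ok(const struct ip *ip)
{
	return (in_cksum_hdr(ip) == 0);	/* intact headers sum to zero */
}
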
diff --git a/sys/ia64/ia64/interrupt.c b/sys/ia64/ia64/interrupt.c
new file mode 100644
index 0000000..ec0ac51
--- /dev/null
+++ b/sys/ia64/ia64/interrupt.c
@@ -0,0 +1,187 @@
+/* $FreeBSD$ */
+/* $NetBSD: interrupt.c,v 1.23 1998/02/24 07:38:01 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Authors: Keith Bostic, Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center.
+ * Redistribute and modify at will, leaving only this additional copyright
+ * notice.
+ */
+
+#include "opt_ddb.h"
+
+#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
+
+/* __KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.23 1998/02/24 07:38:01 thorpej Exp $");*/
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/vmmeter.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <sys/ktr.h>
+
+#include <machine/reg.h>
+#include <machine/frame.h>
+#include <machine/intr.h>
+#include <machine/mutex.h>
+
+#ifdef EVCNT_COUNTERS
+struct evcnt clock_intr_evcnt; /* event counter for clock intrs. */
+#else
+#include <machine/intrcnt.h>
+#endif
+
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
+volatile int mc_expected, mc_received;
+
+static void
+dummy_perf(unsigned long vector, struct trapframe *framep)
+{
+ printf("performance interrupt!\n");
+}
+
+void (*perf_irq)(unsigned long, struct trapframe *) = dummy_perf;
+
+
+static u_int schedclk2;
+
+void
+interrupt(a0, a1, a2, framep)
+ unsigned long a0, a1, a2;
+ struct trapframe *framep;
+{
+#if 0
+ /*
+ * Find our per-cpu globals.
+ */
+ globalp = (struct globaldata *) alpha_pal_rdval();
+
+ atomic_add_int(&PCPU_GET(intr_nesting_level), 1);
+ {
+ struct proc* p = curproc;
+ if (!p) p = &proc0;
+ if ((caddr_t) framep < (caddr_t) p->p_addr + 1024) {
+ mtx_enter(&Giant, MTX_DEF);
+ panic("possible stack overflow\n");
+ }
+ }
+
+ framep->tf_regs[FRAME_TRAPARG_A0] = a0;
+ framep->tf_regs[FRAME_TRAPARG_A1] = a1;
+ framep->tf_regs[FRAME_TRAPARG_A2] = a2;
+ switch (a0) {
+ case ALPHA_INTR_XPROC: /* interprocessor interrupt */
+ CTR0(KTR_INTR|KTR_SMP, "interprocessor interrupt");
+ smp_handle_ipi(framep); /* note: lock not taken */
+ break;
+
+ case ALPHA_INTR_CLOCK: /* clock interrupt */
+ CTR0(KTR_INTR, "clock interrupt");
+ if (PCPU_GET(cpuno) != hwrpb->rpb_primary_cpu_id) {
+ CTR0(KTR_INTR, "ignoring clock on secondary");
+ return;
+ }
+
+ mtx_enter(&Giant, MTX_DEF);
+ cnt.v_intr++;
+#ifdef EVCNT_COUNTERS
+ clock_intr_evcnt.ev_count++;
+#else
+ intrcnt[INTRCNT_CLOCK]++;
+#endif
+ if (platform.clockintr){
+ (*platform.clockintr)(framep);
+ /* divide hz (1024) by 8 to get stathz (128) */
+ if((++schedclk2 & 0x7) == 0)
+ statclock((struct clockframe *)framep);
+ }
+ mtx_exit(&Giant, MTX_DEF);
+ break;
+
+ case ALPHA_INTR_ERROR: /* Machine Check or Correctable Error */
+ mtx_enter(&Giant, MTX_DEF);
+ a0 = alpha_pal_rdmces();
+ if (platform.mcheck_handler)
+ (*platform.mcheck_handler)(a0, framep, a1, a2);
+ else
+ machine_check(a0, framep, a1, a2);
+ mtx_exit(&Giant, MTX_DEF);
+ break;
+
+ case ALPHA_INTR_DEVICE: /* I/O device interrupt */
+ mtx_enter(&Giant, MTX_DEF);
+ cnt.v_intr++;
+ if (platform.iointr)
+ (*platform.iointr)(framep, a1);
+ mtx_exit(&Giant, MTX_DEF);
+ break;
+
+ case ALPHA_INTR_PERF: /* interprocessor interrupt */
+ mtx_enter(&Giant, MTX_DEF);
+ perf_irq(a1, framep);
+ mtx_exit(&Giant, MTX_DEF);
+ break;
+
+ case ALPHA_INTR_PASSIVE:
+#if 0
+ printf("passive release interrupt vec 0x%lx (ignoring)\n", a1);
+#endif
+ break;
+
+ default:
+ mtx_enter(&Giant, MTX_DEF);
+ panic("unexpected interrupt: type 0x%lx vec 0x%lx a2 0x%lx\n",
+ a0, a1, a2);
+ /* NOTREACHED */
+ }
+ atomic_subtract_int(&PCPU_GET(intr_nesting_level), 1);
+#endif
+}
+
+
+int
+badaddr(addr, size)
+ void *addr;
+ size_t size;
+{
+ return(badaddr_read(addr, size, NULL));
+}
+
+int
+badaddr_read(addr, size, rptr)
+ void *addr;
+ size_t size;
+ void *rptr;
+{
+ return (1); /* XXX implement */
+}
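
The interrupt() body above is still compiled out with #if 0, but the perf_irq hook is live: it defaults to dummy_perf() and a driver can repoint it at its own handler. A minimal sketch of doing so -- the my_* names are invented, not part of this commit:

#include <sys/types.h>
#include <machine/frame.h>

extern void (*perf_irq)(unsigned long, struct trapframe *);

/* Hypothetical handler for performance-monitoring interrupts. */
static void
my_perf_intr(unsigned long vector, struct trapframe *framep)
{
	/* service the performance counters here */
}

static void
my_perf_attach(void)
{
	perf_irq = my_perf_intr;	/* install over dummy_perf */
}
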
diff --git a/sys/ia64/ia64/ipl_funcs.c b/sys/ia64/ia64/ipl_funcs.c
new file mode 100644
index 0000000..baf6cb2
--- /dev/null
+++ b/sys/ia64/ia64/ipl_funcs.c
@@ -0,0 +1,222 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/ktr.h>
+#include <sys/interrupt.h>
+#include <machine/ipl.h>
+#include <machine/cpu.h>
+#include <machine/globaldata.h>
+#include <machine/globals.h>
+#include <machine/mutex.h>
+#include <net/netisr.h>
+
+#include "sio.h"
+
+unsigned int bio_imask; /* XXX */
+unsigned int cam_imask; /* XXX */
+unsigned int net_imask; /* XXX */
+unsigned int tty_imask; /* XXX */
+
+static void swi_net(void);
+
+void (*netisrs[32]) __P((void));
+swihand_t *ihandlers[32] = { /* software interrupts */
+ swi_null, swi_net, swi_null, swi_null,
+ swi_null, softclock, swi_null, swi_null,
+ swi_null, swi_null, swi_null, swi_null,
+ swi_null, swi_null, swi_null, swi_null,
+ swi_null, swi_null, swi_null, swi_null,
+ swi_null, swi_null, swi_null, swi_null,
+ swi_null, swi_null, swi_null, swi_null,
+ swi_null, swi_null, swi_null, swi_null,
+};
+
+u_int32_t netisr;
+u_int32_t ipending;
+u_int32_t idelayed;
+
+#define getcpl() (alpha_pal_rdps() & ALPHA_PSL_IPL_MASK)
+
+
+static void atomic_setbit(u_int32_t* p, u_int32_t bit)
+{
+ *p |= bit;
+}
+
+static u_int32_t atomic_readandclear(u_int32_t* p)
+{
+ u_int32_t v = *p;
+ *p = 0;
+ return v;
+}
+
+void
+swi_null()
+{
+ /* No interrupt registered, do nothing */
+}
+
+void
+swi_generic()
+{
+ /* Just a placeholder, we call swi_dispatcher directly */
+ panic("swi_generic() called");
+}
+
+static void
+swi_net()
+{
+ u_int32_t bits = atomic_readandclear(&netisr);
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ if (bits & 1)
+ netisrs[i]();
+ bits >>= 1;
+ }
+}
+
+void
+do_sir()
+{
+ u_int32_t pend;
+ int i;
+
+ mtx_enter(&Giant, MTX_DEF);
+
+ atomic_add_int(&PCPU_GET(intr_nesting_level), 1);
+ splsoft();
+ while ((pend = atomic_readandclear(&ipending)) != 0) {
+ for (i = 0; pend && i < 32; i++) {
+ if (pend & (1 << i)) {
+ if (ihandlers[i] == swi_generic)
+ swi_dispatcher(i);
+ else
+ ihandlers[i]();
+ pend &= ~(1 << i);
+ }
+ }
+ }
+ atomic_subtract_int(&PCPU_GET(intr_nesting_level), 1);
+
+ mtx_exit(&Giant, MTX_DEF);
+}
+
+#define GENSET(name, ptr, bit) \
+ \
+void name(void) \
+{ \
+ atomic_setbit(ptr, bit); \
+}
+
+GENSET(setdelayed, &ipending, atomic_readandclear(&idelayed))
+GENSET(setsofttty, &ipending, 1 << SWI_TTY)
+GENSET(setsoftnet, &ipending, 1 << SWI_NET)
+GENSET(setsoftcamnet, &ipending, 1 << SWI_CAMNET)
+GENSET(setsoftcambio, &ipending, 1 << SWI_CAMBIO)
+GENSET(setsoftvm, &ipending, 1 << SWI_VM)
+GENSET(setsofttq, &ipending, 1 << SWI_TQ)
+GENSET(setsoftclock, &ipending, 1 << SWI_CLOCK)
+
+GENSET(schedsofttty, &idelayed, 1 << SWI_TTY)
+GENSET(schedsoftnet, &idelayed, 1 << SWI_NET)
+GENSET(schedsoftcamnet, &idelayed, 1 << SWI_CAMNET)
+GENSET(schedsoftcambio, &idelayed, 1 << SWI_CAMBIO)
+GENSET(schedsoftvm, &idelayed, 1 << SWI_VM)
+GENSET(schedsofttq, &idelayed, 1 << SWI_TQ)
+GENSET(schedsoftclock, &idelayed, 1 << SWI_CLOCK)
+
+#ifdef INVARIANT_SUPPORT
+
+#define SPLASSERT_IGNORE 0
+#define SPLASSERT_LOG 1
+#define SPLASSERT_PANIC 2
+
+static int splassertmode = SPLASSERT_LOG;
+SYSCTL_INT(_kern, OID_AUTO, splassertmode, CTLFLAG_RW,
+ &splassertmode, 0, "Set the mode of SPLASSERT");
+
+static void
+init_splassertmode(void *ignored)
+{
+ TUNABLE_INT_FETCH("kern.splassertmode", 0, splassertmode);
+}
+SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_splassertmode, NULL);
+
+static void
+splassertfail(char *str, const char *msg, char *name, int level)
+{
+ switch (splassertmode) {
+ case SPLASSERT_IGNORE:
+ break;
+ case SPLASSERT_LOG:
+ printf(str, msg, name, level);
+ printf("\n");
+ break;
+ case SPLASSERT_PANIC:
+ panic(str, msg, name, level);
+ break;
+ }
+}
+
+#define GENSPLASSERT(name, pri) \
+void \
+name##assert(const char *msg) \
+{ \
+ u_int cpl; \
+ \
+ cpl = getcpl(); \
+ if (cpl < ALPHA_PSL_IPL_##pri) \
+ splassertfail("%s: not %s, cpl == %#x", \
+ msg, __XSTRING(name) + 3, cpl); \
+}
+#else
+#define GENSPLASSERT(name, pri)
+#endif
+
+GENSPLASSERT(splbio, IO)
+GENSPLASSERT(splcam, IO)
+GENSPLASSERT(splclock, CLOCK)
+GENSPLASSERT(splhigh, HIGH)
+GENSPLASSERT(splimp, IO)
+GENSPLASSERT(splnet, IO)
+GENSPLASSERT(splsoftcam, SOFT)
+GENSPLASSERT(splsoftcambio, SOFT) /* XXX no corresponding spl for alpha */
+GENSPLASSERT(splsoftcamnet, SOFT) /* XXX no corresponding spl for alpha */
+GENSPLASSERT(splsoftclock, SOFT)
+GENSPLASSERT(splsofttty, SOFT) /* XXX no corresponding spl for alpha */
+GENSPLASSERT(splsoftvm, SOFT)
+GENSPLASSERT(splsofttq, SOFT)
+GENSPLASSERT(splstatclock, CLOCK)
+GENSPLASSERT(spltty, IO)
+GENSPLASSERT(splvm, IO)
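
GENSET() stamps out one tiny setter per software interrupt: each just sets a bit in ipending (or, for the sched* variants, in idelayed), and do_sir() later drains ipending and runs the matching ihandlers[] entry. Written out, the setsoftnet entry expands to:

/* Expansion of GENSET(setsoftnet, &ipending, 1 << SWI_NET) above. */
void
setsoftnet(void)
{
	atomic_setbit(&ipending, 1 << SWI_NET);
}
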
diff --git a/sys/ia64/ia64/locore.S b/sys/ia64/ia64/locore.S
new file mode 100644
index 0000000..8e38d40
--- /dev/null
+++ b/sys/ia64/ia64/locore.S
@@ -0,0 +1,394 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/cdefs.h>
+#include <machine/asm.h>
+#include <machine/fpu.h>
+#include <sys/syscall.h>
+#include <assym.s>
+
+#ifndef EVCNT_COUNTERS
+#define _LOCORE
+#include <machine/intrcnt.h>
+#endif
+
+/*
+ * Perform actions necessary to switch to a new context. The
+ * hwpcb should be in a0.
+ */
+#define SWITCH_CONTEXT \
+ /* Make a note of the context we're running on. */ \
+ stq a0, curpcb ; \
+ \
+ /* Swap in the new context. */ \
+ call_pal PAL_OSF1_swpctx
+
+ .text
+
+/*
+ * Not really a leaf but we can't return.
+ */
+LEAF(locorestart, 1)
+
+ movl r8=ia64_vector_table /* set up IVT early */
+ movl r9=ia64_vhpt+(1<<8)+(15<<2)+1 /* and VHPT */
+ ;;
+ mov cr.iva=r8
+ mov cr.pta=r9
+ ;;
+ srlz.i
+ ;;
+ srlz.d
+ ;;
+ movl gp=__gp /* find kernel globals */
+ ;;
+ br.call.sptk.many rp=ia64_init
+
+ /* XXX switch to proc0 here */
+ movl r16=proc0
+ ;;
+ add r16=P_ADDR,r16
+ ;;
+ ld8 r16=[r16]
+ ;;
+ add r17=SIZEOF_USER,r16 /* address of backing store */
+ add r18=U_PCB_SP,r16 /* stack pointer */
+ ;;
+ ld8 r18=[r18]
+ mov ar.rsc=0
+ cover
+ ;;
+ flushrs
+ ;;
+ mov ar.bspstore=r17
+ mov sp=r18
+ ;;
+ loadrs
+ mov ar.rsc=3
+ ;;
+ alloc r16=ar.pfs,0,0,0,0
+ ;;
+ br.call.sptk.many rp=mi_startup
+
+ /* NOTREACHED */
+
+#if 0
+ /* Load KGP with current GP. */
+ or a0,zero,s0 /* save pfn */
+ or gp,zero,a0
+ call_pal PAL_OSF1_wrkgp /* clobbers a0, t0, t8-t11 */
+ or s0,zero,a0 /* restore pfn */
+
+ /*
+ * Call alpha_init() to do pre-main initialization.
+ * alpha_init() gets the arguments we were called with,
+ * which are already in a0, a1, a2, a3, and a4.
+ */
+ CALL(alpha_init)
+
+ /* Set up the virtual page table pointer. */
+ ldiq a0, VPTBASE
+ call_pal PAL_OSF1_wrvptptr /* clobbers a0, t0, t8-t11 */
+
+ /*
+ * Switch to proc0's PCB, which is at U_PCB off of proc0paddr.
+ */
+ lda t0,proc0 /* get phys addr of pcb */
+ ldq a0,P_MD_PCBPADDR(t0)
+ SWITCH_CONTEXT
+
+ /*
+ * We've switched to a new page table base, so invalidate the TLB
+ * and I-stream. This happens automatically everywhere but here.
+ */
+ ldiq a0, -2 /* TBIA */
+ call_pal PAL_OSF1_tbi
+ call_pal PAL_imb
+
+ /*
+ * Construct a fake trap frame, so execve() can work normally.
+ * Note that setregs() is responsible for setting its contents
+ * to 'reasonable' values.
+ */
+ lda sp,-(FRAME_SIZE * 8)(sp) /* space for struct trapframe */
+ mov sp, a0 /* arg is frame ptr */
+ CALL(mi_startup) /* go to mi_startup()! */
+
+ /*
+ * Call exception_return, to simulate return from (fake)
+ * exception to user-land, running process 1, init!
+ */
+ jmp zero, exception_return /* "And that's all she wrote." */
+
+#endif
+ END(locorestart)
+
+
+/**************************************************************************/
+
+/*
+ * Signal "trampoline" code. Invoked from RTE setup by sendsig().
+ *
+ * On entry, registers look like:
+ *
+ * r14 signal number
+ * r15 pointer to siginfo_t
+ * r16 pointer to signal context frame (scp)
+ * r17 address of handler function descriptor
+ * r18 address of new backing store (if any)
+ * sp pointer to sigframe
+ */
+
+LEAF(sigcode,0)
+ ld8 r8=[r17],8 /* function address */
+ ;;
+ ld8 gp=[r17] /* function's gp value */
+ mov b6=r8 /* transfer to a branch register */
+ cover
+ ;;
+ alloc r5=ar.pfs,0,0,3,0 /* register frame for call */
+ ;;
+ mov out0=r14 /* signal number */
+	add r8=UC_MCONTEXT_MC_AR_BSP,r16 /* address of mc_ar_bsp */
+ mov r9=ar.bsp /* save ar.bsp */
+ ;;
+ st8 [r8]=r9
+ cmp.eq p1,p0=r0,r18 /* check for new bs */
+(p1) br.cond.sptk.few 1f /* branch if not switching */
+ flushrs /* flush out to old bs */
+ mov ar.rsc=0 /* switch off RSE */
+ add r8=UC_MCONTEXT_MC_AR_RNAT,r16 /* address of mc_ar_rnat */
+ ;;
+ mov r9=ar.rnat /* value of ar.rnat after flush */
+ mov ar.bspstore=r18 /* point at new bs */
+ ;;
+ st8 [r8]=r9 /* remember ar.rnat */
+ mov ar.rsc=15 /* XXX bogus value - check */
+ invala
+ ;;
+1: mov out1=r15 /* siginfo */
+ mov out2=r16 /* ucontext */
+ mov r4=r17 /* save ucontext pointer from call */
+ br.call.sptk.few rp=b6 /* call the signal handler */
+(p1) br.cond.sptk.few 2f /* note: p1 is preserved */
+ flushrs
+ mov ar.rsc=0
+ add r8=UC_MCONTEXT_MC_AR_RNAT,r4 /* address of mc_ar_rnat */
+ ;;
+ ld8 r9=[r8]
+ ;;
+ add r8=UC_MCONTEXT_MC_AR_BSP,r4 /* address of mc_ar_bsp */
+ ;;
+ ld8 r10=[r8]
+ ;;
+ mov ar.bspstore=r10
+ ;;
+ mov ar.rnat=r9
+ mov ar.rsc=15
+2:
+ CALLSYS_NOERROR(sigreturn) /* and call sigreturn() with it. */
+ mov out0=ret0 /* if that failed, get error code */
+ CALLSYS_NOERROR(exit) /* and call exit() with it. */
+XLEAF(esigcode)
+ END(sigcode)
+
+ .data
+ EXPORT(szsigcode)
+ .quad esigcode-sigcode
+ .text
+
+/**************************************************************************/
+
+/*
+ * savefpstate: Save a process's floating point state.
+ *
+ * Arguments:
+ * a0 'struct fpstate *' to save into
+ */
+
+LEAF(savefpstate, 1)
+#if 0
+ LDGP(pv)
+ /* save all of the FP registers */
+ lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */
+ stt $f0, (0 * 8)(t1) /* save first register, using hw name */
+ stt $f1, (1 * 8)(t1) /* etc. */
+ stt $f2, (2 * 8)(t1)
+ stt $f3, (3 * 8)(t1)
+ stt $f4, (4 * 8)(t1)
+ stt $f5, (5 * 8)(t1)
+ stt $f6, (6 * 8)(t1)
+ stt $f7, (7 * 8)(t1)
+ stt $f8, (8 * 8)(t1)
+ stt $f9, (9 * 8)(t1)
+ stt $f10, (10 * 8)(t1)
+ stt $f11, (11 * 8)(t1)
+ stt $f12, (12 * 8)(t1)
+ stt $f13, (13 * 8)(t1)
+ stt $f14, (14 * 8)(t1)
+ stt $f15, (15 * 8)(t1)
+ stt $f16, (16 * 8)(t1)
+ stt $f17, (17 * 8)(t1)
+ stt $f18, (18 * 8)(t1)
+ stt $f19, (19 * 8)(t1)
+ stt $f20, (20 * 8)(t1)
+ stt $f21, (21 * 8)(t1)
+ stt $f22, (22 * 8)(t1)
+ stt $f23, (23 * 8)(t1)
+ stt $f24, (24 * 8)(t1)
+ stt $f25, (25 * 8)(t1)
+ stt $f26, (26 * 8)(t1)
+ stt $f27, (27 * 8)(t1)
+ stt $f28, (28 * 8)(t1)
+ stt $f29, (29 * 8)(t1)
+ stt $f30, (30 * 8)(t1)
+
+ /*
+ * Then save the FPCR; note that the necessary 'trapb's are taken
+ * care of on kernel entry and exit.
+ */
+ mf_fpcr ft0
+ stt ft0, FPREG_FPR_CR(a0) /* store to FPCR save area */
+
+ RET
+#endif
+ END(savefpstate)
+
+/**************************************************************************/
+
+/*
+ * restorefpstate: Restore a process's floating point state.
+ *
+ * Arguments:
+ * a0 'struct fpstate *' to restore from
+ */
+
+LEAF(restorefpstate, 1)
+#if 0
+ LDGP(pv)
+ /*
+ * Restore the FPCR; note that the necessary 'trapb's are taken care of
+ * on kernel entry and exit.
+ */
+ ldt ft0, FPREG_FPR_CR(a0) /* load from FPCR save area */
+ mt_fpcr ft0
+
+ /* Restore all of the FP registers. */
+ lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */
+ ldt $f0, (0 * 8)(t1) /* restore first reg., using hw name */
+ ldt $f1, (1 * 8)(t1) /* etc. */
+ ldt $f2, (2 * 8)(t1)
+ ldt $f3, (3 * 8)(t1)
+ ldt $f4, (4 * 8)(t1)
+ ldt $f5, (5 * 8)(t1)
+ ldt $f6, (6 * 8)(t1)
+ ldt $f7, (7 * 8)(t1)
+ ldt $f8, (8 * 8)(t1)
+ ldt $f9, (9 * 8)(t1)
+ ldt $f10, (10 * 8)(t1)
+ ldt $f11, (11 * 8)(t1)
+ ldt $f12, (12 * 8)(t1)
+ ldt $f13, (13 * 8)(t1)
+ ldt $f14, (14 * 8)(t1)
+ ldt $f15, (15 * 8)(t1)
+ ldt $f16, (16 * 8)(t1)
+ ldt $f17, (17 * 8)(t1)
+ ldt $f18, (18 * 8)(t1)
+ ldt $f19, (19 * 8)(t1)
+ ldt $f20, (20 * 8)(t1)
+ ldt $f21, (21 * 8)(t1)
+ ldt $f22, (22 * 8)(t1)
+ ldt $f23, (23 * 8)(t1)
+ ldt $f24, (24 * 8)(t1)
+ ldt $f25, (25 * 8)(t1)
+ ldt $f26, (26 * 8)(t1)
+ ldt $f27, (27 * 8)(t1)
+ ldt $f28, (28 * 8)(t1)
+ ldt $f29, (29 * 8)(t1)
+ ldt $f30, (30 * 8)(t1)
+
+ RET
+#endif
+ END(restorefpstate)
+
+/*
+ * When starting init, call this to configure the process for user
+ * mode. This will be inherited by other processes.
+ */
+ LEAF_NOPROFILE(prepare_usermode, 0)
+ END(prepare_usermode)
+
+ .data
+ EXPORT(proc0paddr)
+ .quad 0
+
+ .text
+
+/* XXX: make systat/vmstat happy */
+ .data
+EXPORT(intrnames)
+ .asciz "clock"
+intr_n = 0
+.rept INTRCNT_COUNT
+ .ascii "intr "
+ .byte intr_n / 10 + '0, intr_n % 10 + '0
+ .asciz " " /* space for platform-specific rewrite */
+ intr_n = intr_n + 1
+.endr
+EXPORT(eintrnames)
+ .align 8
+EXPORT(intrcnt)
+ .fill INTRCNT_COUNT + 1, 8, 0
+EXPORT(eintrcnt)
+ .text
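
sigcode reaches the handler through an ia64 function descriptor: r17 points at two 8-byte words, the entry address (moved into b6) followed by the handler's gp. In C terms the layout it assumes looks roughly like this -- an illustrative declaration, not taken from this commit:

/* Illustrative: the two fields sigcode loads through r17. */
struct example_ia64_fdesc {
	u_int64_t	fd_func;	/* entry point, branched to via b6 */
	u_int64_t	fd_gp;		/* global pointer loaded for the handler */
};
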
diff --git a/sys/ia64/ia64/locore.s b/sys/ia64/ia64/locore.s
new file mode 100644
index 0000000..8e38d40
--- /dev/null
+++ b/sys/ia64/ia64/locore.s
@@ -0,0 +1,394 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/cdefs.h>
+#include <machine/asm.h>
+#include <machine/fpu.h>
+#include <sys/syscall.h>
+#include <assym.s>
+
+#ifndef EVCNT_COUNTERS
+#define _LOCORE
+#include <machine/intrcnt.h>
+#endif
+
+/*
+ * Perform actions necessary to switch to a new context. The
+ * hwpcb should be in a0.
+ */
+#define SWITCH_CONTEXT \
+ /* Make a note of the context we're running on. */ \
+ stq a0, curpcb ; \
+ \
+ /* Swap in the new context. */ \
+ call_pal PAL_OSF1_swpctx
+
+ .text
+
+/*
+ * Not really a leaf but we can't return.
+ */
+LEAF(locorestart, 1)
+
+ movl r8=ia64_vector_table /* set up IVT early */
+ movl r9=ia64_vhpt+(1<<8)+(15<<2)+1 /* and VHPT */
+ ;;
+ mov cr.iva=r8
+ mov cr.pta=r9
+ ;;
+ srlz.i
+ ;;
+ srlz.d
+ ;;
+ movl gp=__gp /* find kernel globals */
+ ;;
+ br.call.sptk.many rp=ia64_init
+
+ /* XXX switch to proc0 here */
+ movl r16=proc0
+ ;;
+ add r16=P_ADDR,r16
+ ;;
+ ld8 r16=[r16]
+ ;;
+ add r17=SIZEOF_USER,r16 /* address of backing store */
+ add r18=U_PCB_SP,r16 /* stack pointer */
+ ;;
+ ld8 r18=[r18]
+ mov ar.rsc=0
+ cover
+ ;;
+ flushrs
+ ;;
+ mov ar.bspstore=r17
+ mov sp=r18
+ ;;
+ loadrs
+ mov ar.rsc=3
+ ;;
+ alloc r16=ar.pfs,0,0,0,0
+ ;;
+ br.call.sptk.many rp=mi_startup
+
+ /* NOTREACHED */
+
+#if 0
+ /* Load KGP with current GP. */
+ or a0,zero,s0 /* save pfn */
+ or gp,zero,a0
+ call_pal PAL_OSF1_wrkgp /* clobbers a0, t0, t8-t11 */
+ or s0,zero,a0 /* restore pfn */
+
+ /*
+ * Call alpha_init() to do pre-main initialization.
+ * alpha_init() gets the arguments we were called with,
+ * which are already in a0, a1, a2, a3, and a4.
+ */
+ CALL(alpha_init)
+
+ /* Set up the virtual page table pointer. */
+ ldiq a0, VPTBASE
+ call_pal PAL_OSF1_wrvptptr /* clobbers a0, t0, t8-t11 */
+
+ /*
+ * Switch to proc0's PCB, which is at U_PCB off of proc0paddr.
+ */
+ lda t0,proc0 /* get phys addr of pcb */
+ ldq a0,P_MD_PCBPADDR(t0)
+ SWITCH_CONTEXT
+
+ /*
+ * We've switched to a new page table base, so invalidate the TLB
+ * and I-stream. This happens automatically everywhere but here.
+ */
+ ldiq a0, -2 /* TBIA */
+ call_pal PAL_OSF1_tbi
+ call_pal PAL_imb
+
+ /*
+ * Construct a fake trap frame, so execve() can work normally.
+ * Note that setregs() is responsible for setting its contents
+ * to 'reasonable' values.
+ */
+ lda sp,-(FRAME_SIZE * 8)(sp) /* space for struct trapframe */
+ mov sp, a0 /* arg is frame ptr */
+ CALL(mi_startup) /* go to mi_startup()! */
+
+ /*
+ * Call exception_return, to simulate return from (fake)
+ * exception to user-land, running process 1, init!
+ */
+ jmp zero, exception_return /* "And that's all she wrote." */
+
+#endif
+ END(locorestart)
+
+
+/**************************************************************************/
+
+/*
+ * Signal "trampoline" code. Invoked from RTE setup by sendsig().
+ *
+ * On entry, registers look like:
+ *
+ * r14 signal number
+ * r15 pointer to siginfo_t
+ * r16 pointer to signal context frame (scp)
+ * r17 address of handler function descriptor
+ * r18 address of new backing store (if any)
+ * sp pointer to sigframe
+ */
+
+LEAF(sigcode,0)
+ ld8 r8=[r17],8 /* function address */
+ ;;
+ ld8 gp=[r17] /* function's gp value */
+ mov b6=r8 /* transfer to a branch register */
+ cover
+ ;;
+ alloc r5=ar.pfs,0,0,3,0 /* register frame for call */
+ ;;
+ mov out0=r14 /* signal number */
+	add r8=UC_MCONTEXT_MC_AR_BSP,r16 /* address of mc_ar_bsp */
+ mov r9=ar.bsp /* save ar.bsp */
+ ;;
+ st8 [r8]=r9
+ cmp.eq p1,p0=r0,r18 /* check for new bs */
+(p1) br.cond.sptk.few 1f /* branch if not switching */
+ flushrs /* flush out to old bs */
+ mov ar.rsc=0 /* switch off RSE */
+ add r8=UC_MCONTEXT_MC_AR_RNAT,r16 /* address of mc_ar_rnat */
+ ;;
+ mov r9=ar.rnat /* value of ar.rnat after flush */
+ mov ar.bspstore=r18 /* point at new bs */
+ ;;
+ st8 [r8]=r9 /* remember ar.rnat */
+ mov ar.rsc=15 /* XXX bogus value - check */
+ invala
+ ;;
+1: mov out1=r15 /* siginfo */
+ mov out2=r16 /* ucontext */
+ mov r4=r17 /* save ucontext pointer from call */
+ br.call.sptk.few rp=b6 /* call the signal handler */
+(p1) br.cond.sptk.few 2f /* note: p1 is preserved */
+ flushrs
+ mov ar.rsc=0
+ add r8=UC_MCONTEXT_MC_AR_RNAT,r4 /* address of mc_ar_rnat */
+ ;;
+ ld8 r9=[r8]
+ ;;
+ add r8=UC_MCONTEXT_MC_AR_BSP,r4 /* address of mc_ar_bsp */
+ ;;
+ ld8 r10=[r8]
+ ;;
+ mov ar.bspstore=r10
+ ;;
+ mov ar.rnat=r9
+ mov ar.rsc=15
+2:
+ CALLSYS_NOERROR(sigreturn) /* and call sigreturn() with it. */
+ mov out0=ret0 /* if that failed, get error code */
+ CALLSYS_NOERROR(exit) /* and call exit() with it. */
+XLEAF(esigcode)
+ END(sigcode)
+
+ .data
+ EXPORT(szsigcode)
+ .quad esigcode-sigcode
+ .text
+
+/**************************************************************************/
+
+/*
+ * savefpstate: Save a process's floating point state.
+ *
+ * Arguments:
+ * a0 'struct fpstate *' to save into
+ */
+
+LEAF(savefpstate, 1)
+#if 0
+ LDGP(pv)
+ /* save all of the FP registers */
+ lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */
+ stt $f0, (0 * 8)(t1) /* save first register, using hw name */
+ stt $f1, (1 * 8)(t1) /* etc. */
+ stt $f2, (2 * 8)(t1)
+ stt $f3, (3 * 8)(t1)
+ stt $f4, (4 * 8)(t1)
+ stt $f5, (5 * 8)(t1)
+ stt $f6, (6 * 8)(t1)
+ stt $f7, (7 * 8)(t1)
+ stt $f8, (8 * 8)(t1)
+ stt $f9, (9 * 8)(t1)
+ stt $f10, (10 * 8)(t1)
+ stt $f11, (11 * 8)(t1)
+ stt $f12, (12 * 8)(t1)
+ stt $f13, (13 * 8)(t1)
+ stt $f14, (14 * 8)(t1)
+ stt $f15, (15 * 8)(t1)
+ stt $f16, (16 * 8)(t1)
+ stt $f17, (17 * 8)(t1)
+ stt $f18, (18 * 8)(t1)
+ stt $f19, (19 * 8)(t1)
+ stt $f20, (20 * 8)(t1)
+ stt $f21, (21 * 8)(t1)
+ stt $f22, (22 * 8)(t1)
+ stt $f23, (23 * 8)(t1)
+ stt $f24, (24 * 8)(t1)
+ stt $f25, (25 * 8)(t1)
+ stt $f26, (26 * 8)(t1)
+ stt $f27, (27 * 8)(t1)
+ stt $f28, (28 * 8)(t1)
+ stt $f29, (29 * 8)(t1)
+ stt $f30, (30 * 8)(t1)
+
+ /*
+ * Then save the FPCR; note that the necessary 'trapb's are taken
+ * care of on kernel entry and exit.
+ */
+ mf_fpcr ft0
+ stt ft0, FPREG_FPR_CR(a0) /* store to FPCR save area */
+
+ RET
+#endif
+ END(savefpstate)
+
+/**************************************************************************/
+
+/*
+ * restorefpstate: Restore a process's floating point state.
+ *
+ * Arguments:
+ * a0 'struct fpstate *' to restore from
+ */
+
+LEAF(restorefpstate, 1)
+#if 0
+ LDGP(pv)
+ /*
+ * Restore the FPCR; note that the necessary 'trapb's are taken care of
+ * on kernel entry and exit.
+ */
+ ldt ft0, FPREG_FPR_CR(a0) /* load from FPCR save area */
+ mt_fpcr ft0
+
+ /* Restore all of the FP registers. */
+ lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */
+ ldt $f0, (0 * 8)(t1) /* restore first reg., using hw name */
+ ldt $f1, (1 * 8)(t1) /* etc. */
+ ldt $f2, (2 * 8)(t1)
+ ldt $f3, (3 * 8)(t1)
+ ldt $f4, (4 * 8)(t1)
+ ldt $f5, (5 * 8)(t1)
+ ldt $f6, (6 * 8)(t1)
+ ldt $f7, (7 * 8)(t1)
+ ldt $f8, (8 * 8)(t1)
+ ldt $f9, (9 * 8)(t1)
+ ldt $f10, (10 * 8)(t1)
+ ldt $f11, (11 * 8)(t1)
+ ldt $f12, (12 * 8)(t1)
+ ldt $f13, (13 * 8)(t1)
+ ldt $f14, (14 * 8)(t1)
+ ldt $f15, (15 * 8)(t1)
+ ldt $f16, (16 * 8)(t1)
+ ldt $f17, (17 * 8)(t1)
+ ldt $f18, (18 * 8)(t1)
+ ldt $f19, (19 * 8)(t1)
+ ldt $f20, (20 * 8)(t1)
+ ldt $f21, (21 * 8)(t1)
+ ldt $f22, (22 * 8)(t1)
+ ldt $f23, (23 * 8)(t1)
+ ldt $f24, (24 * 8)(t1)
+ ldt $f25, (25 * 8)(t1)
+ ldt $f26, (26 * 8)(t1)
+ ldt $f27, (27 * 8)(t1)
+ ldt $f28, (28 * 8)(t1)
+ ldt $f29, (29 * 8)(t1)
+ ldt $f30, (30 * 8)(t1)
+
+ RET
+#endif
+ END(restorefpstate)
+
+/*
+ * When starting init, call this to configure the process for user
+ * mode. This will be inherited by other processes.
+ */
+ LEAF_NOPROFILE(prepare_usermode, 0)
+ END(prepare_usermode)
+
+ .data
+ EXPORT(proc0paddr)
+ .quad 0
+
+ .text
+
+/* XXX: make systat/vmstat happy */
+ .data
+EXPORT(intrnames)
+ .asciz "clock"
+intr_n = 0
+.rept INTRCNT_COUNT
+ .ascii "intr "
+ .byte intr_n / 10 + '0, intr_n % 10 + '0
+ .asciz " " /* space for platform-specific rewrite */
+ intr_n = intr_n + 1
+.endr
+EXPORT(eintrnames)
+ .align 8
+EXPORT(intrcnt)
+ .fill INTRCNT_COUNT + 1, 8, 0
+EXPORT(eintrcnt)
+ .text
diff --git a/sys/ia64/ia64/machdep.c b/sys/ia64/ia64/machdep.c
new file mode 100644
index 0000000..051f941
--- /dev/null
+++ b/sys/ia64/ia64/machdep.c
@@ -0,0 +1,1364 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_compat.h"
+#include "opt_ddb.h"
+#include "opt_simos.h"
+#include "opt_msgbuf.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/eventhandler.h>
+#include <sys/sysproto.h>
+#include <sys/signalvar.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/reboot.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/mbuf.h>
+#include <sys/vmmeter.h>
+#include <sys/msgbuf.h>
+#include <sys/exec.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+#include <sys/linker.h>
+#include <sys/random.h>
+#include <net/netisr.h>
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_pager.h>
+#include <sys/user.h>
+#include <sys/ptrace.h>
+#include <machine/clock.h>
+#include <machine/md_var.h>
+#include <machine/reg.h>
+#include <machine/fpu.h>
+#include <machine/pal.h>
+#include <machine/efi.h>
+#include <machine/bootinfo.h>
+#include <machine/mutex.h>
+#include <machine/vmparam.h>
+#include <machine/elf.h>
+#include <ddb/ddb.h>
+#include <alpha/alpha/db_instruction.h>
+#include <sys/vnode.h>
+#include <miscfs/procfs/procfs.h>
+#include <machine/sigframe.h>
+
+u_int64_t cycles_per_usec;
+u_int32_t cycles_per_sec;
+int cold = 1;
+struct bootinfo_kernel bootinfo;
+
+struct cpuhead cpuhead;
+
+struct mtx sched_lock;
+struct mtx Giant;
+
+struct user *proc0paddr;
+
+char machine[] = "ia64";
+SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
+
+static char cpu_model[128];
+SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0, "");
+
+#ifdef DDB
+/* start and end of kernel symbol table */
+void *ksym_start, *ksym_end;
+#endif
+
+int ia64_unaligned_print = 1; /* warn about unaligned accesses */
+int ia64_unaligned_fix = 1; /* fix up unaligned accesses */
+int ia64_unaligned_sigbus = 0; /* don't SIGBUS on fixed-up accesses */
+
+SYSCTL_INT(_machdep, CPU_UNALIGNED_PRINT, unaligned_print,
+ CTLFLAG_RW, &ia64_unaligned_print, 0, "");
+
+SYSCTL_INT(_machdep, CPU_UNALIGNED_FIX, unaligned_fix,
+ CTLFLAG_RW, &ia64_unaligned_fix, 0, "");
+
+SYSCTL_INT(_machdep, CPU_UNALIGNED_SIGBUS, unaligned_sigbus,
+ CTLFLAG_RW, &ia64_unaligned_sigbus, 0, "");
+
+static void cpu_startup __P((void *));
+SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
+
+static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
+
+struct msgbuf *msgbufp=0;
+
+int bootverbose = 0, Maxmem = 0;
+long dumplo;
+
+int totalphysmem; /* total amount of physical memory in system */
+int physmem; /* physical memory used by NetBSD + some rsvd */
+int resvmem; /* amount of memory reserved for PROM */
+int unusedmem; /* amount of memory for OS that we don't use */
+int unknownmem; /* amount of memory with an unknown use */
+int ncpus; /* number of cpus */
+
+vm_offset_t phys_avail[10];
+
+static int
+sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
+{
+ int error = sysctl_handle_int(oidp, 0, ia64_ptob(physmem), req);
+ return (error);
+}
+
+SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
+ 0, 0, sysctl_hw_physmem, "I", "");
+
+static int
+sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
+{
+ int error = sysctl_handle_int(oidp, 0,
+ ia64_ptob(physmem - cnt.v_wire_count), req);
+ return (error);
+}
+
+SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
+ 0, 0, sysctl_hw_usermem, "I", "");
+
+SYSCTL_INT(_hw, OID_AUTO, availpages, CTLFLAG_RD, &physmem, 0, "");
+
+/* must be 2 less so 0 0 can signal end of chunks */
+#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
+
+static void identifycpu __P((void));
+
+static vm_offset_t buffer_sva, buffer_eva;
+vm_offset_t clean_sva, clean_eva;
+static vm_offset_t pager_sva, pager_eva;
+
+#define offsetof(type, member) ((size_t)(&((type *)0)->member))
+
+static void
+cpu_startup(dummy)
+ void *dummy;
+{
+ unsigned int i;
+ caddr_t v;
+ vm_offset_t maxaddr;
+ vm_size_t size = 0;
+ vm_offset_t firstaddr;
+ vm_offset_t minaddr;
+
+ if (boothowto & RB_VERBOSE)
+ bootverbose++;
+
+ /*
+ * Good {morning,afternoon,evening,night}.
+ */
+ printf("%s", version);
+ identifycpu();
+
+ /* startrtclock(); */
+#ifdef PERFMON
+ perfmon_init();
+#endif
+ printf("real memory = %ld (%ldK bytes)\n", ia64_ptob(Maxmem), ia64_ptob(Maxmem) / 1024);
+
+ /*
+ * Display any holes after the first chunk of extended memory.
+ */
+ if (bootverbose) {
+ int indx;
+
+ printf("Physical memory chunk(s):\n");
+ for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
+ int size1 = phys_avail[indx + 1] - phys_avail[indx];
+
+ printf("0x%08lx - 0x%08lx, %d bytes (%d pages)\n", phys_avail[indx],
+ phys_avail[indx + 1] - 1, size1, size1 / PAGE_SIZE);
+ }
+ }
+
+ /*
+ * Calculate callout wheel size
+ */
+ for (callwheelsize = 1, callwheelbits = 0;
+ callwheelsize < ncallout;
+ callwheelsize <<= 1, ++callwheelbits)
+ ;
+ callwheelmask = callwheelsize - 1;
+
+ /*
+ * Allocate space for system data structures.
+ * The first available kernel virtual address is in "v".
+ * As pages of kernel virtual memory are allocated, "v" is incremented.
+ * As pages of memory are allocated and cleared,
+ * "firstaddr" is incremented.
+ * An index into the kernel page table corresponding to the
+ * virtual memory address maintained in "v" is kept in "mapaddr".
+ */
+
+ /*
+ * Make two passes. The first pass calculates how much memory is
+ * needed and allocates it. The second pass assigns virtual
+ * addresses to the various data structures.
+ */
+ firstaddr = 0;
+again:
+ v = (caddr_t)firstaddr;
+
+#define valloc(name, type, num) \
+ (name) = (type *)v; v = (caddr_t)((name)+(num))
+#define valloclim(name, type, num, lim) \
+ (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
+
+ valloc(callout, struct callout, ncallout);
+ valloc(callwheel, struct callout_tailq, callwheelsize);
+
+ /*
+ * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
+ * For the first 64MB of ram nominally allocate sufficient buffers to
+ * cover 1/4 of our ram. Beyond the first 64MB allocate additional
+ * buffers to cover 1/20 of our ram over 64MB.
+ */
+
+ if (nbuf == 0) {
+ int factor = 4 * BKVASIZE / PAGE_SIZE;
+
+ nbuf = 50;
+ if (physmem > 1024)
+ nbuf += min((physmem - 1024) / factor, 16384 / factor);
+ if (physmem > 16384)
+ nbuf += (physmem - 16384) * 2 / (factor * 5);
+ }
+ nswbuf = max(min(nbuf/4, 64), 16);
+
+ valloc(swbuf, struct buf, nswbuf);
+ valloc(buf, struct buf, nbuf);
+ v = bufhashinit(v);
+
+ /*
+ * End of first pass, size has been calculated so allocate memory
+ */
+ if (firstaddr == 0) {
+ size = (vm_size_t)(v - firstaddr);
+ firstaddr = (vm_offset_t)kmem_alloc(kernel_map, round_page(size));
+ if (firstaddr == 0)
+ panic("startup: no room for tables");
+ goto again;
+ }
+
+ /*
+ * End of second pass, addresses have been assigned
+ */
+ if ((vm_size_t)(v - firstaddr) != size)
+ panic("startup: table size inconsistency");
+
+ clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
+ (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
+ buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
+ (nbuf*BKVASIZE));
+ pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
+ (nswbuf*MAXPHYS) + pager_map_size);
+ pager_map->system_map = 1;
+ exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ (16*(ARG_MAX+(PAGE_SIZE*3))));
+
+	/*
+	 * Finally, allocate the mbuf pool as a submap of kmem_map,
+	 * sized to hold the mbufs, clusters and their reference counts.
+	 */
+ {
+ vm_offset_t mb_map_size;
+
+ mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES +
+ (nmbclusters + nmbufs / 4) * sizeof(union mext_refcnt);
+ mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE));
+ mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl,
+ &maxaddr, mb_map_size);
+ mb_map->system_map = 1;
+ }
+
+ /*
+ * Initialize callouts
+ */
+ SLIST_INIT(&callfree);
+ for (i = 0; i < ncallout; i++) {
+ callout_init(&callout[i]);
+ callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
+ SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
+ }
+
+ for (i = 0; i < callwheelsize; i++) {
+ TAILQ_INIT(&callwheel[i]);
+ }
+
+#if defined(USERCONFIG)
+#if defined(USERCONFIG_BOOT)
+ if (1)
+#else
+ if (boothowto & RB_CONFIG)
+#endif
+ {
+ userconfig();
+ cninit(); /* the preferred console may have changed */
+ }
+#endif
+
+ printf("avail memory = %ld (%ldK bytes)\n", ptoa(cnt.v_free_count),
+ ptoa(cnt.v_free_count) / 1024);
+
+ /*
+ * Set up buffers, so they can be used to read disk labels.
+ */
+ bufinit();
+ vm_pager_bufferinit();
+}
+
+int
+register_netisr(num, handler)
+ int num;
+ netisr_t *handler;
+{
+
+ if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) {
+ printf("register_netisr: bad isr number: %d\n", num);
+ return (EINVAL);
+ }
+ netisrs[num] = handler;
+ return (0);
+}
+
+int
+unregister_netisr(num)
+ int num;
+{
+
+ if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) {
+ printf("unregister_netisr: bad isr number: %d\n", num);
+ return (EINVAL);
+ }
+ netisrs[num] = NULL;
+ return (0);
+}
+
+static void
+identifycpu(void)
+{
+ /* print cpu type & version */
+}
+
+extern char kernel_text[], _end[];
+
+#define DEBUG_MD
+
+void
+ia64_init()
+{
+ int phys_avail_cnt;
+ vm_offset_t kernstart, kernend;
+ vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1;
+ char *p;
+ struct efi_memory_descriptor ski_md[2]; /* XXX */
+ struct efi_memory_descriptor *mdp;
+ int mdcount, i;
+
+ /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */
+
+ /*
+ * TODO: Disable interrupts, floating point etc.
+ * Maybe flush cache and tlb
+ */
+ __asm __volatile("mov ar.fpsr=%0" :: "r"(IA64_FPSR_DEFAULT));
+
+ /*
+ * TODO: Get critical system information (if possible, from the
+ * information provided by the boot program).
+ */
+
+ /*
+	 * Initialize the (temporary) bootstrap console interface, so
+	 * we can use printf until the VM system starts being set up.
+ * The real console is initialized before then.
+ * TODO: I guess we start with a serial console here.
+ */
+ ssccnattach();
+
+ /* OUTPUT NOW ALLOWED */
+
+ /*
+ * Find the beginning and end of the kernel.
+ */
+ kernstart = trunc_page(kernel_text);
+#ifdef DDB
+ ksym_start = (void *)bootinfo.ssym;
+ ksym_end = (void *)bootinfo.esym;
+ kernend = (vm_offset_t)round_page(ksym_end);
+#else
+ kernend = (vm_offset_t)round_page(_end);
+#endif
+ /* But if the bootstrap tells us otherwise, believe it! */
+ if (bootinfo.kernend)
+ kernend = round_page(bootinfo.kernend);
+ preload_metadata = (caddr_t)bootinfo.modptr;
+ kern_envp = bootinfo.envp;
+
+ p = getenv("kernelname");
+ if (p)
+ strncpy(kernelname, p, sizeof(kernelname) - 1);
+
+ kernstartpfn = atop(IA64_RR_MASK(kernstart));
+ kernendpfn = atop(IA64_RR_MASK(kernend));
+
+ /*
+ * Size the memory regions and load phys_avail[] with the results.
+ */
+
+ /*
+ * XXX hack for ski. In reality, the loader will probably ask
+ * EFI and pass the results to us. Possibly, we will call EFI
+ * directly.
+ */
+ ski_md[0].emd_type = EFI_CONVENTIONAL_MEMORY;
+ ski_md[0].emd_physical_start = 2L*1024*1024;
+ ski_md[0].emd_virtul_start = 0;
+ ski_md[0].emd_number_of_pages = (64L*1024*1024)>>12;
+ ski_md[0].emd_attribute = EFI_MEMORY_WB;
+
+ ski_md[1].emd_type = EFI_CONVENTIONAL_MEMORY;
+ ski_md[1].emd_physical_start = 4096L*1024*1024;
+ ski_md[1].emd_virtul_start = 0;
+ ski_md[1].emd_number_of_pages = (32L*1024*1024)>>12;
+ ski_md[1].emd_attribute = EFI_MEMORY_WB;
+
+ mdcount = 1; /* ignore the high memory for now */
+
+ /*
+ * Find out how much memory is available, by looking at
+ * the memory descriptors.
+ */
+#ifdef DEBUG_MD
+ printf("Memory descriptor count: %d\n", mdcount);
+#endif
+
+ phys_avail_cnt = 0;
+ for (i = 0; i < mdcount; i++) {
+ mdp = &ski_md[i];
+#ifdef DEBUG_MD
+ printf("MD %d: type %d pa 0x%lx cnt 0x%lx\n", i,
+ mdp->emd_type,
+ mdp->emd_physical_start,
+ mdp->emd_number_of_pages);
+#endif
+ totalphysmem += mdp->emd_number_of_pages;
+
+ if (mdp->emd_type != EFI_CONVENTIONAL_MEMORY) {
+ resvmem += mdp->emd_number_of_pages;
+ continue;
+ }
+
+ /*
+		 * We have a memory descriptor available for system
+ * software use. We must determine if this cluster
+ * holds the kernel.
+ */
+ physmem += mdp->emd_number_of_pages;
+ pfn0 = atop(mdp->emd_physical_start);
+ pfn1 = pfn0 + mdp->emd_number_of_pages;
+ if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
+ /*
+ * Must compute the location of the kernel
+ * within the segment.
+ */
+#ifdef DEBUG_MD
+ printf("Descriptor %d contains kernel\n", i);
+#endif
+ if (pfn0 < kernstartpfn) {
+ /*
+ * There is a chunk before the kernel.
+ */
+#ifdef DEBUG_MD
+ printf("Loading chunk before kernel: "
+ "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
+#endif
+ phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
+ phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn);
+ phys_avail_cnt += 2;
+ }
+ if (kernendpfn < pfn1) {
+ /*
+ * There is a chunk after the kernel.
+ */
+#ifdef DEBUG_MD
+ printf("Loading chunk after kernel: "
+ "0x%lx / 0x%lx\n", kernendpfn, pfn1);
+#endif
+ phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn);
+ phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
+ phys_avail_cnt += 2;
+ }
+ } else {
+ /*
+ * Just load this cluster as one chunk.
+ */
+#ifdef DEBUG_MD
+ printf("Loading descriptor %d: 0x%lx / 0x%lx\n", i,
+ pfn0, pfn1);
+#endif
+ phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
+ phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
+ phys_avail_cnt += 2;
+
+ }
+ }
+ phys_avail[phys_avail_cnt] = 0;
+
+ Maxmem = physmem;
+
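The descriptor walk above reduces to a small piece of interval arithmetic:
a region that overlaps the kernel contributes up to two free chunks, one on
each side of it, and any other region is taken whole.  A standalone sketch
(all page frame numbers are made up):

#include <stdio.h>

/* Emit the free pieces of [pfn0, pfn1) around a kernel at [kstart, kend). */
static void
emit_chunks(unsigned long pfn0, unsigned long pfn1,
    unsigned long kstart, unsigned long kend)
{
	if (pfn0 <= kend && kstart <= pfn1) {
		if (pfn0 < kstart)
			printf("free: %#lx-%#lx\n", pfn0, kstart);
		if (kend < pfn1)
			printf("free: %#lx-%#lx\n", kend, pfn1);
	} else
		printf("free: %#lx-%#lx\n", pfn0, pfn1);
}

int
main(void)
{
	emit_chunks(0x100, 0x2100, 0x400, 0x600);	/* kernel inside */
	emit_chunks(0x80000, 0x81000, 0x400, 0x600);	/* kernel elsewhere */
	return (0);
}
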
+ /*
+ * Initialize error message buffer (at end of core).
+ */
+ {
+ size_t sz = round_page(MSGBUF_SIZE);
+ int i = phys_avail_cnt - 2;
+
+ /* shrink so that it'll fit in the last segment */
+ if (phys_avail[i+1] - phys_avail[i] < sz)
+ sz = phys_avail[i+1] - phys_avail[i];
+
+ phys_avail[i+1] -= sz;
+ msgbufp = (struct msgbuf*) IA64_PHYS_TO_RR7(phys_avail[i+1]);
+
+ msgbufinit(msgbufp, sz);
+
+ /* Remove the last segment if it now has no pages. */
+ if (phys_avail[i] == phys_avail[i+1])
+ phys_avail[i] = 0;
+
+ /* warn if the message buffer had to be shrunk */
+ if (sz != round_page(MSGBUF_SIZE))
+ printf("WARNING: %ld bytes not available for msgbuf in last cluster (%ld used)\n",
+ round_page(MSGBUF_SIZE), sz);
+
+ }
+
+ /*
+ * Init mapping for u page(s) for proc 0
+ */
+ proc0.p_addr = proc0paddr =
+ (struct user *)pmap_steal_memory(UPAGES * PAGE_SIZE);
+
+ /*
+ * Setup the global data for the bootstrap cpu.
+ */
+ {
+ size_t sz = round_page(UPAGES * PAGE_SIZE);
+ globalp = (struct globaldata *) pmap_steal_memory(sz);
+ globaldata_init(globalp, 0, sz);
+ PCPU_GET(next_asn) = 1; /* 0 used for proc0 pmap */
+ }
+
+ /*
+ * Initialize the virtual memory system, and set the
+ * page table base register in proc 0's PCB.
+ */
+ pmap_bootstrap();
+
+ /*
+ * Initialize the rest of proc 0's PCB, and cache its physical
+ * address.
+ */
+ proc0.p_md.md_pcbpaddr =
+ (struct pcb *)IA64_RR_MASK((vm_offset_t)&proc0paddr->u_pcb);
+
+ /*
+ * Set the kernel sp, reserving space for an (empty) trapframe,
+ * and make proc0's trapframe pointer point to it for sanity.
+ */
+ proc0paddr->u_pcb.pcb_sp =
+ (u_int64_t)proc0paddr + USPACE - sizeof(struct trapframe);
+ proc0.p_md.md_tf =
+ (struct trapframe *)proc0paddr->u_pcb.pcb_sp;
+ PCPU_SET(curproc, &proc0);
+
+ /*
+ * Record all cpus in a list.
+ */
+ SLIST_INIT(&cpuhead);
+ SLIST_INSERT_HEAD(&cpuhead, GLOBALP, gd_allcpu);
+
+ /*
+ * Initialise mutexes.
+ */
+ mtx_init(&Giant, "Giant", MTX_DEF);
+ mtx_init(&sched_lock, "sched lock", MTX_SPIN);
+
+#if 0
+ /*
+ * Enable interrupts on first release (in switch_trampoline).
+ */
+ sched_lock.mtx_saveipl = ALPHA_PSL_IPL_0;
+#endif
+
+ /*
+ * Look at arguments passed to us and compute boothowto.
+ */
+ boothowto = 0;
+#ifdef KADB
+ boothowto |= RB_KDB;
+#endif
+/* boothowto |= RB_KDB | RB_GDB; */
+ for (p = bootinfo.boot_flags; p && *p != '\0'; p++) {
+ /*
+ * Note that we'd really like to differentiate case here,
+		 * but the Alpha AXP Architecture Reference Manual (from
+		 * which this code was ported) says that we shouldn't.
+ */
+ switch (*p) {
+ case 'a': /* autoboot */
+ case 'A':
+ boothowto &= ~RB_SINGLE;
+ break;
+
+#ifdef DEBUG
+ case 'c': /* crash dump immediately after autoconfig */
+ case 'C':
+ boothowto |= RB_DUMP;
+ break;
+#endif
+
+#if defined(DDB)
+ case 'd': /* break into the kernel debugger ASAP */
+ case 'D':
+ boothowto |= RB_KDB;
+ break;
+ case 'g': /* use kernel gdb */
+ case 'G':
+ boothowto |= RB_GDB;
+ break;
+#endif
+
+ case 'h': /* always halt, never reboot */
+ case 'H':
+ boothowto |= RB_HALT;
+ break;
+
+#if 0
+ case 'm': /* mini root present in memory */
+ case 'M':
+ boothowto |= RB_MINIROOT;
+ break;
+#endif
+
+ case 'n': /* askname */
+ case 'N':
+ boothowto |= RB_ASKNAME;
+ break;
+
+ case 's': /* single-user (default, supported for sanity) */
+ case 'S':
+ boothowto |= RB_SINGLE;
+ break;
+
+ case 'v':
+ case 'V':
+ boothowto |= RB_VERBOSE;
+ bootverbose = 1;
+ break;
+
+ default:
+ printf("Unrecognized boot flag '%c'.\n", *p);
+ break;
+ }
+ }
+
+ /*
+ * Catch case of boot_verbose set in environment.
+ */
+ if ((p = getenv("boot_verbose")) != NULL) {
+ if (strcmp(p, "yes") == 0 || strcmp(p, "YES") == 0) {
+ boothowto |= RB_VERBOSE;
+ bootverbose = 1;
+ }
+ }
+
+ /*
+ * Initialize debuggers, and break into them if appropriate.
+ */
+#ifdef DDB
+ kdb_init();
+ if (boothowto & RB_KDB) {
+ printf("Boot flags requested debugger\n");
+ breakpoint();
+ }
+#endif
+}
+
+void
+bzero(void *buf, size_t len)
+{
+ caddr_t p = buf;
+
+ while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
+ *p++ = 0;
+ len--;
+ }
+ while (len >= sizeof(u_long) * 8) {
+ *(u_long*) p = 0;
+ *((u_long*) p + 1) = 0;
+ *((u_long*) p + 2) = 0;
+ *((u_long*) p + 3) = 0;
+ len -= sizeof(u_long) * 8;
+ *((u_long*) p + 4) = 0;
+ *((u_long*) p + 5) = 0;
+ *((u_long*) p + 6) = 0;
+ *((u_long*) p + 7) = 0;
+ p += sizeof(u_long) * 8;
+ }
+ while (len >= sizeof(u_long)) {
+ *(u_long*) p = 0;
+ len -= sizeof(u_long);
+ p += sizeof(u_long);
+ }
+ while (len) {
+ *p++ = 0;
+ len--;
+ }
+}
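
The routine above has three distinct phases: a byte loop to reach word
alignment, an eight-word unrolled body, and word and byte tails.  A userland
harness (a lightly reordered copy of the loop, renamed) that checks the edge
cases around a misaligned, odd-length range:

#include <assert.h>
#include <stddef.h>
#include <string.h>

typedef unsigned long u_long;

static void
zero_like_bzero(void *buf, size_t len)
{
	char *p = buf;

	while (((size_t)p & (sizeof(u_long) - 1)) && len) {	/* align head */
		*p++ = 0;
		len--;
	}
	while (len >= sizeof(u_long) * 8) {			/* unrolled body */
		*(u_long *)p = 0;
		*((u_long *)p + 1) = 0;
		*((u_long *)p + 2) = 0;
		*((u_long *)p + 3) = 0;
		*((u_long *)p + 4) = 0;
		*((u_long *)p + 5) = 0;
		*((u_long *)p + 6) = 0;
		*((u_long *)p + 7) = 0;
		len -= sizeof(u_long) * 8;
		p += sizeof(u_long) * 8;
	}
	while (len >= sizeof(u_long)) {				/* word tail */
		*(u_long *)p = 0;
		len -= sizeof(u_long);
		p += sizeof(u_long);
	}
	while (len) {						/* byte tail */
		*p++ = 0;
		len--;
	}
}

int
main(void)
{
	unsigned char buf[256];
	size_t off = 3, n = 201;	/* misaligned start, odd length */
	size_t i;

	memset(buf, 0xff, sizeof(buf));
	zero_like_bzero(buf + off, n);
	for (i = 0; i < sizeof(buf); i++)
		assert(buf[i] == ((i >= off && i < off + n) ? 0 : 0xff));
	return (0);
}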
+
+void
+DELAY(int n)
+{
+ /* TODO */
+}
+
+/*
+ * Send an interrupt to process.
+ *
+ * Stack is set up to allow sigcode stored
+ * at top to call routine, followed by kcall
+ * to sigreturn routine below. After sigreturn
+ * resets the signal mask, the stack, and the
+ * frame pointer, it returns to the user
+ * specified pc, psl.
+ */
+void
+sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
+{
+ struct proc *p = curproc;
+ struct trapframe *frame;
+ struct sigacts *psp = p->p_sigacts;
+ struct sigframe sf, *sfp;
+ u_int64_t sbs = 0;
+ int oonstack, rndfsize;
+
+ frame = p->p_md.md_tf;
+ oonstack = (p->p_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0;
+ rndfsize = ((sizeof(sf) + 15) / 16) * 16;
+
+ /* save user context */
+ bzero(&sf, sizeof(struct sigframe));
+ sf.sf_uc.uc_sigmask = *mask;
+ sf.sf_uc.uc_stack = p->p_sigstk;
+ sf.sf_uc.uc_mcontext.mc_flags = IA64_MC_FLAG_ONSTACK;
+
+ sf.sf_uc.uc_mcontext.mc_nat = 0; /* XXX */
+ sf.sf_uc.uc_mcontext.mc_sp = frame->tf_r[FRAME_SP];
+ sf.sf_uc.uc_mcontext.mc_ip = frame->tf_cr_iip;
+ sf.sf_uc.uc_mcontext.mc_cfm = 0; /* XXX */
+ sf.sf_uc.uc_mcontext.mc_um = frame->tf_cr_ipsr & 0x1fff;
+ sf.sf_uc.uc_mcontext.mc_ar_rsc = frame->tf_ar_rsc;
+ sf.sf_uc.uc_mcontext.mc_ar_bsp = frame->tf_ar_bsp;
+ sf.sf_uc.uc_mcontext.mc_ar_rnat = frame->tf_ar_rnat;
+ sf.sf_uc.uc_mcontext.mc_ar_ccv = frame->tf_ar_ccv;
+ sf.sf_uc.uc_mcontext.mc_ar_unat = frame->tf_ar_unat;
+ sf.sf_uc.uc_mcontext.mc_ar_fpsr = frame->tf_ar_fpsr;
+ sf.sf_uc.uc_mcontext.mc_ar_pfs = frame->tf_ar_pfs;
+ sf.sf_uc.uc_mcontext.mc_pr = frame->tf_pr;
+
+ bcopy(&frame->tf_b[0],
+ &sf.sf_uc.uc_mcontext.mc_br[0],
+ 8 * sizeof(unsigned long));
+ bcopy(&frame->tf_r[0],
+ &sf.sf_uc.uc_mcontext.mc_gr[0],
+ 31 * sizeof(unsigned long));
+
+ /* XXX mc_fr[] */
+
+ /*
+ * Allocate and validate space for the signal handler
+ * context. Note that if the stack is in P0 space, the
+ * call to grow() is a nop, and the useracc() check
+ * will fail if the process has not already allocated
+ * the space with a `brk'.
+ */
+ if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
+ SIGISMEMBER(psp->ps_sigonstack, sig)) {
+ sbs = (u_int64_t) p->p_sigstk.ss_sp;
+ sfp = (struct sigframe *)((caddr_t)p->p_sigstk.ss_sp +
+ p->p_sigstk.ss_size - rndfsize);
+ p->p_sigstk.ss_flags |= SS_ONSTACK;
+ } else
+ sfp = (struct sigframe *)(frame->tf_r[FRAME_SP] - rndfsize);
+
+ (void)grow_stack(p, (u_long)sfp);
+#ifdef DEBUG
+ if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
+ printf("sendsig(%d): sig %d ssp %p usp %p\n", p->p_pid,
+ sig, &sf, sfp);
+#endif
+ if (!useracc((caddr_t)sfp, sizeof(sf), VM_PROT_WRITE)) {
+#ifdef DEBUG
+ if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
+ printf("sendsig(%d): useracc failed on sig %d\n",
+ p->p_pid, sig);
+#endif
+ /*
+ * Process has trashed its stack; give it an illegal
+ * instruction to halt it in its tracks.
+ */
+ SIGACTION(p, SIGILL) = SIG_DFL;
+ SIGDELSET(p->p_sigignore, SIGILL);
+ SIGDELSET(p->p_sigcatch, SIGILL);
+ SIGDELSET(p->p_sigmask, SIGILL);
+ psignal(p, SIGILL);
+ return;
+ }
+
+#if 0
+ /* save the floating-point state, if necessary, then copy it. */
+ ia64_fpstate_save(p, 1);
+ sf.sf_uc.uc_mcontext.mc_ownedfp = p->p_md.md_flags & MDP_FPUSED;
+ bcopy(&p->p_addr->u_pcb.pcb_fp,
+ (struct fpreg *)sf.sf_uc.uc_mcontext.mc_fpregs,
+ sizeof(struct fpreg));
+ sf.sf_uc.uc_mcontext.mc_fp_control = p->p_addr->u_pcb.pcb_fp_control;
+#endif
+
+#ifdef COMPAT_OSF1
+ /*
+ * XXX Create an OSF/1-style sigcontext and associated goo.
+ */
+#endif
+
+ /*
+ * copy the frame out to userland.
+ */
+ (void) copyout((caddr_t)&sf, (caddr_t)sfp, sizeof(sf));
+#ifdef DEBUG
+ if (sigdebug & SDB_FOLLOW)
+ printf("sendsig(%d): sig %d sfp %p code %lx\n", p->p_pid, sig,
+ sfp, code);
+#endif
+
+ /*
+ * Set up the registers to return to sigcode.
+ */
+ frame->tf_cr_iip = PS_STRINGS - (esigcode - sigcode);
+ frame->tf_r[FRAME_R1] = sig;
+ if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
+ frame->tf_r[FRAME_R2] = (u_int64_t)&(sfp->sf_si);
+
+ /* Fill in POSIX parts */
+ sf.sf_si.si_signo = sig;
+ sf.sf_si.si_code = code;
+ sf.sf_si.si_addr = (void*)frame->tf_cr_ifa;
+ }
+ else
+ frame->tf_r[FRAME_R2] = code;
+
+ frame->tf_r[FRAME_R3] = (u_int64_t)&(sfp->sf_uc);
+ frame->tf_r[FRAME_R4] = (u_int64_t)catcher;
+ frame->tf_r[FRAME_R5] = sbs;
+ frame->tf_r[FRAME_SP] = (unsigned long)sfp;
+
+#ifdef DEBUG
+ if (sigdebug & SDB_FOLLOW)
+ printf("sendsig(%d): pc %lx, catcher %lx\n", p->p_pid,
+		    frame->tf_cr_iip, frame->tf_r[FRAME_R4]);
+ if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
+ printf("sendsig(%d): sig %d returns\n",
+ p->p_pid, sig);
+#endif
+}
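
The frame placement in sendsig() boils down to rounding the sigframe size up
to the ia64's 16-byte stack alignment and carving that much out below the
stack in use (the alternate signal stack or the interrupted user sp).  A
worked example with made-up sizes:

#include <stdio.h>

int
main(void)
{
	unsigned long sp = 0x9fffffffff0100UL;	/* hypothetical user sp */
	size_t framesize = 1224;	/* pretend sizeof(struct sigframe) */
	size_t rndfsize = ((framesize + 15) / 16) * 16;
	unsigned long sfp = sp - rndfsize;

	printf("rounded %zu -> %zu; frame at %#lx, 16-byte aligned: %d\n",
	    framesize, rndfsize, sfp, (sfp & 15) == 0);
	return (0);
}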
+
+/*
+ * System call to cleanup state after a signal
+ * has been taken. Reset signal mask and
+ * stack state from context left by sendsig (above).
+ * Return to previous pc and psl as specified by
+ * context left by sendsig. Check carefully to
+ * make sure that the user has not modified the
+ * state to gain improper privileges.
+ */
+int
+osigreturn(struct proc *p,
+ struct osigreturn_args /* {
+ struct osigcontext *sigcntxp;
+ } */ *uap)
+{
+ return EOPNOTSUPP;
+}
+
+/*
+ * System call to cleanup state after a signal
+ * has been taken. Reset signal mask and
+ * stack state from context left by sendsig (above).
+ * Return to previous pc and psl as specified by
+ * context left by sendsig. Check carefully to
+ * make sure that the user has not modified the
+ * state to gain improper privileges.
+ */
+
+int
+sigreturn(struct proc *p,
+ struct sigreturn_args /* {
+ ucontext_t *sigcntxp;
+ } */ *uap)
+{
+#if 0
+ ucontext_t uc, *ucp;
+ struct pcb *pcb;
+ unsigned long val;
+
+ ucp = uap->sigcntxp;
+ pcb = &p->p_addr->u_pcb;
+
+#ifdef DEBUG
+ if (sigdebug & SDB_FOLLOW)
+ printf("sigreturn: pid %d, scp %p\n", p->p_pid, ucp);
+#endif
+
+ /*
+ * Fetch the entire context structure at once for speed.
+ */
+ if (copyin((caddr_t)ucp, (caddr_t)&uc, sizeof(ucontext_t)))
+ return (EFAULT);
+
+ /*
+ * Restore the user-supplied information
+ */
+ set_regs(p, (struct reg *)uc.uc_mcontext.mc_regs);
+ val = (uc.uc_mcontext.mc_regs[R_PS] | IA64_PSL_USERSET) &
+ ~IA64_PSL_USERCLR;
+ p->p_md.md_tf->tf_regs[FRAME_PS] = val;
+ p->p_md.md_tf->tf_regs[FRAME_PC] = uc.uc_mcontext.mc_regs[R_PC];
+ ia64_pal_wrusp(uc.uc_mcontext.mc_regs[R_SP]);
+
+ if (uc.uc_mcontext.mc_onstack & 1)
+ p->p_sigstk.ss_flags |= SS_ONSTACK;
+ else
+ p->p_sigstk.ss_flags &= ~SS_ONSTACK;
+
+ p->p_sigmask = uc.uc_sigmask;
+ SIG_CANTMASK(p->p_sigmask);
+
+ /* XXX ksc.sc_ownedfp ? */
+ ia64_fpstate_drop(p);
+ bcopy((struct fpreg *)uc.uc_mcontext.mc_fpregs,
+ &p->p_addr->u_pcb.pcb_fp, sizeof(struct fpreg));
+ p->p_addr->u_pcb.pcb_fp_control = uc.uc_mcontext.mc_fp_control;
+
+#ifdef DEBUG
+ if (sigdebug & SDB_FOLLOW)
+ printf("sigreturn(%d): returns\n", p->p_pid);
+#endif
+#endif
+ return (EJUSTRETURN);
+}
+
+/*
+ * Machine dependent boot() routine
+ *
+ * I haven't seen anything to put here yet
+ * Possibly some stuff might be grafted back here from boot()
+ */
+void
+cpu_boot(int howto)
+{
+}
+
+/*
+ * Shutdown the CPU as much as possible
+ */
+void
+cpu_halt(void)
+{
+ /* TODO */
+}
+
+/*
+ * Clear registers on exec
+ */
+void
+setregs(struct proc *p, u_long entry, u_long stack, u_long ps_strings)
+{
+ /* TODO setup trapframe to enter CSU code at the right place */
+}
+
+int
+ptrace_set_pc(struct proc *p, unsigned long addr)
+{
+ /* TODO set pc in trapframe */
+ return 0;
+}
+
+int
+ptrace_single_step(struct proc *p)
+{
+ /* TODO arrange for user process to single step */
+ return 0;
+}
+
+int ptrace_read_u_check(struct proc *p, vm_offset_t addr, size_t len)
+{
+ vm_offset_t gap;
+
+ if ((vm_offset_t) (addr + len) < addr)
+ return EPERM;
+ if ((vm_offset_t) (addr + len) <= sizeof(struct user))
+ return 0;
+
+ gap = (char *) p->p_md.md_tf - (char *) p->p_addr;
+
+ if ((vm_offset_t) addr < gap)
+ return EPERM;
+ if ((vm_offset_t) (addr + len) <=
+ (vm_offset_t) (gap + sizeof(struct trapframe)))
+ return 0;
+ return EPERM;
+}
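
The first test above is an overflow guard: if addr + len wraps past the end
of the address space, the sum comes out smaller than addr and the request
must be refused before any range comparison is made.  A standalone sketch of
the pattern:

#include <limits.h>
#include <stdio.h>

/* Accept [addr, addr+len) only if it neither wraps nor exceeds limit. */
static int
range_ok(unsigned long addr, unsigned long len, unsigned long limit)
{
	if (addr + len < addr)		/* unsigned wrap-around */
		return (0);
	return (addr + len <= limit);
}

int
main(void)
{
	printf("%d %d\n",
	    range_ok(0x1000, 0x100, 0x2000),		/* fits: 1 */
	    range_ok(ULONG_MAX - 8, 0x100, 0x2000));	/* wraps: 0 */
	return (0);
}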
+
+int
+ptrace_write_u(struct proc *p, vm_offset_t off, long data)
+{
+ vm_offset_t min;
+#if 0
+ struct trapframe frame_copy;
+ struct trapframe *tp;
+#endif
+
+ /*
+ * Privileged kernel state is scattered all over the user area.
+ * Only allow write access to parts of regs and to fpregs.
+ */
+ min = (char *)p->p_md.md_tf - (char *)p->p_addr;
+ if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) {
+#if 0
+ tp = p->p_md.md_tf;
+ frame_copy = *tp;
+ *(int *)((char *)&frame_copy + (off - min)) = data;
+ if (!EFLAGS_SECURE(frame_copy.tf_eflags, tp->tf_eflags) ||
+ !CS_SECURE(frame_copy.tf_cs))
+ return (EINVAL);
+#endif
+ *(int*)((char *)p->p_addr + off) = data;
+ return (0);
+ }
+ min = offsetof(struct user, u_pcb);
+ if (off >= min && off <= min + sizeof(struct pcb)) {
+ *(int*)((char *)p->p_addr + off) = data;
+ return (0);
+ }
+ return (EFAULT);
+}
+
+int
+ia64_pa_access(vm_offset_t pa)
+{
+ return VM_PROT_READ|VM_PROT_WRITE;
+}
+
+int
+fill_regs(p, regs)
+ struct proc *p;
+ struct reg *regs;
+{
+ /* TODO copy trapframe to regs */
+ return (0);
+}
+
+int
+set_regs(p, regs)
+ struct proc *p;
+ struct reg *regs;
+{
+ /* TODO copy regs to trapframe */
+ return (0);
+}
+
+int
+fill_fpregs(p, fpregs)
+ struct proc *p;
+ struct fpreg *fpregs;
+{
+ /* TODO copy fpu state to fpregs */
+ ia64_fpstate_save(p, 0);
+
+#if 0
+ bcopy(&p->p_addr->u_pcb.pcb_fp, fpregs, sizeof *fpregs);
+#endif
+ return (0);
+}
+
+int
+set_fpregs(p, fpregs)
+ struct proc *p;
+ struct fpreg *fpregs;
+{
+ /* TODO copy fpregs fpu state */
+ ia64_fpstate_drop(p);
+
+#if 0
+ bcopy(fpregs, &p->p_addr->u_pcb.pcb_fp, sizeof *fpregs);
+#endif
+ return (0);
+}
+
+#ifndef DDB
+void
+Debugger(const char *msg)
+{
+ printf("Debugger(\"%s\") called.\n", msg);
+}
+#endif /* no DDB */
+
+#include <sys/disklabel.h>
+
+/*
+ * Determine the size of the transfer, and make sure it is
+ * within the boundaries of the partition. Adjust transfer
+ * if needed, and signal errors or early completion.
+ */
+int
+bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel)
+{
+#if 0
+ struct partition *p = lp->d_partitions + dkpart(bp->bio_dev);
+ int labelsect = lp->d_partitions[0].p_offset;
+ int maxsz = p->p_size,
+ sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;
+
+ /* overwriting disk label ? */
+ /* XXX should also protect bootstrap in first 8K */
+ if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect &&
+#if LABELSECTOR != 0
+ bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
+#endif
+ (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
+ bp->bio_error = EROFS;
+ goto bad;
+ }
+
+#if defined(DOSBBSECTOR) && defined(notyet)
+ /* overwriting master boot record? */
+ if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR &&
+ (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
+ bp->bio_error = EROFS;
+ goto bad;
+ }
+#endif
+
+ /* beyond partition? */
+ if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) {
+ /* if exactly at end of disk, return an EOF */
+ if (bp->bio_blkno == maxsz) {
+ bp->bio_resid = bp->bio_bcount;
+ return(0);
+ }
+ /* or truncate if part of it fits */
+ sz = maxsz - bp->bio_blkno;
+ if (sz <= 0) {
+ bp->bio_error = EINVAL;
+ goto bad;
+ }
+ bp->bio_bcount = sz << DEV_BSHIFT;
+ }
+
+ bp->bio_pblkno = bp->bio_blkno + p->p_offset;
+ return(1);
+
+bad:
+ bp->bio_flags |= BIO_ERROR;
+#endif
+ return(-1);
+
+}
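
The clipping logic in the (currently disabled) body above is worth spelling
out: sector counts are derived from the byte count, a transfer that starts
exactly at the end of the partition is an EOF, and one that merely runs past
the end is truncated.  A standalone sketch with made-up numbers; DEV_BSHIFT
is assumed to be the conventional 9 (512-byte sectors):

#include <stdio.h>

#define DEV_BSHIFT	9
#define DEV_BSIZE	(1 << DEV_BSHIFT)

int
main(void)
{
	long maxsz = 1000;		/* partition size in sectors */
	long blkno = 996;		/* first sector of the transfer */
	long bcount = 4096;		/* requested bytes (8 sectors) */
	long sz = (bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;

	if (blkno + sz > maxsz) {
		if (blkno == maxsz) {
			printf("at end of partition: EOF\n");
			return (0);
		}
		sz = maxsz - blkno;	/* truncate to what fits */
		bcount = sz << DEV_BSHIFT;
	}
	printf("doing %ld sectors (%ld bytes)\n", sz, bcount);
	return (0);
}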
+
+static int
+sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
+ req);
+ if (!error && req->newptr)
+ resettodr();
+ return (error);
+}
+
+SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
+ &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");
+
+SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
+ CTLFLAG_RW, &disable_rtc_set, 0, "");
+
+SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
+ CTLFLAG_RW, &wall_cmos_clock, 0, "");
+
+void
+ia64_fpstate_check(struct proc *p)
+{
+#if 0
+ /* TODO panic if p has fp enabled and p != fpcurproc */
+ if (p->p_addr->u_pcb.pcb_hw.apcb_flags & IA64_PCB_FLAGS_FEN)
+ if (p != fpcurproc)
+ panic("ia64_check_fpcurproc: bogus");
+#endif
+}
+
+#define SET_FEN(p) /* TODO set fp enable for p */
+
+#define CLEAR_FEN(p) /* TODO clear fp enable for p */
+
+/*
+ * Save the floating point state in the pcb. Use this to get read-only
+ * access to the floating point state. If write is true, the current
+ * fp process is cleared so that fp state can safely be modified. The
+ * process will automatically reload the changed state by generating a
+ * FEN trap.
+ */
+void
+ia64_fpstate_save(struct proc *p, int write)
+{
+#if 0
+ if (p == fpcurproc) {
+ /*
+ * If curproc != fpcurproc, then we need to enable FEN
+ * so that we can dump the fp state.
+ */
+ ia64_pal_wrfen(1);
+
+ /*
+ * Save the state in the pcb.
+ */
+ savefpstate(&p->p_addr->u_pcb.pcb_fp);
+
+ if (write) {
+ /*
+ * If fpcurproc == curproc, just ask the
+ * PALcode to disable FEN, otherwise we must
+ * clear the FEN bit in fpcurproc's pcb.
+ */
+ if (fpcurproc == curproc)
+ ia64_pal_wrfen(0);
+ else
+ CLEAR_FEN(fpcurproc);
+ fpcurproc = NULL;
+ } else {
+ /*
+ * Make sure that we leave FEN enabled if
+ * curproc == fpcurproc. We must have at most
+ * one process with FEN enabled. Note that FEN
+ * must already be set in fpcurproc's pcb.
+ */
+ if (curproc != fpcurproc)
+ ia64_pal_wrfen(0);
+ }
+ }
+#endif
+}
+
+/*
+ * Relinquish ownership of the FP state. This is called instead of
+ * ia64_save_fpstate() if the entire FP state is being changed
+ * (e.g. on sigreturn).
+ */
+void
+ia64_fpstate_drop(struct proc *p)
+{
+#if 0
+ if (p == fpcurproc) {
+ if (p == curproc) {
+ /*
+ * Disable FEN via the PALcode. This will
+ * clear the bit in the pcb as well.
+ */
+ ia64_pal_wrfen(0);
+ } else {
+ /*
+ * Clear the FEN bit of the pcb.
+ */
+ CLEAR_FEN(p);
+ }
+ fpcurproc = NULL;
+ }
+#endif
+}
+
+/*
+ * Switch the current owner of the fp state to p, reloading the state
+ * from the pcb.
+ */
+void
+ia64_fpstate_switch(struct proc *p)
+{
+#if 0
+ /*
+ * Enable FEN so that we can access the fp registers.
+ */
+ ia64_pal_wrfen(1);
+ if (fpcurproc) {
+ /*
+		 * Dump the old fp state if it's valid.
+ */
+ savefpstate(&fpcurproc->p_addr->u_pcb.pcb_fp);
+ CLEAR_FEN(fpcurproc);
+ }
+
+ /*
+ * Remember the new FP owner and reload its state.
+ */
+ fpcurproc = p;
+ restorefpstate(&fpcurproc->p_addr->u_pcb.pcb_fp);
+
+ /*
+ * If the new owner is curproc, leave FEN enabled, otherwise
+ * mark its PCB so that it gets FEN when we context switch to
+ * it later.
+ */
+ if (p != curproc) {
+ ia64_pal_wrfen(0);
+ SET_FEN(p);
+ }
+
+ p->p_md.md_flags |= MDP_FPUSED;
+#endif
+}
diff --git a/sys/ia64/ia64/mem.c b/sys/ia64/ia64/mem.c
new file mode 100644
index 0000000..b3f5531
--- /dev/null
+++ b/sys/ia64/ia64/mem.c
@@ -0,0 +1,296 @@
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and code derived from software contributed to
+ * Berkeley by William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: mem.c 1.13 89/10/08$
+ * from: @(#)mem.c 7.2 (Berkeley) 5/9/91
+ * $FreeBSD$
+ */
+
+/*
+ * Memory special file
+ */
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/memrange.h>
+#include <sys/proc.h>
+#include <sys/msgbuf.h>
+#include <sys/systm.h>
+#include <sys/signalvar.h>
+#include <sys/uio.h>
+
+#include <machine/frame.h>
+#ifdef PERFMON
+#include <machine/perfmon.h>
+#endif
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+
+static dev_t memdev, kmemdev;
+#ifdef PERFMON
+static dev_t perfdev;
+#endif /* PERFMON */
+
+static d_open_t mmopen;
+static d_close_t mmclose;
+static d_read_t mmrw;
+static d_ioctl_t mmioctl;
+static d_mmap_t memmmap;
+
+#define CDEV_MAJOR 2
+static struct cdevsw mem_cdevsw = {
+ /* open */ mmopen,
+ /* close */ mmclose,
+ /* read */ mmrw,
+ /* write */ mmrw,
+ /* ioctl */ mmioctl,
+ /* poll */ (d_poll_t *)seltrue,
+ /* mmap */ memmmap,
+ /* strategy */ nostrategy,
+ /* name */ "mem",
+ /* maj */ CDEV_MAJOR,
+ /* dump */ nodump,
+ /* psize */ nopsize,
+ /* flags */ D_MEM,
+ /* bmaj */ -1
+};
+
+#if NHWI > 0
+#define ICU_LEN (NHWI)
+#else
+#define ICU_LEN (NSWI)
+#endif
+
+struct mem_range_softc mem_range_softc;
+
+static int
+mmclose(dev_t dev, int flags, int fmt, struct proc *p)
+{
+ switch (minor(dev)) {
+#ifdef PERFMON
+ case 32:
+ return perfmon_close(dev, flags, fmt, p);
+#endif
+ default:
+ break;
+ }
+ return (0);
+}
+
+static int
+mmopen(dev_t dev, int flags, int fmt, struct proc *p)
+{
+
+ switch (minor(dev)) {
+ case 0:
+ case 1:
+ if ((flags & FWRITE) && securelevel > 0)
+ return (EPERM);
+ break;
+ case 32:
+#ifdef PERFMON
+ return perfmon_open(dev, flags, fmt, p);
+#else
+ return ENODEV;
+#endif
+ default:
+ break;
+ }
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+mmrw(dev_t dev, struct uio *uio, int flags)
+{
+ vm_offset_t o, v;
+ int c = 0;
+ struct iovec *iov;
+ int error = 0, rw;
+ vm_offset_t addr, eaddr;
+
+ while (uio->uio_resid > 0 && !error) {
+ iov = uio->uio_iov;
+ if (iov->iov_len == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ if (uio->uio_iovcnt < 0)
+ panic("mmrw");
+ continue;
+ }
+ switch (minor(dev)) {
+
+/* minor device 0 is physical memory */
+ case 0:
+ v = uio->uio_offset;
+kmemphys:
+			/* Allow access only to RAM. */
+ rw = (uio->uio_rw == UIO_READ) ? VM_PROT_READ : VM_PROT_WRITE;
+ if ((ia64_pa_access(v) & rw) != rw) {
+ error = EFAULT;
+ c = 0;
+ break;
+ }
+
+ o = uio->uio_offset & PAGE_MASK;
+ c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
+ error =
+ uiomove((caddr_t)IA64_PHYS_TO_RR7(v), c, uio);
+ continue;
+
+/* minor device 1 is kernel memory */
+ case 1:
+ v = uio->uio_offset;
+
+ if (v >= IA64_RR_BASE(6)) {
+ v = IA64_RR_MASK(v);
+ goto kmemphys;
+ }
+
+ c = min(iov->iov_len, MAXPHYS);
+ /*
+ * Make sure that all of the pages are currently resident so
+ * that we don't create any zero-fill pages.
+ */
+ addr = trunc_page(v);
+ eaddr = round_page(v + c);
+ for (; addr < eaddr; addr += PAGE_SIZE)
+ if (pmap_extract(kernel_pmap, addr) == 0)
+ return EFAULT;
+ if (!kernacc((caddr_t)v, c,
+ uio->uio_rw == UIO_READ ?
+ VM_PROT_READ : VM_PROT_WRITE))
+ return (EFAULT);
+ error = uiomove((caddr_t)v, c, uio);
+ }
+
+ if (error)
+ break;
+ iov->iov_base += c;
+ iov->iov_len -= c;
+ uio->uio_offset += c;
+ uio->uio_resid -= c;
+ }
+ return (error);
+}
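
Reads of minor 0 are broken up a page at a time: the offset within the
current page bounds how much can be moved in one uiomove() call, so a request
that straddles page boundaries takes several passes through the loop.  A
standalone sketch of the chunking (PAGE_SIZE 8192 is an assumption used only
for illustration):

#include <stdio.h>

#define PAGE_SIZE	8192		/* assumed page size */
#define PAGE_MASK	(PAGE_SIZE - 1)

int
main(void)
{
	unsigned long offset = 0x1f000;	/* hypothetical physical offset */
	long resid = 20000;		/* bytes requested */

	while (resid > 0) {
		long o = offset & PAGE_MASK;
		long c = resid < PAGE_SIZE - o ? resid : PAGE_SIZE - o;

		printf("copy %ld bytes at %#lx\n", c, offset);
		offset += c;
		resid -= c;
	}
	return (0);
}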
+
+/*******************************************************\
+* allow user processes to MMAP some memory sections *
+* instead of going through read/write *
+\*******************************************************/
+static int
+memmmap(dev_t dev, vm_offset_t offset, int prot)
+{
+ /*
+ * /dev/mem is the only one that makes sense through this
+ * interface. For /dev/kmem any physaddr we return here
+ * could be transient and hence incorrect or invalid at
+ * a later time.
+ */
+ if (minor(dev) != 0)
+ return (-1);
+
+ /*
+ * Allow access only in RAM.
+ */
+ if ((prot & ia64_pa_access(atop((vm_offset_t)offset))) != prot)
+ return (-1);
+ return (ia64_btop(IA64_PHYS_TO_RR7(offset)));
+}
+
+static int
+mmioctl(dev_t dev, u_long cmd, caddr_t cmdarg, int flags, struct proc *p)
+{
+ switch(minor(dev)) {
+#ifdef PERFMON
+ case 32:
+ return perfmon_ioctl(dev, cmd, cmdarg, flags, p);
+#endif
+ default:
+ return ENODEV;
+ }
+
+ return (0);
+}
+
+static int
+mem_modevent(module_t mod, int type, void *data)
+{
+ switch(type) {
+ case MOD_LOAD:
+ if (bootverbose)
+ printf("mem: <memory & I/O>\n");
+/* XXX - ??? */
+#if 0
+ /* Initialise memory range handling */
+ if (mem_range_softc.mr_op != NULL)
+ mem_range_softc.mr_op->init(&mem_range_softc);
+#endif
+
+ memdev = make_dev(&mem_cdevsw, 0, UID_ROOT, GID_KMEM,
+ 0640, "mem");
+ kmemdev = make_dev(&mem_cdevsw, 1, UID_ROOT, GID_KMEM,
+ 0640, "kmem");
+#ifdef PERFMON
+ perfdev = make_dev(&mem_cdevsw, 32, UID_ROOT, GID_KMEM,
+ 0640, "perfmon");
+#endif /* PERFMON */
+ return 0;
+
+ case MOD_UNLOAD:
+ destroy_dev(memdev);
+ destroy_dev(kmemdev);
+#ifdef PERFMON
+ destroy_dev(perfdev);
+#endif /* PERFMON */
+ return 0;
+
+ case MOD_SHUTDOWN:
+ return 0;
+
+ default:
+ return EOPNOTSUPP;
+ }
+}
+
+DEV_MODULE(mem, mem_modevent, NULL);
diff --git a/sys/ia64/ia64/mp_machdep.c b/sys/ia64/ia64/mp_machdep.c
new file mode 100644
index 0000000..6dea327
--- /dev/null
+++ b/sys/ia64/ia64/mp_machdep.c
@@ -0,0 +1,789 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <machine/mutex.h>
+#include <sys/ktr.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <sys/user.h>
+#include <sys/dkstat.h>
+
+#include <machine/smp.h>
+#include <machine/lock.h>
+#include <machine/atomic.h>
+#include <machine/ipl.h>
+#include <machine/globaldata.h>
+#include <machine/pmap.h>
+#include <machine/clock.h>
+
+#define CHECKSTATE_USER 0
+#define CHECKSTATE_SYS 1
+#define CHECKSTATE_INTR 2
+
+volatile u_int stopped_cpus;
+volatile u_int started_cpus;
+volatile u_int checkstate_probed_cpus;
+volatile u_int checkstate_need_ast;
+volatile u_int checkstate_pending_ast;
+struct proc* checkstate_curproc[NCPUS];
+int checkstate_cpustate[NCPUS];
+u_long checkstate_pc[NCPUS];
+volatile u_int resched_cpus;
+void (*cpustop_restartfunc) __P((void));
+int mp_ncpus;
+
+int smp_started;
+int boot_cpu_id;
+u_int32_t all_cpus;
+
+static struct globaldata *cpuno_to_globaldata[NCPUS];
+
+int smp_active = 0; /* are the APs allowed to run? */
+SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
+
+/* Is forwarding of an interrupt to the CPU holding the ISR lock enabled? */
+int forward_irq_enabled = 1;
+SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
+ &forward_irq_enabled, 0, "");
+
+/* Enable forwarding of a signal to a process running on a different CPU */
+static int forward_signal_enabled = 1;
+SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
+ &forward_signal_enabled, 0, "");
+
+/* Enable forwarding of roundrobin to all other cpus */
+static int forward_roundrobin_enabled = 1;
+SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
+ &forward_roundrobin_enabled, 0, "");
+
+/*
+ * Initialise a struct globaldata.
+ */
+void
+globaldata_init(struct globaldata *globaldata, int cpuno, size_t sz)
+{
+ bzero(globaldata, sz);
+ globaldata->gd_cpuno = cpuno;
+ globaldata->gd_other_cpus = all_cpus & ~(1 << cpuno);
+ globaldata->gd_next_asn = 0;
+ globaldata->gd_current_asngen = 1;
+ cpuno_to_globaldata[cpuno] = globaldata;
+}
+
+struct globaldata *
+globaldata_find(int cpuno)
+{
+ return cpuno_to_globaldata[cpuno];
+}
+
+/* Implementation of simplelocks */
+
+void
+s_lock_init(struct simplelock *lkp)
+{
+ lkp->lock_data = 0;
+}
+
+void
+s_lock(struct simplelock *lkp)
+{
+ for (;;) {
+ if (s_lock_try(lkp))
+ return;
+
+ /*
+ * Spin until clear.
+ */
+ while (lkp->lock_data)
+ ;
+ }
+}
+
+int
+s_lock_try(struct simplelock *lkp)
+{
+ return 0; /* XXX needed? */
+}
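
s_lock_try() above is still a stub that always fails, so the s_lock() loop
can never actually take the lock.  A standalone sketch of what a working
try/acquire pair looks like, as a test-and-test-and-set spin;
__sync_bool_compare_and_swap stands in here for a kernel atomic
compare-and-set primitive:

#include <stdio.h>

struct sketch_lock {
	volatile int lock_data;
};

static int
sketch_lock_try(struct sketch_lock *lkp)
{
	/* Atomically take the lock if it is free; nonzero on success. */
	return (__sync_bool_compare_and_swap(&lkp->lock_data, 0, 1));
}

static void
sketch_lock(struct sketch_lock *lkp)
{
	for (;;) {
		if (sketch_lock_try(lkp))
			return;
		while (lkp->lock_data)		/* spin on plain reads */
			;
	}
}

static void
sketch_unlock(struct sketch_lock *lkp)
{
	__sync_lock_release(&lkp->lock_data);	/* store 0 with release */
}

int
main(void)
{
	struct sketch_lock l = { 0 };

	sketch_lock(&l);
	printf("try while held: %d\n", sketch_lock_try(&l));	/* 0 */
	sketch_unlock(&l);
	printf("try after release: %d\n", sketch_lock_try(&l));	/* 1 */
	return (0);
}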
+
+/* Other stuff */
+
+/* lock around the MP rendezvous */
+static struct mtx smp_rv_lock;
+
+static void
+init_locks(void)
+{
+ mtx_init(&smp_rv_lock, "smp_rendezvous", MTX_SPIN);
+}
+
+void
+mp_start()
+{
+ init_locks();
+}
+
+void
+mp_announce()
+{
+}
+
+void
+smp_invltlb()
+{
+}
+
+#define GD_TO_INDEX(pc, prof) \
+ ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
+ (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
+
+static void
+addugd_intr_forwarded(struct proc *p, int id, int *astmap)
+{
+ int i;
+ struct uprof *prof;
+ u_long pc;
+
+ pc = checkstate_pc[id];
+ prof = &p->p_stats->p_prof;
+ if (pc >= prof->pr_off &&
+ (i = GD_TO_INDEX(pc, prof)) < prof->pr_size) {
+ if ((p->p_flag & P_OWEUPC) == 0) {
+ prof->pr_addr = pc;
+ prof->pr_ticks = 1;
+ p->p_flag |= P_OWEUPC;
+ }
+ *astmap |= (1 << id);
+ }
+}
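
GD_TO_INDEX() is fixed-point arithmetic: pr_scale carries 16 fractional bits
(a scale of 0x10000 maps the profiled text one-to-one onto the buffer), so
multiplying the pc offset by it and shifting right by 16 gives a byte offset
into the profiling buffer, and the final & ~1 keeps that offset even.  A
worked example with made-up numbers:

#include <stdio.h>

int
main(void)
{
	unsigned long pc = 0x120004230UL;	/* sampled pc */
	unsigned long off = 0x120000000UL;	/* start of profiled region */
	unsigned long scale = 0x8000;		/* 0.5 in 16-bit fixed point */
	int idx = (int)((((unsigned long long)(pc - off) * scale) >> 16) & ~1);

	printf("histogram offset %d\n", idx);	/* half of 0x4230, kept even */
	return (0);
}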
+
+static void
+forwarded_statclock(int id, int pscnt, int *astmap)
+{
+ struct pstats *pstats;
+ long rss;
+ struct rusage *ru;
+ struct vmspace *vm;
+ int cpustate;
+ struct proc *p;
+#ifdef GPROF
+ register struct gmonparam *g;
+ int i;
+#endif
+
+ p = checkstate_curproc[id];
+ cpustate = checkstate_cpustate[id];
+
+ switch (cpustate) {
+ case CHECKSTATE_USER:
+ if (p->p_flag & P_PROFIL)
+ addugd_intr_forwarded(p, id, astmap);
+ if (pscnt > 1)
+ return;
+ p->p_uticks++;
+ if (p->p_nice > NZERO)
+ cp_time[CP_NICE]++;
+ else
+ cp_time[CP_USER]++;
+ break;
+ case CHECKSTATE_SYS:
+#ifdef GPROF
+ /*
+ * Kernel statistics are just like addugd_intr, only easier.
+ */
+ g = &_gmonparam;
+ if (g->state == GMON_PROF_ON) {
+ i = checkstate_pc[id] - g->lowpc;
+ if (i < g->textsize) {
+ i /= HISTFRACTION * sizeof(*g->kcount);
+ g->kcount[i]++;
+ }
+ }
+#endif
+ if (pscnt > 1)
+ return;
+
+ if (!p)
+ cp_time[CP_IDLE]++;
+ else {
+ p->p_sticks++;
+ cp_time[CP_SYS]++;
+ }
+ break;
+ case CHECKSTATE_INTR:
+ default:
+#ifdef GPROF
+ /*
+ * Kernel statistics are just like addugd_intr, only easier.
+ */
+ g = &_gmonparam;
+ if (g->state == GMON_PROF_ON) {
+ i = checkstate_pc[id] - g->lowpc;
+ if (i < g->textsize) {
+ i /= HISTFRACTION * sizeof(*g->kcount);
+ g->kcount[i]++;
+ }
+ }
+#endif
+ if (pscnt > 1)
+ return;
+ if (p)
+ p->p_iticks++;
+ cp_time[CP_INTR]++;
+ }
+ if (p != NULL) {
+ schedclock(p);
+
+ /* Update resource usage integrals and maximums. */
+ if ((pstats = p->p_stats) != NULL &&
+ (ru = &pstats->p_ru) != NULL &&
+ (vm = p->p_vmspace) != NULL) {
+ ru->ru_ixrss += pgtok(vm->vm_tsize);
+ ru->ru_idrss += pgtok(vm->vm_dsize);
+ ru->ru_isrss += pgtok(vm->vm_ssize);
+ rss = pgtok(vmspace_resident_count(vm));
+ if (ru->ru_maxrss < rss)
+ ru->ru_maxrss = rss;
+ }
+ }
+}
+
+#define BETTER_CLOCK_DIAGNOSTIC
+
+void
+forward_statclock(int pscnt)
+{
+ int map;
+ int id;
+ int i;
+
+ /* Kludge. We don't yet have separate locks for the interrupts
+ * and the kernel. This means that we cannot let the other processors
+ * handle complex interrupts while inhibiting them from entering
+ * the kernel in a non-interrupt context.
+ *
+ * What we can do, without changing the locking mechanisms yet,
+	 * is to let the other processors handle a very simple interrupt
+	 * (which determines the processor states), and do the main
+	 * work ourselves.
+ */
+
+ CTR1(KTR_SMP, "forward_statclock(%d)", pscnt);
+
+ if (!smp_started || cold || panicstr)
+ return;
+
+ /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle ) */
+
+ map = PCPU_GET(other_cpus) & ~stopped_cpus ;
+ checkstate_probed_cpus = 0;
+ if (map != 0)
+ smp_ipi_selected(map, IPI_CHECKSTATE);
+
+ i = 0;
+ while (checkstate_probed_cpus != map) {
+ /* spin */
+ i++;
+ if (i == 100000) {
+#ifdef BETTER_CLOCK_DIAGNOSTIC
+ printf("forward_statclock: checkstate %x\n",
+ checkstate_probed_cpus);
+#endif
+ break;
+ }
+ }
+
+ /*
+	 * Step 2: walk through other processors' processes, update ticks and
+ * profiling info.
+ */
+
+ map = 0;
+ for (id = 0; id < mp_ncpus; id++) {
+ if (id == cpuid)
+ continue;
+ if (((1 << id) & checkstate_probed_cpus) == 0)
+ continue;
+ forwarded_statclock(id, pscnt, &map);
+ }
+ if (map != 0) {
+ checkstate_need_ast |= map;
+ smp_ipi_selected(map, IPI_AST);
+ i = 0;
+ while ((checkstate_need_ast & map) != 0) {
+ /* spin */
+ i++;
+ if (i > 100000) {
+#ifdef BETTER_CLOCK_DIAGNOSTIC
+ printf("forward_statclock: dropped ast 0x%x\n",
+ checkstate_need_ast & map);
+#endif
+ break;
+ }
+ }
+ }
+}
+
+void
+forward_hardclock(int pscnt)
+{
+ int map;
+ int id;
+ struct proc *p;
+ struct pstats *pstats;
+ int i;
+
+ /* Kludge. We don't yet have separate locks for the interrupts
+ * and the kernel. This means that we cannot let the other processors
+ * handle complex interrupts while inhibiting them from entering
+ * the kernel in a non-interrupt context.
+ *
+ * What we can do, without changing the locking mechanisms yet,
+	 * is to let the other processors handle a very simple interrupt
+	 * (which determines the processor states), and do the main
+	 * work ourselves.
+ */
+
+ CTR1(KTR_SMP, "forward_hardclock(%d)", pscnt);
+
+ if (!smp_started || cold || panicstr)
+ return;
+
+ /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
+
+ map = PCPU_GET(other_cpus) & ~stopped_cpus ;
+ checkstate_probed_cpus = 0;
+ if (map != 0)
+ smp_ipi_selected(map, IPI_CHECKSTATE);
+
+ i = 0;
+ while (checkstate_probed_cpus != map) {
+ /* spin */
+ i++;
+ if (i == 100000) {
+#ifdef BETTER_CLOCK_DIAGNOSTIC
+ printf("forward_hardclock: checkstate %x\n",
+ checkstate_probed_cpus);
+#endif
+ breakpoint();
+ break;
+ }
+ }
+
+ /*
+	 * Step 2: walk through other processors' processes, update virtual
+ * timer and profiling timer. If stathz == 0, also update ticks and
+ * profiling info.
+ */
+
+ map = 0;
+ for (id = 0; id < mp_ncpus; id++) {
+ if (id == cpuid)
+ continue;
+ if (((1 << id) & checkstate_probed_cpus) == 0)
+ continue;
+ p = checkstate_curproc[id];
+ if (p) {
+ pstats = p->p_stats;
+ if (checkstate_cpustate[id] == CHECKSTATE_USER &&
+ timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
+ itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
+ psignal(p, SIGVTALRM);
+ map |= (1 << id);
+ }
+ if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
+ itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
+ psignal(p, SIGPROF);
+ map |= (1 << id);
+ }
+ }
+ if (stathz == 0) {
+ forwarded_statclock( id, pscnt, &map);
+ }
+ }
+ if (map != 0) {
+ checkstate_need_ast |= map;
+ smp_ipi_selected(map, IPI_AST);
+ i = 0;
+ while ((checkstate_need_ast & map) != 0) {
+ /* spin */
+ i++;
+ if (i > 100000) {
+#ifdef BETTER_CLOCK_DIAGNOSTIC
+ printf("forward_hardclock: dropped ast 0x%x\n",
+ checkstate_need_ast & map);
+#endif
+ break;
+ }
+ }
+ }
+}
+
+void
+forward_signal(struct proc *p)
+{
+ int map;
+ int id;
+ int i;
+
+ /* Kludge. We don't yet have separate locks for the interrupts
+ * and the kernel. This means that we cannot let the other processors
+ * handle complex interrupts while inhibiting them from entering
+ * the kernel in a non-interrupt context.
+ *
+ * What we can do, without changing the locking mechanisms yet,
+	 * is to let the other processors handle a very simple interrupt
+	 * (which determines the processor states), and do the main
+	 * work ourselves.
+ */
+
+ CTR1(KTR_SMP, "forward_signal(%p)", p);
+
+ if (!smp_started || cold || panicstr)
+ return;
+ if (!forward_signal_enabled)
+ return;
+ while (1) {
+ if (p->p_stat != SRUN)
+ return;
+ id = p->p_oncpu;
+ if (id == 0xff)
+ return;
+ map = (1<<id);
+ checkstate_need_ast |= map;
+ smp_ipi_selected(map, IPI_AST);
+ i = 0;
+ while ((checkstate_need_ast & map) != 0) {
+ /* spin */
+ i++;
+ if (i > 100000) {
+#if 0
+ printf("forward_signal: dropped ast 0x%x\n",
+ checkstate_need_ast & map);
+#endif
+ break;
+ }
+ }
+ if (id == p->p_oncpu)
+ return;
+ }
+}
+
+void
+forward_roundrobin(void)
+{
+ u_int map;
+ int i;
+
+ CTR0(KTR_SMP, "forward_roundrobin()");
+
+ if (!smp_started || cold || panicstr)
+ return;
+ if (!forward_roundrobin_enabled)
+ return;
+ resched_cpus |= PCPU_GET(other_cpus);
+ map = PCPU_GET(other_cpus) & ~stopped_cpus ;
+ smp_ipi_selected(map, IPI_AST);
+ i = 0;
+ while ((checkstate_need_ast & map) != 0) {
+ /* spin */
+ i++;
+ if (i > 100000) {
+#if 0
+ printf("forward_roundrobin: dropped ast 0x%x\n",
+ checkstate_need_ast & map);
+#endif
+ break;
+ }
+ }
+}
+
+/*
+ * When called the executing CPU will send an IPI to all other CPUs
+ * requesting that they halt execution.
+ *
+ * Usually (but not necessarily) called with 'other_cpus' as its arg.
+ *
+ * - Signals all CPUs in map to stop.
+ * - Waits for each to stop.
+ *
+ * Returns:
+ * -1: error
+ * 0: NA
+ * 1: ok
+ *
+ * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
+ * from executing at the same time.
+ */
+int
+stop_cpus(u_int map)
+{
+ int i;
+
+ if (!smp_started)
+ return 0;
+
+ CTR1(KTR_SMP, "stop_cpus(%x)", map);
+
+ /* send the stop IPI to all CPUs in map */
+ smp_ipi_selected(map, IPI_STOP);
+
+ i = 0;
+ while ((stopped_cpus & map) != map) {
+ /* spin */
+ i++;
+ if (i == 100000) {
+ printf("timeout stopping cpus\n");
+ break;
+ }
+ ia64_mf();
+ }
+
+ printf("stopped_cpus=%x\n", stopped_cpus);
+
+ return 1;
+}
+
+
+/*
+ * Called by a CPU to restart stopped CPUs.
+ *
+ * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
+ *
+ * - Signals all CPUs in map to restart.
+ * - Waits for each to restart.
+ *
+ * Returns:
+ * -1: error
+ * 0: NA
+ * 1: ok
+ */
+int
+restart_cpus(u_int map)
+{
+ if (!smp_started)
+ return 0;
+
+ CTR1(KTR_SMP, "restart_cpus(%x)", map);
+
+ started_cpus = map; /* signal other cpus to restart */
+ ia64_mf();
+
+ while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
+ ia64_mf();
+
+ return 1;
+}
+
+/*
+ * All-CPU rendezvous. CPUs are signalled, all execute the setup function
+ * (if specified), rendezvous, execute the action function (if specified),
+ * rendezvous again, execute the teardown function (if specified), and then
+ * resume.
+ *
+ * Note that the supplied external functions _must_ be reentrant and aware
+ * that they are running in parallel and in an unknown lock context.
+ */
+static void (*smp_rv_setup_func)(void *arg);
+static void (*smp_rv_action_func)(void *arg);
+static void (*smp_rv_teardown_func)(void *arg);
+static void *smp_rv_func_arg;
+static volatile int smp_rv_waiters[2];
+
+void
+smp_rendezvous_action(void)
+{
+ /* setup function */
+ if (smp_rv_setup_func != NULL)
+ smp_rv_setup_func(smp_rv_func_arg);
+ /* spin on entry rendezvous */
+ atomic_add_int(&smp_rv_waiters[0], 1);
+ while (smp_rv_waiters[0] < mp_ncpus)
+ ;
+ /* action function */
+ if (smp_rv_action_func != NULL)
+ smp_rv_action_func(smp_rv_func_arg);
+ /* spin on exit rendezvous */
+ atomic_add_int(&smp_rv_waiters[1], 1);
+ while (smp_rv_waiters[1] < mp_ncpus)
+ ;
+ /* teardown function */
+ if (smp_rv_teardown_func != NULL)
+ smp_rv_teardown_func(smp_rv_func_arg);
+}
+
+void
+smp_rendezvous(void (* setup_func)(void *),
+ void (* action_func)(void *),
+ void (* teardown_func)(void *),
+ void *arg)
+{
+ /* obtain rendezvous lock */
+ mtx_enter(&smp_rv_lock, MTX_SPIN); /* XXX sleep here? NOWAIT flag? */
+
+ /* set static function pointers */
+ smp_rv_setup_func = setup_func;
+ smp_rv_action_func = action_func;
+ smp_rv_teardown_func = teardown_func;
+ smp_rv_func_arg = arg;
+ smp_rv_waiters[0] = 0;
+ smp_rv_waiters[1] = 0;
+
+ /* signal other processors, which will enter the IPI with interrupts off */
+ smp_ipi_all_but_self(IPI_RENDEZVOUS);
+
+ /* call executor function */
+ smp_rendezvous_action();
+
+ /* release lock */
+ mtx_exit(&smp_rv_lock, MTX_SPIN);
+}
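
A usage sketch (hypothetical, not part of this commit): a caller that wants
every CPU to run an action, with all CPUs held at the entry and exit
barriers, only needs the action callback; the setup and teardown hooks may
be NULL.  flush_local_tlb() and flush_all_tlbs() are invented names.

static void
flush_local_tlb(void *arg)
{
	(void)arg;
	/* per-CPU work would go here, e.g. purging the local TLB */
}

static void
flush_all_tlbs(void)
{
	smp_rendezvous(NULL, flush_local_tlb, NULL, NULL);
}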
+
+/*
+ * send an IPI to a set of cpus.
+ */
+void
+smp_ipi_selected(u_int32_t cpus, u_int64_t ipi)
+{
+ struct globaldata *globaldata;
+
+ CTR2(KTR_SMP, "smp_ipi_selected: cpus: %x ipi: %lx", cpus, ipi);
+ ia64_mf();
+ while (cpus) {
+ int cpuno = ffs(cpus) - 1;
+ cpus &= ~(1 << cpuno);
+
+ globaldata = cpuno_to_globaldata[cpuno];
+ if (globaldata) {
+ atomic_set_64(&globaldata->gd_pending_ipis, ipi);
+ ia64_mf();
+#if 0
+ CTR1(KTR_SMP, "calling alpha_pal_wripir(%d)", cpuno);
+ alpha_pal_wripir(cpuno);
+#endif
+ }
+ }
+}
+
+/*
+ * send an IPI INTerrupt containing 'vector' to all CPUs, including myself
+ */
+void
+smp_ipi_all(u_int64_t ipi)
+{
+ smp_ipi_selected(all_cpus, ipi);
+}
+
+/*
+ * send an IPI to all CPUs EXCEPT myself
+ */
+void
+smp_ipi_all_but_self(u_int64_t ipi)
+{
+ smp_ipi_selected(PCPU_GET(other_cpus), ipi);
+}
+
+/*
+ * send an IPI to myself
+ */
+void
+smp_ipi_self(u_int64_t ipi)
+{
+ smp_ipi_selected(1 << PCPU_GET(cpuno), ipi);
+}
+
+/*
+ * Handle an IPI sent to this processor.
+ */
+void
+smp_handle_ipi(struct trapframe *frame)
+{
+ u_int64_t ipis;
+ u_int64_t ipi;
+ int cpuno = PCPU_GET(cpuno);
+
+ do {
+ ipis = PCPU_GET(pending_ipis);
+	} while (!atomic_cmpset_64(&PCPU_GET(pending_ipis), ipis, 0));
+
+ CTR1(KTR_SMP, "smp_handle_ipi(), ipis=%lx", ipis);
+ while (ipis) {
+ /*
+ * Find the lowest set bit.
+ */
+ ipi = ipis & ~(ipis - 1);
+ switch (ipi) {
+ case IPI_INVLTLB:
+ break;
+
+ case IPI_RENDEZVOUS:
+ CTR0(KTR_SMP, "IPI_RENDEZVOUS");
+ smp_rendezvous_action();
+ break;
+
+ case IPI_AST:
+ CTR0(KTR_SMP, "IPI_AST");
+ atomic_clear_int(&checkstate_need_ast, 1<<cpuno);
+ atomic_set_int(&checkstate_pending_ast, 1<<cpuno);
+ if ((frame->tf_cr_ipsr & IA64_PSR_CPL)
+ == IA64_PSR_CPL_USER)
+ ast(frame); /* XXX */
+ break;
+
+ case IPI_CHECKSTATE:
+ CTR0(KTR_SMP, "IPI_CHECKSTATE");
+ if ((frame->tf_cr_ipsr & IA64_PSR_CPL)
+ == IA64_PSR_CPL_USER)
+ checkstate_cpustate[cpuno] = CHECKSTATE_USER;
+ else if (PCPU_GET(intr_nesting_level) == 1)
+ checkstate_cpustate[cpuno] = CHECKSTATE_SYS;
+ else
+ checkstate_cpustate[cpuno] = CHECKSTATE_INTR;
+ checkstate_curproc[cpuno] = PCPU_GET(curproc);
+ atomic_set_int(&checkstate_probed_cpus, 1<<cpuno);
+ break;
+
+ case IPI_STOP:
+ CTR0(KTR_SMP, "IPI_STOP");
+ atomic_set_int(&stopped_cpus, 1<<cpuno);
+ while ((started_cpus & (1<<cpuno)) == 0)
+ ia64_mf();
+ atomic_clear_int(&started_cpus, 1<<cpuno);
+ atomic_clear_int(&stopped_cpus, 1<<cpuno);
+ break;
+		}
+		ipis &= ~ipi;
+ }
+}
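
The dispatch loop above relies on two small bit tricks: ipis & ~(ipis - 1)
isolates the lowest set bit, and clearing that bit afterwards guarantees the
loop terminates once every pending IPI has been handled.  A standalone
demonstration:

#include <stdio.h>

int
main(void)
{
	unsigned long ipis = 0x15;	/* pretend bits 0, 2 and 4 are pending */

	while (ipis) {
		unsigned long ipi = ipis & ~(ipis - 1);	/* lowest set bit */

		printf("handling IPI %#lx\n", ipi);
		ipis &= ~ipi;				/* mark it done */
	}
	return (0);
}
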
diff --git a/sys/ia64/ia64/pal.S b/sys/ia64/ia64/pal.S
new file mode 100644
index 0000000..081117c
--- /dev/null
+++ b/sys/ia64/ia64/pal.S
@@ -0,0 +1,71 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asm.h>
+
+BSS(ia64_pal_entry, 8)
+
+/*
+ * struct ia64_pal_result ia64_call_pal_static(u_int64_t proc,
+ u_int64_t arg1, u_int64_t arg2, u_int64_t arg3)
+ */
+ NESTED(ia64_call_pal_static, 4, 5, 0, r39, r40)
+
+palret = loc0
+entry = loc1
+rpsave = loc2
+pfssave = loc3
+psrsave = loc4
+
+ movl entry=ia64_pal_entry
+1: mov palret=ip // for return address
+ mov rpsave=rp
+ ;;
+ mov psrsave=psr
+ mov r28=in0 // procedure number
+ ;;
+ ld8 entry=[entry] // read entry point
+ mov r29=in1 // copy arguments
+ mov r30=in2
+ mov r31=in3
+ ;;
+ mov b1=entry
+ add palret=2f-1b,palret // calculate return address
+ ;;
+ mov b0=palret
+ rsm psr.i // disable interrupts
+ ;;
+ br.cond.sptk b1 // call into firmware
+2: mov psr.l=psrsave
+ mov b0=rpsave
+ mov ar.pfs=pfssave
+ ;;
+ srlz.d
+ br.ret.sptk b0
+
+	END(ia64_call_pal_static)
\ No newline at end of file
diff --git a/sys/ia64/ia64/pal.s b/sys/ia64/ia64/pal.s
new file mode 100644
index 0000000..081117c
--- /dev/null
+++ b/sys/ia64/ia64/pal.s
@@ -0,0 +1,71 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asm.h>
+
+BSS(ia64_pal_entry, 8)
+
+/*
+ * struct ia64_pal_result ia64_call_pal_static(u_int64_t proc,
+ u_int64_t arg1, u_int64_t arg2, u_int64_t arg3)
+ */
+ NESTED(ia64_call_pal_static, 4, 5, 0, r39, r40)
+
+palret = loc0
+entry = loc1
+rpsave = loc2
+pfssave = loc3
+psrsave = loc4
+
+ movl entry=ia64_pal_entry
+1: mov palret=ip // for return address
+ mov rpsave=rp
+ ;;
+ mov psrsave=psr
+ mov r28=in0 // procedure number
+ ;;
+ ld8 entry=[entry] // read entry point
+ mov r29=in1 // copy arguments
+ mov r30=in2
+ mov r31=in3
+ ;;
+ mov b1=entry
+ add palret=2f-1b,palret // calculate return address
+ ;;
+ mov b0=palret
+ rsm psr.i // disable interrupts
+ ;;
+ br.cond.sptk b1 // call into firmware
+2: mov psr.l=psrsave
+ mov b0=rpsave
+ mov ar.pfs=pfssave
+ ;;
+ srlz.d
+ br.ret.sptk b0
+
+	END(ia64_call_pal_static)
\ No newline at end of file
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
new file mode 100644
index 0000000..f448de9
--- /dev/null
+++ b/sys/ia64/ia64/pmap.c
@@ -0,0 +1,2388 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ * Copyright (c) 1994 David Greenman
+ * All rights reserved.
+ * Copyright (c) 1998,2000 Doug Rabson
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
+ * from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
+ * with some ideas from NetBSD's alpha pmap
+ * $FreeBSD$
+ */
+
+/*
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+/*
+ * Following the Linux model, region IDs are allocated in groups of
+ * eight so that a single region ID can be used for as many RRs as we
+ * want by encoding the RR number into the low bits of the ID.
+ *
+ * We reserve region ID 0 for the kernel and allocate the remaining
+ * IDs for user pmaps.
+ *
+ * Region 0..4
+ * User virtually mapped
+ *
+ * Region 5
+ * Kernel virtually mapped
+ *
+ * Region 6
+ * Kernel physically mapped uncacheable
+ *
+ * Region 7
+ * Kernel physically mapped cacheable
+ */
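+
+/*
+ * Illustrative sketch only (an assumption about how the scheme above
+ * would be used; user pmap activation is not wired up in this
+ * snapshot): a pmap whose base region ID is 'rid' (a multiple of
+ * eight) would presumably program region register 'rr' roughly as
+ *
+ *     ia64_set_rr(IA64_RR_BASE(rr), ((rid + rr) << 8) | (PAGE_SHIFT << 2) | 1);
+ *
+ * so the RR number lands in the low three bits of the ID.  With the
+ * kernel's reserved ID 0 this reduces to the region 5 setup done in
+ * pmap_bootstrap() below.
+ */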
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/msgbuf.h>
+#include <sys/vmmeter.h>
+#include <sys/mman.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <sys/lock.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
+
+#include <sys/user.h>
+
+#include <machine/md_var.h>
+
+#ifndef PMAP_SHPGPERPROC
+#define PMAP_SHPGPERPROC 200
+#endif
+
+#if defined(DIAGNOSTIC)
+#define PMAP_DIAGNOSTIC
+#endif
+
+#define MINPV 2048
+
+#if 0
+#define PMAP_DIAGNOSTIC
+#define PMAP_DEBUG
+#endif
+
+#if !defined(PMAP_DIAGNOSTIC)
+#define PMAP_INLINE __inline
+#else
+#define PMAP_INLINE
+#endif
+
+#if 0
+
+static void
+pmap_break(void)
+{
+}
+
+/* #define PMAP_DEBUG_VA(va) if ((va) == 0x120058000) pmap_break(); else */
+
+#endif
+
+#ifndef PMAP_DEBUG_VA
+#define PMAP_DEBUG_VA(va) do {} while(0)
+#endif
+
+/*
+ * Get PDEs and PTEs for user/kernel address space
+ */
+#define pmap_pte_w(pte) ((pte)->pte_ig & PTE_IG_WIRED)
+#define pmap_pte_managed(pte) ((pte)->pte_ig & PTE_IG_MANAGED)
+#define pmap_pte_v(pte) ((pte)->pte_p)
+#define pmap_pte_pa(pte) (((pte)->pte_ppn) << 12)
+#define pmap_pte_prot(pte) (((pte)->pte_ar << 2) | (pte)->pte_pl)
+
+#define pmap_pte_set_w(pte, v) ((v)?((pte)->pte_ig |= PTE_IG_WIRED) \
+ :((pte)->pte_ig &= ~PTE_IG_WIRED))
+#define pmap_pte_set_prot(pte, v) do { \
+ (pte)->pte_ar = v >> 2; \
+ (pte)->pte_pl = v & 3; \
+} while (0)
+
+/*
+ * Given a map and a machine independent protection code,
+ * convert to an ia64 protection code.
+ */
+#define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p])
+int protection_codes[2][8];
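+
+/*
+ * For illustration (consistent with the macros above, not an extra
+ * interface): a value taken from this table packs the access rights in
+ * bits 2 and up and the privilege level in bits 0-1, e.g.
+ *
+ *     pte_prot(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE)
+ *
+ * yields (PTE_AR_RW << 2) | PTE_PL_KERN, which pmap_pte_set_prot()
+ * then splits back into the pte_ar and pte_pl fields.
+ */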
+
+/*
+ * Return non-zero if this pmap is currently active
+ */
+#define pmap_isactive(pmap) (pmap->pm_active)
+
+/*
+ * Statically allocated kernel pmap
+ */
+static struct pmap kernel_pmap_store;
+pmap_t kernel_pmap;
+
+vm_offset_t avail_start; /* PA of first available physical page */
+vm_offset_t avail_end; /* PA of last available physical page */
+vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
+vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
+
+
+vm_offset_t kernel_vm_end;
+
+/*
+ * Data for the ASN allocator
+ */
+static int pmap_maxasn;
+static int pmap_nextasn = 0;
+static u_int pmap_current_asngen = 1;
+static pmap_t pmap_active = 0;
+
+/*
+ * Data for the pv entry allocation mechanism
+ */
+static vm_zone_t pvzone;
+static struct vm_zone pvzone_store;
+static struct vm_object pvzone_obj;
+static vm_zone_t pvbootzone;
+static struct vm_zone pvbootzone_store;
+static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
+static int pmap_pagedaemon_waken = 0;
+static struct pv_entry *pvinit;
+static struct pv_entry *pvbootinit;
+
+static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
+static pv_entry_t get_pv_entry __P((void));
+static void ia64_protection_init __P((void));
+static void pmap_changebit __P((vm_page_t m, int bit, boolean_t setem));
+
+static void pmap_remove_all __P((vm_page_t m));
+static void pmap_enter_quick __P((pmap_t pmap, vm_offset_t va, vm_page_t m));
+static boolean_t pmap_is_referenced __P((vm_page_t m));
+
+vm_offset_t
+pmap_steal_memory(vm_size_t size)
+{
+ vm_size_t bank_size;
+ vm_offset_t pa, va;
+
+ size = round_page(size);
+
+ bank_size = phys_avail[1] - phys_avail[0];
+ while (size > bank_size) {
+ int i;
+ for (i = 0; phys_avail[i+2]; i+= 2) {
+ phys_avail[i] = phys_avail[i+2];
+ phys_avail[i+1] = phys_avail[i+3];
+ }
+ phys_avail[i] = 0;
+ phys_avail[i+1] = 0;
+ if (!phys_avail[0])
+ panic("pmap_steal_memory: out of memory");
+ bank_size = phys_avail[1] - phys_avail[0];
+ }
+
+ pa = phys_avail[0];
+ phys_avail[0] += size;
+
+ va = IA64_PHYS_TO_RR7(pa);
+ bzero((caddr_t) va, size);
+ return va;
+}
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ */
+void
+pmap_bootstrap()
+{
+ int i;
+ int boot_pvs;
+
+ /*
+ * Setup ASNs
+ */
+ pmap_nextasn = 0;
+ pmap_maxasn = 0;
+ pmap_current_asngen = 1;
+
+ avail_start = phys_avail[0];
+ for (i = 0; phys_avail[i+2]; i+= 2) ;
+ avail_end = phys_avail[i+1];
+
+ virtual_avail = IA64_RR_BASE(5);
+ virtual_end = IA64_RR_BASE(6)-1;
+
+ /*
+ * Initialize protection array.
+ */
+ ia64_protection_init();
+
+ /*
+ * The kernel's pmap is statically allocated so we don't have to use
+ * pmap_create, which is unlikely to work correctly at this part of
+ * the boot sequence (XXX and which no longer exists).
+ */
+ kernel_pmap = &kernel_pmap_store;
+ kernel_pmap->pm_count = 1;
+ kernel_pmap->pm_active = 1;
+ kernel_pmap->pm_asn = 0;
+ kernel_pmap->pm_asngen = pmap_current_asngen;
+ pmap_nextasn = 1;
+ TAILQ_INIT(&kernel_pmap->pm_pvlist);
+
+ /*
+ * Region 5 is mapped via the vhpt.
+ */
+ ia64_set_rr(IA64_RR_BASE(5),
+ (5 << 8) | (PAGE_SHIFT << 2) | 1);
+
+ /*
+ * Region 6 is direct mapped UC and region 7 is direct mapped
+ * WB. The details of this are controlled by the Alt {I,D}TLB
+ * handlers. Here we just make sure that they have the largest
+ * possible page size to minimise TLB usage.
+ */
+#if 0
+ ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
+ ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));
+#endif
+
+ /*
+ * We need some PVs to cope with pmap_kenter() calls prior to
+ * pmap_init(). This is all a bit flaky and needs to be
+ * rethought, probably by avoiding the zone allocator
+ * entirely.
+ */
+ boot_pvs = 32768;
+ pvbootzone = &pvbootzone_store;
+ pvbootinit = (struct pv_entry *)
+ pmap_steal_memory(boot_pvs * sizeof (struct pv_entry));
+ zbootinit(pvbootzone, "PV ENTRY", sizeof (struct pv_entry),
+ pvbootinit, boot_pvs);
+
+ /*
+ * Set up proc0's PCB.
+ */
+#if 0
+ proc0.p_addr->u_pcb.pcb_hw.apcb_asn = 0;
+#endif
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ * pmap_init has been enhanced to support discontiguous physical
+ * memory in a fairly consistent way.
+ */
+void
+pmap_init(phys_start, phys_end)
+ vm_offset_t phys_start, phys_end;
+{
+ int i;
+ int initial_pvs;
+
+ /*
+ * Allocate memory for random pmap data structures. Includes the
+ * pv_head_table.
+ */
+
+ for(i = 0; i < vm_page_array_size; i++) {
+ vm_page_t m;
+
+ m = &vm_page_array[i];
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+ }
+
+ /*
+ * init the pv free list
+ */
+ initial_pvs = vm_page_array_size;
+ if (initial_pvs < MINPV)
+ initial_pvs = MINPV;
+ pvzone = &pvzone_store;
+ pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
+ initial_pvs * sizeof (struct pv_entry));
+ zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
+ vm_page_array_size);
+
+ /*
+ * Now it is safe to enable pv_table recording.
+ */
+ pmap_initialized = TRUE;
+}
+
+/*
+ * Initialize the address space (zone) for the pv_entries. Set a
+ * high water mark so that the system can recover from excessive
+ * numbers of pv entries.
+ */
+void
+pmap_init2()
+{
+ pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size;
+ pv_entry_high_water = 9 * (pv_entry_max / 10);
+ zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
+}
+
+
+/***************************************************
+ * Manipulate TLBs for a pmap
+ ***************************************************/
+
+static void
+pmap_invalidate_asn(pmap_t pmap)
+{
+ pmap->pm_asngen = 0;
+}
+
+static void
+pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
+{
+#if 0
+ if (pmap_isactive(pmap)) {
+ IA64_TBIS(va);
+ ia64_pal_imb(); /* XXX overkill? */
+ } else
+ pmap_invalidate_asn(pmap);
+#endif
+}
+
+static void
+pmap_invalidate_all(pmap_t pmap)
+{
+#if 0
+ if (pmap_isactive(pmap)) {
+ IA64_TBIA();
+ ia64_pal_imb(); /* XXX overkill? */
+ } else
+ pmap_invalidate_asn(pmap);
+#endif
+}
+
+static void
+pmap_get_asn(pmap_t pmap)
+{
+#if 0
+ if (pmap->pm_asngen != pmap_current_asngen) {
+ if (pmap_nextasn > pmap_maxasn) {
+ /*
+ * Start a new ASN generation.
+ *
+ * Invalidate all per-process mappings and I-cache
+ */
+ pmap_nextasn = 0;
+ pmap_current_asngen++;
+
+ if (pmap_current_asngen == 0) {
+ /*
+ * Clear the pm_asngen of all pmaps.
+ * This is safe since it is only called from
+ * pmap_activate after it has deactivated
+ * the old pmap.
+ */
+ struct proc *p;
+ pmap_t tpmap;
+
+#ifdef PMAP_DIAGNOSTIC
+ printf("pmap_get_asn: generation rollover\n");
+#endif
+ pmap_current_asngen = 1;
+ LIST_FOREACH(p, &allproc, p_list) {
+ if (p->p_vmspace) {
+ tpmap = vmspace_pmap(p->p_vmspace);
+ tpmap->pm_asngen = 0;
+ }
+ }
+ }
+
+ /*
+ * Since we are about to start re-using ASNs, we must
+ * clear out the TLB and the I-cache since they are tagged
+ * with the ASN.
+ */
+ IA64_TBIAP();
+ ia64_pal_imb(); /* XXX overkill? */
+ }
+ pmap->pm_asn = pmap_nextasn++;
+ pmap->pm_asngen = pmap_current_asngen;
+ }
+#endif
+}
+
+/***************************************************
+ * Low level helper routines.....
+ ***************************************************/
+
+/*
+ * Install a pte into the VHPT
+ */
+static PMAP_INLINE void
+pmap_install_pte(struct ia64_lpte *vhpte, struct ia64_lpte *pte)
+{
+ u_int64_t *vhp, *p;
+
+ /* invalidate the pte */
+ atomic_set_64(&vhpte->pte_tag, 1L << 63);
+ ia64_mf(); /* make sure everyone sees */
+
+ vhp = (u_int64_t *) vhpte;
+ p = (u_int64_t *) pte;
+
+ vhp[0] = p[0];
+ vhp[1] = p[1];
+ vhp[2] = p[2]; /* sets ti to one */
+
+ ia64_mf();
+}
+
+/*
+ * Compare essential parts of pte.
+ */
+static PMAP_INLINE int
+pmap_equal_pte(struct ia64_lpte *pte1, struct ia64_lpte *pte2)
+{
+ return *(u_int64_t *) pte1 == *(u_int64_t *) pte2;
+}
+
+/*
+ * this routine defines the region(s) of memory that should
+ * not be tested for the modified bit.
+ */
+static PMAP_INLINE int
+pmap_track_modified(vm_offset_t va)
+{
+ if ((va < clean_sva) || (va >= clean_eva))
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * Create the UPAGES for a new process.
+ * This routine directly affects the fork perf for a process.
+ */
+void
+pmap_new_proc(struct proc *p)
+{
+ int i;
+ vm_object_t upobj;
+ vm_page_t m;
+ struct user *up;
+
+ /*
+ * allocate object for the upages
+ */
+ if ((upobj = p->p_upages_obj) == NULL) {
+ upobj = vm_object_allocate( OBJT_DEFAULT, UPAGES);
+ p->p_upages_obj = upobj;
+ }
+
+ /* get a kernel virtual address for the UPAGES for this proc */
+ if ((up = p->p_addr) == NULL) {
+ up = (struct user *) kmem_alloc_nofault(kernel_map,
+ UPAGES * PAGE_SIZE);
+ if (up == NULL)
+ panic("pmap_new_proc: u_map allocation failed");
+ p->p_addr = up;
+ }
+
+ for(i=0;i<UPAGES;i++) {
+ /*
+ * Get a kernel stack page
+ */
+ m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+
+ /*
+ * Wire the page
+ */
+ m->wire_count++;
+ cnt.v_wire_count++;
+
+ pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
+ VM_PAGE_TO_PHYS(m));
+
+ pmap_invalidate_page(kernel_pmap,
+ (vm_offset_t)up + i * PAGE_SIZE);
+
+ vm_page_wakeup(m);
+ vm_page_flag_clear(m, PG_ZERO);
+ vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
+ m->valid = VM_PAGE_BITS_ALL;
+ }
+}
+
+/*
+ * Dispose the UPAGES for a process that has exited.
+ * This routine directly impacts the exit perf of a process.
+ */
+void
+pmap_dispose_proc(p)
+ struct proc *p;
+{
+ int i;
+ vm_object_t upobj;
+ vm_page_t m;
+
+ upobj = p->p_upages_obj;
+
+ for(i=0;i<UPAGES;i++) {
+
+ if ((m = vm_page_lookup(upobj, i)) == NULL)
+ panic("pmap_dispose_proc: upage already missing???");
+
+ vm_page_busy(m);
+
+ pmap_kremove((vm_offset_t)p->p_addr + PAGE_SIZE * i);
+
+ vm_page_unwire(m, 0);
+ vm_page_free(m);
+ }
+}
+
+/*
+ * Allow the UPAGES for a process to be prejudicially paged out.
+ */
+void
+pmap_swapout_proc(p)
+ struct proc *p;
+{
+ int i;
+ vm_object_t upobj;
+ vm_page_t m;
+
+ /*
+ * Make sure we aren't fpcurproc.
+ */
+ ia64_fpstate_save(p, 1);
+
+ upobj = p->p_upages_obj;
+ /*
+ * let the upages be paged
+ */
+ for(i=0;i<UPAGES;i++) {
+ if ((m = vm_page_lookup(upobj, i)) == NULL)
+ panic("pmap_swapout_proc: upage already missing???");
+ vm_page_dirty(m);
+ vm_page_unwire(m, 0);
+ pmap_kremove((vm_offset_t)p->p_addr + PAGE_SIZE * i);
+ }
+}
+
+/*
+ * Bring the UPAGES for a specified process back in.
+ */
+void
+pmap_swapin_proc(p)
+ struct proc *p;
+{
+ int i,rv;
+ vm_object_t upobj;
+ vm_page_t m;
+
+ upobj = p->p_upages_obj;
+ for(i=0;i<UPAGES;i++) {
+
+ m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+
+ pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
+ VM_PAGE_TO_PHYS(m));
+
+ if (m->valid != VM_PAGE_BITS_ALL) {
+ rv = vm_pager_get_pages(upobj, &m, 1, 0);
+ if (rv != VM_PAGER_OK)
+ panic("pmap_swapin_proc: cannot get upages for proc: %d\n", p->p_pid);
+ m = vm_page_lookup(upobj, i);
+ m->valid = VM_PAGE_BITS_ALL;
+ }
+
+ vm_page_wire(m);
+ vm_page_wakeup(m);
+ vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
+ }
+
+ /*
+ * The pcb may be at a different physical address now so cache the
+ * new address.
+ */
+ p->p_md.md_pcbpaddr = (void*) vtophys((vm_offset_t) &p->p_addr->u_pcb);
+}
+
+/***************************************************
+ * Page table page management routines.....
+ ***************************************************/
+
+void
+pmap_pinit0(pmap)
+ struct pmap *pmap;
+{
+ /*
+ * kernel_pmap is the same as any other pmap.
+ */
+ pmap_pinit(pmap);
+ pmap->pm_flags = 0;
+ pmap->pm_count = 1;
+ pmap->pm_ptphint = NULL;
+ pmap->pm_active = 0;
+ pmap->pm_asn = 0;
+ pmap->pm_asngen = 0;
+ TAILQ_INIT(&pmap->pm_pvlist);
+ bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ */
+void
+pmap_pinit(pmap)
+ register struct pmap *pmap;
+{
+ pmap->pm_flags = 0;
+ pmap->pm_count = 1;
+ pmap->pm_ptphint = NULL;
+ pmap->pm_active = 0;
+ pmap->pm_asn = 0;
+ pmap->pm_asngen = 0;
+ TAILQ_INIT(&pmap->pm_pvlist);
+ bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+}
+
+/*
+ * Wire in kernel global address entries. To avoid a race condition
+ * between pmap initialization and pmap_growkernel, this procedure
+ * should be called after the vmspace is attached to the process
+ * but before this pmap is activated.
+ */
+void
+pmap_pinit2(pmap)
+ struct pmap *pmap;
+{
+}
+
+/***************************************************
+ * Pmap allocation/deallocation routines.
+ ***************************************************/
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap_t pmap)
+{
+#if 0
+ /*
+ * No page-table object exists on ia64, so the i386-style reference
+ * count check ('object' is undefined here) cannot be compiled in.
+ */
+ if (object->ref_count != 1)
+ panic("pmap_release: pteobj reference count != 1");
+#endif
+}
+
+/*
+ * grow the number of kernel page table entries, if needed
+ */
+void
+pmap_growkernel(vm_offset_t addr)
+{
+}
+
+/*
+ * Retire the given physical map from service.
+ * Should only be called if the map contains
+ * no valid mappings.
+ */
+void
+pmap_destroy(pmap_t pmap)
+{
+ int count;
+
+ if (pmap == NULL)
+ return;
+
+ count = --pmap->pm_count;
+ if (count == 0) {
+ pmap_release(pmap);
+ panic("destroying a pmap is not yet implemented");
+ }
+}
+
+/*
+ * Add a reference to the specified pmap.
+ */
+void
+pmap_reference(pmap_t pmap)
+{
+ if (pmap != NULL) {
+ pmap->pm_count++;
+ }
+}
+
+/***************************************************
+* page management routines.
+ ***************************************************/
+
+/*
+ * free the pv_entry back to the free list
+ */
+static PMAP_INLINE void
+free_pv_entry(pv_entry_t pv)
+{
+ pv_entry_count--;
+ zfreei(pvzone, pv);
+}
+
+/*
+ * get a new pv_entry, allocating a block from the system
+ * when needed.
+ * the memory allocation is performed bypassing the malloc code
+ * because of the possibility of allocations at interrupt time.
+ */
+static pv_entry_t
+get_pv_entry(void)
+{
+ if (!pvinit)
+ return zalloci(pvbootzone);
+
+ pv_entry_count++;
+ if (pv_entry_high_water &&
+ (pv_entry_count > pv_entry_high_water) &&
+ (pmap_pagedaemon_waken == 0)) {
+ pmap_pagedaemon_waken = 1;
+ wakeup (&vm_pages_needed);
+ }
+ return zalloci(pvzone);
+}
+
+/*
+ * Add a pv_entry to the VHPT.
+ */
+static void
+pmap_enter_vhpt(pv_entry_t pv)
+{
+ struct ia64_lpte *vhpte;
+
+ vhpte = (struct ia64_lpte *) ia64_thash(pv->pv_va);
+
+ pv->pv_pte.pte_chain = vhpte->pte_chain;
+ vhpte->pte_chain = ia64_tpa((vm_offset_t) pv);
+
+ if (!vhpte->pte_p && pv->pv_pte.pte_p)
+ pmap_install_pte(vhpte, &pv->pv_pte);
+ else
+ ia64_mf();
+}
+
+/*
+ * Update VHPT after pv->pv_pte has changed.
+ */
+static void
+pmap_update_vhpt(pv_entry_t pv)
+{
+ struct ia64_lpte *vhpte;
+
+ vhpte = (struct ia64_lpte *) ia64_thash(pv->pv_va);
+
+ if ((!vhpte->pte_p || vhpte->pte_tag == pv->pv_pte.pte_tag)
+ && pv->pv_pte.pte_p)
+ pmap_install_pte(vhpte, &pv->pv_pte);
+}
+
+/*
+ * Remove a pv_entry from the VHPT. Return true if it worked.
+ */
+static int
+pmap_remove_vhpt(pv_entry_t pv)
+{
+ struct ia64_lpte *pte;
+ struct ia64_lpte *lpte;
+ struct ia64_lpte *vhpte;
+ u_int64_t tag;
+
+ vhpte = (struct ia64_lpte *) ia64_thash(pv->pv_va);
+
+ /*
+ * If the VHPTE is invalid, there can't be a collision chain.
+ */
+ if (!vhpte->pte_p)
+ return 1;
+
+ lpte = vhpte;
+ pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(vhpte->pte_chain);
+ tag = ia64_ttag(pv->pv_va);
+
+ while (pte->pte_tag != tag) {
+ lpte = pte;
+ if (pte->pte_chain)
+ pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain);
+ else
+ return 1; /* error here? */
+ }
+
+ /*
+ * Snip this pv_entry out of the collision chain.
+ */
+ lpte->pte_chain = pte->pte_chain;
+
+ /*
+ * If the VHPTE matches as well, change it to map the first
+ * element from the chain if there is one.
+ */
+ if (vhpte->pte_tag == tag) {
+ if (vhpte->pte_chain) {
+ pte = (struct ia64_lpte *)
+ IA64_PHYS_TO_RR7(vhpte->pte_chain);
+ pmap_install_pte(vhpte, pte);
+ } else {
+ vhpte->pte_p = 0;
+ ia64_mf();
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Make a pv_entry_t which maps the given virtual address. The pte
+ * will be initialised with pte_p = 0. The function pmap_set_pv()
+ * should be called to change the value of the pte.
+ * Must be called at splvm().
+ */
+static pv_entry_t
+pmap_make_pv(pmap_t pmap, vm_offset_t va)
+{
+ pv_entry_t pv;
+
+ pv = get_pv_entry();
+ bzero(pv, sizeof(*pv));
+ pv->pv_va = va;
+ pv->pv_pmap = pmap;
+
+ pv->pv_pte.pte_p = 0; /* invalid for now */
+ pv->pv_pte.pte_ma = PTE_MA_WB; /* cacheable, write-back */
+ pv->pv_pte.pte_a = 0;
+ pv->pv_pte.pte_d = 0;
+ pv->pv_pte.pte_pl = 0; /* privilege level 0 */
+ pv->pv_pte.pte_ar = 3; /* read/write/execute */
+ pv->pv_pte.pte_ppn = 0; /* physical address */
+ pv->pv_pte.pte_ed = 0;
+ pv->pv_pte.pte_ig = 0;
+
+ pv->pv_pte.pte_ps = PAGE_SHIFT; /* page size */
+ pv->pv_pte.pte_key = 0; /* protection key */
+
+ pv->pv_pte.pte_tag = ia64_ttag(va);
+
+ pmap_enter_vhpt(pv);
+
+ TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
+ pmap->pm_stats.resident_count++;
+
+ return pv;
+}
+
+/*
+ * Initialise a pv_entry_t with a given physical address and
+ * protection code. If the passed vm_page_t is non-zero, the entry is
+ * added to its list of mappings.
+ * Must be called at splvm().
+ */
+static void
+pmap_set_pv(pmap_t pmap, pv_entry_t pv, vm_offset_t pa,
+ int prot, vm_page_t m)
+{
+ if (pv->pv_pte.pte_p && pv->pv_pte.pte_ig & PTE_IG_MANAGED) {
+ vm_offset_t opa = pv->pv_pte.pte_ppn << 12;
+ vm_page_t om = PHYS_TO_VM_PAGE(opa);
+
+ TAILQ_REMOVE(&om->md.pv_list, pv, pv_list);
+ om->md.pv_list_count--;
+
+ if (TAILQ_FIRST(&om->md.pv_list) == NULL)
+ vm_page_flag_clear(om, PG_MAPPED | PG_WRITEABLE);
+ }
+
+ pv->pv_pte.pte_p = 1; /* set to valid */
+ if (m) {
+ pv->pv_pte.pte_a = 0;
+ pv->pv_pte.pte_d = 0;
+ } else {
+ pv->pv_pte.pte_a = 1;
+ pv->pv_pte.pte_d = 1;
+ }
+ pv->pv_pte.pte_a = 1; /* XXX remove this after implementing trap */
+ pv->pv_pte.pte_d = 1;
+ pv->pv_pte.pte_pl = prot & 3; /* privilege level from prot */
+ pv->pv_pte.pte_ar = prot >> 2; /* access rights from prot */
+ pv->pv_pte.pte_ppn = pa >> 12; /* physical address */
+
+ if (m) {
+ pv->pv_pte.pte_ig |= PTE_IG_MANAGED;
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+ m->md.pv_list_count++;
+ }
+
+ /*
+ * Update the VHPT entry if it needs to change.
+ */
+ pmap_update_vhpt(pv);
+}
+
+/*
+ * Remove a mapping represented by a particular pv_entry_t. If the
+ * passed vm_page_t is non-zero, then the entry is removed from it.
+ * Must be called at splvm().
+ */
+static int
+pmap_remove_pv(pmap_t pmap, pv_entry_t pv, vm_page_t m)
+{
+ int rtval;
+
+ /*
+ * First remove from the VHPT.
+ */
+ rtval = pmap_remove_vhpt(pv);
+ if (!rtval)
+ return rtval;
+
+ if (m) {
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+ m->md.pv_list_count--;
+
+ if (TAILQ_FIRST(&m->md.pv_list) == NULL)
+ vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
+ }
+
+ TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
+ pmap->pm_stats.resident_count--;
+
+ free_pv_entry(pv);
+
+ return (rtval);
+}
+
+/*
+ * Find a pv given a pmap and virtual address.
+ */
+static pv_entry_t
+pmap_find_pv(pmap_t pmap, vm_offset_t va)
+{
+ struct ia64_lpte *pte;
+ u_int64_t tag;
+
+ pte = (struct ia64_lpte *) ia64_thash(va);
+ if (!pte->pte_chain)
+ return 0;
+
+ tag = ia64_ttag(va);
+ pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain);
+
+ while (pte->pte_tag != tag) {
+ if (pte->pte_chain)
+ pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain);
+ else
+ return 0;
+ }
+
+ return (pv_entry_t) pte; /* XXX wrong va */
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+vm_offset_t
+pmap_extract(pmap, va)
+ register pmap_t pmap;
+ vm_offset_t va;
+{
+ pv_entry_t pv = pmap_find_pv(pmap, va);
+ if (pv)
+ return pmap_pte_pa(&pv->pv_pte);
+ else
+ return 0;
+}
+
+/***************************************************
+ * Low level mapping routines.....
+ ***************************************************/
+
+/*
+ * Add a list of wired pages to the kva.  This routine is only used
+ * for temporary kernel mappings that do not need to have page
+ * modification or references recorded.
+ * Note that old mappings are simply written
+ * over. The page *must* be wired.
+ */
+void
+pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
+{
+ int i, inval;
+ pv_entry_t pv;
+
+ for (i = 0; i < count; i++) {
+ vm_offset_t tva = va + i * PAGE_SIZE;
+ pv = pmap_find_pv(kernel_pmap, tva);
+ inval = 0;
+ if (!pv)
+ pv = pmap_make_pv(kernel_pmap, tva);
+ else
+ inval = 1;
+
+ PMAP_DEBUG_VA(va);
+ pmap_set_pv(kernel_pmap, pv,
+ VM_PAGE_TO_PHYS(m[i]),
+ (PTE_AR_RWX<<2) | PTE_PL_KERN, 0);
+ if (inval)
+ pmap_invalidate_page(kernel_pmap, tva);
+ }
+}
+
+/*
+ * this routine jerks page mappings from the
+ * kernel -- it is meant only for temporary mappings.
+ */
+void
+pmap_qremove(va, count)
+ vm_offset_t va;
+ int count;
+{
+ int i;
+ pv_entry_t pv;
+
+ for (i = 0; i < count; i++) {
+ pv = pmap_find_pv(kernel_pmap, va);
+ PMAP_DEBUG_VA(va);
+ if (pv) {
+ pmap_remove_pv(kernel_pmap, pv, 0);
+ pmap_invalidate_page(kernel_pmap, va);
+ }
+ va += PAGE_SIZE;
+ }
+}
+
+/*
+ * Add a wired page to the kva.
+ */
+void
+pmap_kenter(vm_offset_t va, vm_offset_t pa)
+{
+ pv_entry_t pv;
+
+ pv = pmap_find_pv(kernel_pmap, va);
+ if (!pv)
+ pv = pmap_make_pv(kernel_pmap, va);
+ pmap_set_pv(kernel_pmap, pv,
+ pa, (PTE_AR_RWX<<2) | PTE_PL_KERN, 0);
+}
+
+/*
+ * Remove a page from the kva
+ */
+void
+pmap_kremove(vm_offset_t va)
+{
+ pv_entry_t pv;
+
+ pv = pmap_find_pv(kernel_pmap, va);
+ if (pv)
+ pmap_remove_pv(kernel_pmap, pv, 0);
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * For now, VM is already on; we only need to map the
+ * specified memory.
+ */
+vm_offset_t
+pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
+{
+ /*
+ * XXX We should really try to use larger pagesizes here to
+ * cut down the number of PVs used.
+ */
+ while (start < end) {
+ pmap_kenter(virt, start);
+ virt += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+ return (virt);
+}
+
+/*
+ * This routine is very drastic, but can save the system
+ * in a pinch.
+ */
+void
+pmap_collect()
+{
+ int i;
+ vm_page_t m;
+ static int warningdone=0;
+
+ if (pmap_pagedaemon_waken == 0)
+ return;
+
+ if (warningdone < 5) {
+ printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
+ warningdone++;
+ }
+
+ for(i = 0; i < vm_page_array_size; i++) {
+ m = &vm_page_array[i];
+ if (m->wire_count || m->hold_count || m->busy ||
+ (m->flags & PG_BUSY))
+ continue;
+ pmap_remove_all(m);
+ }
+ pmap_pagedaemon_waken = 0;
+}
+
+/*
+ * Remove a single page from a process address space
+ */
+static void
+pmap_remove_page(pmap_t pmap, vm_offset_t va)
+{
+ pv_entry_t pv;
+ vm_page_t m;
+ int rtval;
+ int s;
+
+ s = splvm();
+
+ pv = pmap_find_pv(pmap, va);
+
+ rtval = 0;
+ if (pv) {
+ m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
+ rtval = pmap_remove_pv(pmap, pv, m);
+ }
+
+ splx(s);
+ return;
+}
+
+/*
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
+void
+pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ vm_offset_t va, nva;
+
+ if (pmap == NULL)
+ return;
+
+ if (pmap->pm_stats.resident_count == 0)
+ return;
+
+ /*
+ * Special case for removing a single page: a very common
+ * operation that is easy to short-circuit.
+ */
+ if (sva + PAGE_SIZE == eva) {
+ pmap_remove_page(pmap, sva);
+ return;
+ }
+
+ if (atop(eva - sva) > pmap->pm_stats.resident_count) {
+ for (va = sva; va < eva; va = nva) {
+ pmap_remove_page(pmap, va);
+ nva = va + PAGE_SIZE;
+ }
+ } else {
+ pv_entry_t pv, pvnext;
+ int s;
+
+ s = splvm();
+ for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
+ pv;
+ pv = pvnext) {
+ pvnext = TAILQ_NEXT(pv, pv_plist);
+ if (pv->pv_va >= sva && pv->pv_va < eva) {
+ vm_page_t m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
+ pmap_remove_pv(pmap, pv, m);
+ }
+ }
+ splx(s);
+ }
+}
+
+/*
+ * Routine: pmap_remove_all
+ * Function:
+ * Removes this physical page from
+ * all physical maps in which it resides.
+ * Reflects back modify bits to the pager.
+ *
+ * Notes:
+ * Original versions of this routine were very
+ * inefficient because they iteratively called
+ * pmap_remove (slow...)
+ */
+
+static void
+pmap_remove_all(vm_page_t m)
+{
+ register pv_entry_t pv;
+ int nmodify;
+ int s;
+
+ nmodify = 0;
+#if defined(PMAP_DIAGNOSTIC)
+ /*
+ * XXX this makes pmap_page_protect(NONE) illegal for non-managed
+ * pages!
+ */
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+ panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m));
+ }
+#endif
+
+ s = splvm();
+
+ while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
+ vm_page_t m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
+ pmap_remove_pv(pv->pv_pmap, pv, m);
+ pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
+ }
+
+ vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
+
+ splx(s);
+ return;
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ */
+void
+pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+{
+ pv_entry_t pv;
+ int newprot;
+
+ if (pmap == NULL)
+ return;
+
+ if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
+
+ if (prot & VM_PROT_WRITE)
+ return;
+
+ newprot = pte_prot(pmap, prot);
+
+ if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
+ panic("pmap_protect: unaligned addresses");
+
+ while (sva < eva) {
+ /*
+ * If page is invalid, skip this page
+ */
+ pv = pmap_find_pv(pmap, sva);
+ if (!pv) {
+ sva += PAGE_SIZE;
+ continue;
+ }
+
+ if (pmap_pte_prot(&pv->pv_pte) != newprot) {
+ pmap_pte_set_prot(&pv->pv_pte, newprot);
+ pmap_update_vhpt(pv);
+ pmap_invalidate_page(pmap, sva);
+ }
+
+ sva += PAGE_SIZE;
+ }
+}
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+void
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ boolean_t wired)
+{
+ vm_offset_t pa;
+ pv_entry_t pv;
+ vm_offset_t opa;
+ struct ia64_lpte origpte;
+ int managed;
+
+ if (pmap == NULL)
+ return;
+
+ va &= ~PAGE_MASK;
+#ifdef PMAP_DIAGNOSTIC
+ if (va > VM_MAX_KERNEL_ADDRESS)
+ panic("pmap_enter: toobig");
+#endif
+
+ pv = pmap_find_pv(pmap, va);
+ if (!pv)
+ pv = pmap_make_pv(pmap, va);
+
+ origpte = pv->pv_pte;
+ if (origpte.pte_p)
+ opa = pmap_pte_pa(&origpte);
+ else
+ opa = 0;
+
+ pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK;
+ managed = 0;
+
+ /*
+ * Mapping has not changed, must be protection or wiring change.
+ */
+ if (origpte.pte_p && (opa == pa)) {
+ /*
+ * Wiring change, just update stats. We don't worry about
+ * wiring PT pages as they remain resident as long as there
+ * are valid mappings in them. Hence, if a user page is wired,
+ * the PT page will be also.
+ */
+ if (wired && ((origpte.pte_ig & PTE_IG_WIRED) == 0))
+ pmap->pm_stats.wired_count++;
+ else if (!wired && (origpte.pte_ig & PTE_IG_WIRED))
+ pmap->pm_stats.wired_count--;
+
+ managed = origpte.pte_ig & PTE_IG_MANAGED;
+ goto validate;
+ } else {
+ /*
+ * Mapping has changed, invalidate old range and fall
+ * through to handle validating new mapping.
+ */
+ }
+
+ /*
+ * Increment counters
+ */
+ if (wired)
+ pmap->pm_stats.wired_count++;
+
+validate:
+ /*
+ * Now validate mapping with desired protection/wiring.
+ * This enters the pv_entry_t on the page's list if necessary.
+ */
+ pmap_set_pv(pmap, pv, pa, pte_prot(pmap, prot), m);
+
+ if (wired)
+ pv->pv_pte.pte_ig |= PTE_IG_WIRED;
+
+ /*
+ * if the mapping or permission bits are different, we need
+ * to invalidate the page.
+ */
+ if (!pmap_equal_pte(&origpte, &pv->pv_pte)) {
+ PMAP_DEBUG_VA(va);
+ if (origpte.pte_p)
+ pmap_invalidate_page(pmap, va);
+ }
+}
+
+/*
+ * this code makes some *MAJOR* assumptions:
+ * 1. Current pmap & pmap exists.
+ * 2. Not wired.
+ * 3. Read access.
+ * 4. No page table pages.
+ * 5. Tlbflush is deferred to calling procedure.
+ * 6. Page IS managed.
+ * but is *MUCH* faster than pmap_enter...
+ */
+
+static void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
+{
+ pv_entry_t pv;
+ int s;
+
+ s = splvm();
+
+ pv = pmap_find_pv(pmap, va);
+ if (!pv)
+ pv = pmap_make_pv(pmap, va);
+
+ /*
+ * Enter on the PV list if part of our managed memory. Note that we
+ * raise IPL while manipulating pv_table since pmap_enter can be
+ * called at interrupt time.
+ */
+ PMAP_DEBUG_VA(va);
+ pmap_set_pv(pmap, pv, VM_PAGE_TO_PHYS(m),
+ (PTE_AR_R << 2) | PTE_PL_USER, m);
+
+ splx(s);
+}
+
+/*
+ * Make temporary mapping for a physical address. This is called
+ * during dump.
+ */
+void *
+pmap_kenter_temporary(vm_offset_t pa)
+{
+ return (void *) IA64_PHYS_TO_RR7(pa);
+}
+
+#define MAX_INIT_PT (96)
+/*
+ * pmap_object_init_pt preloads the ptes for a given object
+ * into the specified pmap. This eliminates the blast of soft
+ * faults on process startup and immediately after an mmap.
+ */
+void
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
+ vm_object_t object, vm_pindex_t pindex,
+ vm_size_t size, int limit)
+{
+ vm_offset_t tmpidx;
+ int psize;
+ vm_page_t p;
+ int objpgs;
+
+ if (pmap == NULL || object == NULL)
+ return;
+
+ psize = ia64_btop(size);
+
+ if ((object->type != OBJT_VNODE) ||
+ (limit && (psize > MAX_INIT_PT) &&
+ (object->resident_page_count > MAX_INIT_PT))) {
+ return;
+ }
+
+ if (psize + pindex > object->size)
+ psize = object->size - pindex;
+
+ /*
+ * if we are processing a major portion of the object, then scan the
+ * entire thing.
+ */
+ if (psize > (object->resident_page_count >> 2)) {
+ objpgs = psize;
+
+ for (p = TAILQ_FIRST(&object->memq);
+ ((objpgs > 0) && (p != NULL));
+ p = TAILQ_NEXT(p, listq)) {
+
+ tmpidx = p->pindex;
+ if (tmpidx < pindex) {
+ continue;
+ }
+ tmpidx -= pindex;
+ if (tmpidx >= psize) {
+ continue;
+ }
+ if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
+ (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
+ if ((p->queue - p->pc) == PQ_CACHE)
+ vm_page_deactivate(p);
+ vm_page_busy(p);
+ pmap_enter_quick(pmap,
+ addr + ia64_ptob(tmpidx), p);
+ vm_page_flag_set(p, PG_MAPPED);
+ vm_page_wakeup(p);
+ }
+ objpgs -= 1;
+ }
+ } else {
+ /*
+ * else lookup the pages one-by-one.
+ */
+ for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
+ p = vm_page_lookup(object, tmpidx + pindex);
+ if (p &&
+ ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
+ (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
+ if ((p->queue - p->pc) == PQ_CACHE)
+ vm_page_deactivate(p);
+ vm_page_busy(p);
+ pmap_enter_quick(pmap,
+ addr + ia64_ptob(tmpidx), p);
+ vm_page_flag_set(p, PG_MAPPED);
+ vm_page_wakeup(p);
+ }
+ }
+ }
+ return;
+}
+
+/*
+ * pmap_prefault provides a quick way of clustering
+ * pagefaults into a process's address space.  It is a "cousin"
+ * of pmap_object_init_pt, except it runs at page fault time instead
+ * of mmap time.
+ */
+#define PFBAK 4
+#define PFFOR 4
+#define PAGEORDER_SIZE (PFBAK+PFFOR)
+
+static int pmap_prefault_pageorder[] = {
+ -PAGE_SIZE, PAGE_SIZE,
+ -2 * PAGE_SIZE, 2 * PAGE_SIZE,
+ -3 * PAGE_SIZE, 3 * PAGE_SIZE,
+ -4 * PAGE_SIZE, 4 * PAGE_SIZE
+};
+
+void
+pmap_prefault(pmap, addra, entry)
+ pmap_t pmap;
+ vm_offset_t addra;
+ vm_map_entry_t entry;
+{
+ int i;
+ vm_offset_t starta;
+ vm_offset_t addr;
+ vm_pindex_t pindex;
+ vm_page_t m, mpte;
+ vm_object_t object;
+
+ if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace)))
+ return;
+
+ object = entry->object.vm_object;
+
+ starta = addra - PFBAK * PAGE_SIZE;
+ if (starta < entry->start) {
+ starta = entry->start;
+ } else if (starta > addra) {
+ starta = 0;
+ }
+
+ mpte = NULL;
+ for (i = 0; i < PAGEORDER_SIZE; i++) {
+ vm_object_t lobject;
+ pv_entry_t pv;
+
+ addr = addra + pmap_prefault_pageorder[i];
+ if (addr > addra + (PFFOR * PAGE_SIZE))
+ addr = 0;
+
+ if (addr < starta || addr >= entry->end)
+ continue;
+
+ pv = pmap_find_pv(pmap, addr);
+ if (pv)
+ continue;
+
+ pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
+ lobject = object;
+ for (m = vm_page_lookup(lobject, pindex);
+ (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object));
+ lobject = lobject->backing_object) {
+ if (lobject->backing_object_offset & PAGE_MASK)
+ break;
+ pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
+ m = vm_page_lookup(lobject->backing_object, pindex);
+ }
+
+ /*
+ * give-up when a page is not in memory
+ */
+ if (m == NULL)
+ break;
+
+ if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
+ (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
+
+ if ((m->queue - m->pc) == PQ_CACHE) {
+ vm_page_deactivate(m);
+ }
+ vm_page_busy(m);
+ pmap_enter_quick(pmap, addr, m);
+ vm_page_flag_set(m, PG_MAPPED);
+ vm_page_wakeup(m);
+ }
+ }
+}
+
+/*
+ * Routine: pmap_change_wiring
+ * Function: Change the wiring attribute for a map/virtual-address
+ * pair.
+ * In/out conditions:
+ * The mapping must already exist in the pmap.
+ */
+void
+pmap_change_wiring(pmap, va, wired)
+ register pmap_t pmap;
+ vm_offset_t va;
+ boolean_t wired;
+{
+ pv_entry_t pv;
+
+ if (pmap == NULL)
+ return;
+
+ pv = pmap_find_pv(pmap, va);
+
+ if (wired && !pmap_pte_w(&pv->pv_pte))
+ pmap->pm_stats.wired_count++;
+ else if (!wired && pmap_pte_w(&pv->pv_pte))
+ pmap->pm_stats.wired_count--;
+
+ /*
+ * Wiring is not a hardware characteristic so there is no need to
+ * invalidate TLB.
+ */
+ pmap_pte_set_w(&pv->pv_pte, wired);
+}
+
+
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+
+void
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
+ vm_offset_t src_addr)
+{
+}
+
+/*
+ * Routine: pmap_kernel
+ * Function:
+ * Returns the physical map handle for the kernel.
+ */
+pmap_t
+pmap_kernel()
+{
+ return (kernel_pmap);
+}
+
+/*
+ * pmap_zero_page zeros the specified hardware page by
+ * mapping it into virtual memory and using bzero to clear
+ * its contents.
+ */
+
+void
+pmap_zero_page(vm_offset_t pa)
+{
+ vm_offset_t va = IA64_PHYS_TO_RR7(pa);
+ bzero((caddr_t) va, PAGE_SIZE);
+}
+
+
+/*
+ * pmap_zero_page_area zeros the specified hardware page by
+ * mapping it into virtual memory and using bzero to clear
+ * its contents.
+ *
+ * off and size must reside within a single page.
+ */
+
+void
+pmap_zero_page_area(vm_offset_t pa, int off, int size)
+{
+ vm_offset_t va = IA64_PHYS_TO_RR7(pa);
+ bzero((char *)(caddr_t)va + off, size);
+}
+
+/*
+ * pmap_copy_page copies the specified (machine independent)
+ * page by mapping the page into virtual memory and using
+ * bcopy to copy the page, one machine dependent page at a
+ * time.
+ */
+void
+pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+{
+ src = IA64_PHYS_TO_RR7(src);
+ dst = IA64_PHYS_TO_RR7(dst);
+ bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE);
+}
+
+
+/*
+ * Routine: pmap_pageable
+ * Function:
+ * Make the specified pages (by pmap, offset)
+ * pageable (or not) as requested.
+ *
+ * A page which is not pageable may not take
+ * a fault; therefore, its page table entry
+ * must remain valid for the duration.
+ *
+ * This routine is merely advisory; pmap_enter
+ * will specify that these pages are to be wired
+ * down (or not) as appropriate.
+ */
+void
+pmap_pageable(pmap, sva, eva, pageable)
+ pmap_t pmap;
+ vm_offset_t sva, eva;
+ boolean_t pageable;
+{
+}
+
+/*
+ * this routine returns true if a physical page resides
+ * in the given pmap.
+ */
+boolean_t
+pmap_page_exists(pmap, m)
+ pmap_t pmap;
+ vm_page_t m;
+{
+ register pv_entry_t pv;
+ int s;
+
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ return FALSE;
+
+ s = splvm();
+
+ /*
+ * Check the page's current mappings, returning immediately if this pmap is found.
+ */
+ for (pv = TAILQ_FIRST(&m->md.pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
+ if (pv->pv_pmap == pmap) {
+ splx(s);
+ return TRUE;
+ }
+ }
+ splx(s);
+ return (FALSE);
+}
+
+#define PMAP_REMOVE_PAGES_CURPROC_ONLY
+/*
+ * Remove all pages from the specified address space;
+ * this aids process exit speeds.  Also, this code
+ * is special cased for current process only, but
+ * can have the more generic (and slightly slower)
+ * mode enabled. This is much faster than pmap_remove
+ * in the case of running down an entire address space.
+ */
+void
+pmap_remove_pages(pmap, sva, eva)
+ pmap_t pmap;
+ vm_offset_t sva, eva;
+{
+ pv_entry_t pv, npv;
+ int s;
+
+#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
+ if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace))) {
+ printf("warning: pmap_remove_pages called with non-current pmap\n");
+ return;
+ }
+#endif
+
+ s = splvm();
+ for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
+ pv;
+ pv = npv) {
+ vm_page_t m;
+
+ npv = TAILQ_NEXT(pv, pv_plist);
+
+ if (pv->pv_va >= eva || pv->pv_va < sva) {
+ continue;
+ }
+
+/*
+ * We cannot remove wired pages from a process' mapping at this time
+ */
+ if (pv->pv_pte.pte_ig & PTE_IG_WIRED) {
+ continue;
+ }
+
+ PMAP_DEBUG_VA(pv->pv_va);
+
+ m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
+ pmap_remove_pv(pmap, pv, m);
+ }
+ splx(s);
+
+ pmap_invalidate_all(pmap);
+}
+
+/*
+ * this routine is used to modify bits in ptes
+ */
+static void
+pmap_changebit(vm_page_t m, int bit, boolean_t setem)
+{
+#if 0
+ pv_entry_t pv;
+ int changed;
+ int s;
+
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ return;
+
+ s = splvm();
+ changed = 0;
+
+ /*
+ * Loop over all current mappings, setting/clearing as appropriate.
+ * If setting RO, do we need to clear the VAC?
+ */
+ for (pv = TAILQ_FIRST(&m->md.pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
+
+ /*
+ * don't write protect pager mappings
+ */
+ if (!setem && bit == (PG_UWE|PG_KWE)) {
+ if (!pmap_track_modified(pv->pv_va))
+ continue;
+ }
+
+#if defined(PMAP_DIAGNOSTIC)
+ if (!pv->pv_pmap) {
+ printf("Null pmap (cb) at va: 0x%lx\n", pv->pv_va);
+ continue;
+ }
+#endif
+
+ pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va);
+
+ changed = 0;
+ if (setem) {
+ *pte |= bit;
+ changed = 1;
+ } else {
+ pt_entry_t pbits = *pte;
+ if (pbits & bit) {
+ changed = 1;
+ *pte = pbits & ~bit;
+ }
+ }
+ if (changed)
+ pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
+ }
+ splx(s);
+#endif
+}
+
+/*
+ * pmap_page_protect:
+ *
+ * Lower the permission for all mappings to a given page.
+ */
+void
+pmap_page_protect(vm_page_t m, vm_prot_t prot)
+{
+#if 0
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
+ pmap_changebit(m, PG_KWE|PG_UWE, FALSE);
+ } else {
+ pmap_remove_all(m);
+ }
+ }
+#endif
+}
+
+vm_offset_t
+pmap_phys_address(ppn)
+ int ppn;
+{
+ return (ia64_ptob(ppn));
+}
+
+/*
+ * pmap_ts_referenced:
+ *
+ * Return the count of reference bits for a page, clearing all of them.
+ *
+ */
+int
+pmap_ts_referenced(vm_page_t m)
+{
+ pv_entry_t pv;
+ int count = 0;
+
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ return 0;
+
+ for (pv = TAILQ_FIRST(&m->md.pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
+ if (pv->pv_pte.pte_a) {
+ count++;
+ pv->pv_pte.pte_a = 0;
+ pmap_update_vhpt(pv);
+ }
+ }
+
+ return count;
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+static boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+ pv_entry_t pv;
+
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ return FALSE;
+
+ for (pv = TAILQ_FIRST(&m->md.pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
+ if (pv->pv_pte.pte_a) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page was modified
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_modified(vm_page_t m)
+{
+ pv_entry_t pv;
+
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ return FALSE;
+
+ for (pv = TAILQ_FIRST(&m->md.pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
+ if (pv->pv_pte.pte_d) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+void
+pmap_clear_modify(vm_page_t m)
+{
+ pv_entry_t pv;
+
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ return;
+
+ for (pv = TAILQ_FIRST(&m->md.pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
+ if (pv->pv_pte.pte_d) {
+ pv->pv_pte.pte_d = 0;
+ pmap_update_vhpt(pv);
+ }
+ }
+}
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+void
+pmap_clear_reference(vm_page_t m)
+{
+ pv_entry_t pv;
+
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ return;
+
+ for (pv = TAILQ_FIRST(&m->md.pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
+ if (pv->pv_pte.pte_a) {
+ pv->pv_pte.pte_a = 0;
+ pmap_update_vhpt(pv);
+ }
+ }
+}
+
+/*
+ * Miscellaneous support routines follow
+ */
+
+static void
+ia64_protection_init()
+{
+ int prot, *kp, *up;
+
+ kp = protection_codes[0];
+ up = protection_codes[1];
+
+ for (prot = 0; prot < 8; prot++) {
+ switch (prot) {
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
+ *kp++ = (PTE_AR_R << 2) | PTE_PL_KERN;
+ *up++ = (PTE_AR_R << 2) | PTE_PL_KERN;
+ break;
+
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
+ *kp++ = (PTE_AR_X_RX << 2) | PTE_PL_KERN;
+ *up++ = (PTE_AR_X_RX << 2) | PTE_PL_USER;
+ break;
+
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
+ *kp++ = (PTE_AR_RW << 2) | PTE_PL_KERN;
+ *up++ = (PTE_AR_RW << 2) | PTE_PL_USER;
+ break;
+
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ *kp++ = (PTE_AR_RWX << 2) | PTE_PL_KERN;
+ *up++ = (PTE_AR_RWX << 2) | PTE_PL_USER;
+ break;
+
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
+ *kp++ = (PTE_AR_R << 2) | PTE_PL_KERN;
+ *up++ = (PTE_AR_R << 2) | PTE_PL_USER;
+ break;
+
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
+ *kp++ = (PTE_AR_RX << 2) | PTE_PL_KERN;
+ *up++ = (PTE_AR_RX << 2) | PTE_PL_USER;
+ break;
+
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
+ *kp++ = (PTE_AR_RW << 2) | PTE_PL_KERN;
+ *up++ = (PTE_AR_RW << 2) | PTE_PL_USER;
+ break;
+
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ *kp++ = (PTE_AR_RWX << 2) | PTE_PL_KERN;
+ *up++ = (PTE_AR_RWX << 2) | PTE_PL_USER;
+ break;
+ }
+ }
+}
+
+/*
+ * Map a set of physical memory pages into the kernel virtual
+ * address space. Return a pointer to where it is mapped. This
+ * routine is intended to be used for mapping device memory,
+ * NOT real memory.
+ */
+void *
+pmap_mapdev(pa, size)
+ vm_offset_t pa;
+ vm_size_t size;
+{
+ return (void*) IA64_PHYS_TO_RR6(pa);
+}
+
+/*
+ * perform the pmap work for mincore
+ */
+int
+pmap_mincore(pmap, addr)
+ pmap_t pmap;
+ vm_offset_t addr;
+{
+ pv_entry_t pv;
+ struct ia64_lpte *pte;
+ int val = 0;
+
+ pv = pmap_find_pv(pmap, addr);
+ if (pv == 0) {
+ return 0;
+ }
+ pte = &pv->pv_pte;
+
+ if (pmap_pte_v(pte)) {
+ vm_page_t m;
+ vm_offset_t pa;
+
+ val = MINCORE_INCORE;
+ if ((pte->pte_ig & PTE_IG_MANAGED) == 0)
+ return val;
+
+ pa = pmap_pte_pa(pte);
+
+ m = PHYS_TO_VM_PAGE(pa);
+
+ /*
+ * Modified by us
+ */
+ if (pte->pte_d)
+ val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
+ /*
+ * Modified by someone
+ */
+ else if (pmap_is_modified(m))
+ val |= MINCORE_MODIFIED_OTHER;
+ /*
+ * Referenced by us
+ */
+ if (pte->pte_a)
+ val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
+
+ /*
+ * Referenced by someone
+ */
+ else if (pmap_ts_referenced(m)) {
+ val |= MINCORE_REFERENCED_OTHER;
+ vm_page_flag_set(m, PG_REFERENCED);
+ }
+ }
+ return val;
+}
+
+void
+pmap_activate(struct proc *p)
+{
+ pmap_t pmap;
+
+ pmap = vmspace_pmap(p->p_vmspace);
+
+ if (pmap_active && pmap != pmap_active) {
+ pmap_active->pm_active = 0;
+ pmap_active = 0;
+ }
+
+ if (pmap->pm_asngen != pmap_current_asngen)
+ pmap_get_asn(pmap);
+
+ pmap_active = pmap;
+ pmap->pm_active = 1; /* XXX use bitmap for SMP */
+
+#if 0
+ p->p_addr->u_pcb.pcb_hw.apcb_asn = pmap->pm_asn;
+#endif
+
+ if (p == curproc) {
+#if 0
+ ia64_pal_swpctx((u_long)p->p_md.md_pcbpaddr);
+#endif
+ }
+}
+
+void
+pmap_deactivate(struct proc *p)
+{
+ pmap_t pmap;
+ pmap = vmspace_pmap(p->p_vmspace);
+ pmap->pm_active = 0;
+ pmap_active = 0;
+}
+
+vm_offset_t
+pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
+{
+
+ return addr;
+}
+
+#if 0
+#if defined(PMAP_DEBUG)
+pmap_pid_dump(int pid)
+{
+ pmap_t pmap;
+ struct proc *p;
+ int npte = 0;
+ int index;
+ LIST_FOREACH(p, &allproc, p_list) {
+ if (p->p_pid != pid)
+ continue;
+
+ if (p->p_vmspace) {
+ int i,j;
+ index = 0;
+ pmap = vmspace_pmap(p->p_vmspace);
+ for(i=0;i<1024;i++) {
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+ unsigned base = i << PDRSHIFT;
+
+ pde = &pmap->pm_pdir[i];
+ if (pde && pmap_pde_v(pde)) {
+ for(j=0;j<1024;j++) {
+ unsigned va = base + (j << PAGE_SHIFT);
+ if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
+ if (index) {
+ index = 0;
+ printf("\n");
+ }
+ return npte;
+ }
+ pte = pmap_pte_quick( pmap, va);
+ if (pte && pmap_pte_v(pte)) {
+ vm_offset_t pa;
+ vm_page_t m;
+ pa = *(int *)pte;
+ m = PHYS_TO_VM_PAGE(pa);
+ printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
+ va, pa, m->hold_count, m->wire_count, m->flags);
+ npte++;
+ index++;
+ if (index >= 2) {
+ index = 0;
+ printf("\n");
+ } else {
+ printf(" ");
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return npte;
+}
+#endif
+
+#if defined(DEBUG)
+
+static void pads __P((pmap_t pm));
+static void pmap_pvdump __P((vm_page_t m));
+
+/* print address space of pmap*/
+static void
+pads(pm)
+ pmap_t pm;
+{
+ int i, j;
+ vm_offset_t va;
+ pt_entry_t *ptep;
+
+ if (pm == kernel_pmap)
+ return;
+ for (i = 0; i < 1024; i++)
+ if (pm->pm_pdir[i])
+ for (j = 0; j < 1024; j++) {
+ va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
+ if (pm == kernel_pmap && va < KERNBASE)
+ continue;
+ if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
+ continue;
+ ptep = pmap_pte_quick(pm, va);
+ if (pmap_pte_v(ptep))
+ printf("%x:%x ", va, *(int *) ptep);
+ };
+
+}
+
+static void
+pmap_pvdump(pa)
+ vm_offset_t pa;
+{
+ pv_entry_t pv;
+
+ printf("pa %x", pa);
+ m = PHYS_TO_VM_PAGE(pa);
+ for (pv = TAILQ_FIRST(&m->md.pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
+ printf(" -> pmap %x, va %x",
+ pv->pv_pmap, pv->pv_va);
+ pads(pv->pv_pmap);
+ }
+ printf(" ");
+}
+#endif
+#endif
diff --git a/sys/ia64/ia64/procfs_machdep.c b/sys/ia64/ia64/procfs_machdep.c
new file mode 100644
index 0000000..000eb02
--- /dev/null
+++ b/sys/ia64/ia64/procfs_machdep.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1993 Jan-Simon Pendry
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Jan-Simon Pendry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)procfs_machdep.c 8.3 (Berkeley) 1/27/94
+ *
+ * From:
+ * $FreeBSD$
+ */
+
+/*
+ * Functions to be implemented here are:
+ *
+ * procfs_read_regs(proc, regs)
+ * Get the current user-visible register set from the process
+ * and copy it into the regs structure (<machine/reg.h>).
+ * The process is stopped at the time read_regs is called.
+ *
+ * procfs_write_regs(proc, regs)
+ * Update the current register set from the passed in regs
+ * structure. Take care to avoid clobbering special CPU
+ * registers or privileged bits in the PSL.
+ * Depending on the architecture this may have fix-up work to do,
+ * especially if the IAR or PCW are modified.
+ * The process is stopped at the time write_regs is called.
+ *
+ * procfs_read_fpregs, procfs_write_fpregs
+ * deal with the floating point register set, otherwise as above.
+ *
+ * procfs_sstep(proc)
+ * Arrange for the process to trap after executing a single instruction.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+#include <sys/vnode.h>
+#include <machine/reg.h>
+#include <machine/md_var.h>
+#include <miscfs/procfs/procfs.h>
+
+#include <vm/vm.h>
+#include <sys/lock.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+
+#include <sys/user.h>
+
+int
+procfs_read_regs(p, regs)
+ struct proc *p;
+ struct reg *regs;
+{
+ if ((p->p_flag & P_INMEM) == 0)
+ return (EIO);
+ return (fill_regs(p, regs));
+}
+
+int
+procfs_write_regs(p, regs)
+ struct proc *p;
+ struct reg *regs;
+{
+ if ((p->p_flag & P_INMEM) == 0)
+ return (EIO);
+ return (set_regs(p, regs));
+}
+
+/*
+ * Ptrace doesn't support fpregs at all, and there are no security holes
+ * or translations for fpregs, so we can just copy them.
+ */
+
+int
+procfs_read_fpregs(p, fpregs)
+ struct proc *p;
+ struct fpreg *fpregs;
+{
+ if ((p->p_flag & P_INMEM) == 0)
+ return (EIO);
+ return (fill_fpregs(p, fpregs));
+}
+
+int
+procfs_write_fpregs(p, fpregs)
+ struct proc *p;
+ struct fpreg *fpregs;
+{
+ if ((p->p_flag & P_INMEM) == 0)
+ return (EIO);
+ return (set_fpregs(p, fpregs));
+}
+
+int
+procfs_sstep(p)
+ struct proc *p;
+{
+ return (EINVAL);
+}
+
+/*
+ * Placeholders
+ */
+int
+procfs_read_dbregs(p, dbregs)
+ struct proc *p;
+ struct dbreg *dbregs;
+{
+ return (EIO);
+}
+
+int
+procfs_write_dbregs(p, dbregs)
+ struct proc *p;
+ struct dbreg *dbregs;
+{
+ return (EIO);
+}
diff --git a/sys/ia64/ia64/ssc.c b/sys/ia64/ia64/ssc.c
new file mode 100644
index 0000000..ad6e4ed
--- /dev/null
+++ b/sys/ia64/ia64/ssc.c
@@ -0,0 +1,263 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/tty.h>
+#include <sys/proc.h>
+#include <sys/ucred.h>
+#include <sys/cons.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <sys/lock.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
+
+#define SSC_GETCHAR 21
+#define SSC_PUTCHAR 31
+
+#define SSC_POLL_HZ 50
+
+static d_open_t sscopen;
+static d_close_t sscclose;
+static d_ioctl_t sscioctl;
+
+#define CDEV_MAJOR 97
+static struct cdevsw ssc_cdevsw = {
+ /* open */ sscopen,
+ /* close */ sscclose,
+ /* read */ ttyread,
+ /* write */ ttywrite,
+ /* ioctl */ sscioctl,
+ /* poll */ ttypoll,
+ /* mmap */ nommap,
+ /* strategy */ nostrategy,
+ /* name */ "ssc",
+ /* maj */ CDEV_MAJOR,
+ /* dump */ nodump,
+ /* psize */ nopsize,
+ /* flags */ 0,
+ /* bmaj */ -1
+};
+
+static struct tty *ssc_tp = NULL;
+static int polltime;
+static struct callout_handle ssctimeouthandle
+ = CALLOUT_HANDLE_INITIALIZER(&ssctimeouthandle);
+
+static void sscstart(struct tty *);
+static void ssctimeout(void *);
+static int sscparam(struct tty *, struct termios *);
+static void sscstop(struct tty *, int);
+
+static u_int64_t
+ssc(u_int64_t in0, u_int64_t in1, u_int64_t in2, u_int64_t in3, int which)
+{
+ register u_int64_t ret0 __asm("r8");
+
+ __asm __volatile("mov r15=%1\n\t"
+ "break 0x80001"
+ : "=r"(ret0)
+ : "r"(which), "r"(in0), "r"(in1), "r"(in2), "r"(in3));
+ return ret0;
+}
+
+void
+ssccnputc(dev_t dev, int c)
+{
+ ssc(c, 0, 0, 0, SSC_PUTCHAR);
+}
+
+static int
+ssccngetc(dev_t dev)
+{
+ return -1;
+}
+
+static int
+ssccncheckc(dev_t dev)
+{
+ return -1;
+}
+
+static int
+sscopen(dev_t dev, int flag, int mode, struct proc *p)
+{
+ struct tty *tp;
+ int s;
+ int error = 0, setuptimeout = 0;
+
+ tp = ssc_tp = dev->si_tty = ttymalloc(ssc_tp);
+
+ s = spltty();
+ tp->t_oproc = sscstart;
+ tp->t_param = sscparam;
+ tp->t_stop = sscstop;
+ tp->t_dev = dev;
+ if ((tp->t_state & TS_ISOPEN) == 0) {
+ tp->t_state |= TS_CARR_ON;
+ ttychars(tp);
+ tp->t_iflag = TTYDEF_IFLAG;
+ tp->t_oflag = TTYDEF_OFLAG;
+ tp->t_cflag = TTYDEF_CFLAG|CLOCAL;
+ tp->t_lflag = TTYDEF_LFLAG;
+ tp->t_ispeed = tp->t_ospeed = TTYDEF_SPEED;
+ ttsetwater(tp);
+
+ setuptimeout = 1;
+ } else if ((tp->t_state & TS_XCLUDE) && suser(p)) {
+ splx(s);
+ return EBUSY;
+ }
+
+ splx(s);
+
+ error = (*linesw[tp->t_line].l_open)(dev, tp);
+
+ if (error == 0 && setuptimeout) {
+ polltime = hz / SSC_POLL_HZ;
+ if (polltime < 1)
+ polltime = 1;
+ ssctimeouthandle = timeout(ssctimeout, tp, polltime);
+ }
+ return error;
+}
+
+static int
+sscclose(dev_t dev, int flag, int mode, struct proc *p)
+{
+ int unit = minor(dev);
+ struct tty *tp = ssc_tp;
+
+ if (unit != 0)
+ return ENXIO;
+
+ untimeout(ssctimeout, tp, ssctimeouthandle);
+ (*linesw[tp->t_line].l_close)(tp, flag);
+ ttyclose(tp);
+ return 0;
+}
+
+static int
+sscioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
+{
+ int unit = minor(dev);
+ struct tty *tp = ssc_tp;
+ int error;
+
+ if (unit != 0)
+ return ENXIO;
+
+ error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p);
+ if (error != ENOIOCTL)
+ return error;
+ error = ttioctl(tp, cmd, data, flag);
+ if (error != ENOIOCTL)
+ return error;
+
+ return ENOTTY;
+}
+
+static int
+sscparam(struct tty *tp, struct termios *t)
+{
+
+ return 0;
+}
+
+static void
+sscstart(struct tty *tp)
+{
+ int s;
+
+ s = spltty();
+
+ if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) {
+ ttwwakeup(tp);
+ splx(s);
+ return;
+ }
+
+ tp->t_state |= TS_BUSY;
+ while (tp->t_outq.c_cc != 0)
+ ssccnputc(tp->t_dev, getc(&tp->t_outq));
+ tp->t_state &= ~TS_BUSY;
+
+ ttwwakeup(tp);
+ splx(s);
+}
+
+/*
+ * Stop output on a line.
+ */
+static void
+sscstop(struct tty *tp, int flag)
+{
+ int s;
+
+ s = spltty();
+ if (tp->t_state & TS_BUSY)
+ if ((tp->t_state & TS_TTSTOP) == 0)
+ tp->t_state |= TS_FLUSH;
+ splx(s);
+}
+
+static void
+ssctimeout(void *v)
+{
+ struct tty *tp = v;
+ int c;
+
+ while ((c = ssccncheckc(tp->t_dev)) != -1) {
+ if (tp->t_state & TS_ISOPEN)
+ (*linesw[tp->t_line].l_rint)(c, tp);
+ }
+ ssctimeouthandle = timeout(ssctimeout, tp, polltime);
+}
+
+CONS_DRIVER(ssc, NULL, NULL, NULL, ssccngetc, ssccncheckc, ssccnputc, NULL);
+
+void
+ssccnattach(void)
+{
+ cn_tab = &ssc_consdev;
+ ssc_consdev.cn_pri = CN_NORMAL;
+ ssc_consdev.cn_dev = makedev(CDEV_MAJOR, 0);
+ make_dev(&ssc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "ssccons");
+}
+
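The whole simulator console above funnels through the ssc() stub: it loads the request code into r15, issues break 0x80001 with the four data arguments as inputs, and the simulator's reply comes back in r8 (bound to ret0). A minimal sketch of pushing a whole string through that channel using only the ssccnputc() routine defined above (the helper name is invented for illustration):

/* Illustrative only: one SSC_PUTCHAR request per character. */
static void
ssccnputs(dev_t dev, const char *s)
{
	while (*s != '\0')
		ssccnputc(dev, *s++);
}
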
diff --git a/sys/ia64/ia64/support.S b/sys/ia64/ia64/support.S
new file mode 100644
index 0000000..91e3856
--- /dev/null
+++ b/sys/ia64/ia64/support.S
@@ -0,0 +1,662 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <machine/asm.h>
+#include <assym.s>
+
+ .text
+
+/**************************************************************************/
+
+/*
+ * fu{byte,word}: fetch a byte (word) from user memory
+ * su{byte,word}: store a byte (word) to user memory
+ */
+
+ LEAF(suword, 1)
+#if 0
+ LDGP(pv)
+
+ ldiq t0, VM_MAXUSER_ADDRESS /* verify address validity */
+ cmpult a0, t0, t1
+ beq t1, fusufault
+
+ lda t0, fusufault /* trap faults */
+ ldq t2, curproc
+ ldq t2, P_ADDR(t2)
+ stq t0, U_PCB_ONFAULT(t2)
+
+ stq a1, 0(a0) /* try the store */
+
+ stq zero, U_PCB_ONFAULT(t2) /* clean up */
+
+ mov zero, v0
+ RET
+#endif
+ END(suword)
+
+ LEAF(subyte, 1)
+#if 0
+ LDGP(pv)
+
+ ldiq t0, VM_MAXUSER_ADDRESS /* verify address validity */
+ cmpult a0, t0, t1
+ beq t1, fusufault
+
+ lda t0, fusufault /* trap faults */
+ ldq t2, curproc
+ ldq t2, P_ADDR(t2)
+ stq t0, U_PCB_ONFAULT(t2)
+
+ zap a1, 0xfe, a1 /* mask off the byte to store */
+ insbl a1, a0, a1 /* shift it to the right place */
+ ldq_u t0, 0(a0) /* read the qword to store it in */
+ mskbl t0, a0, t0 /* make a place for our byte */
+ or a1, t0, a1 /* move it in */
+ stq_u a1, 0(a0) /* and put the byte back */
+
+ stq zero, U_PCB_ONFAULT(t2) /* clean up */
+
+ mov zero, v0
+ RET
+#endif
+ END(subyte)
+
+ LEAF(fuword, 1)
+#if 0
+ LDGP(pv)
+
+ ldiq t0, VM_MAXUSER_ADDRESS /* verify address validity */
+ cmpult a0, t0, t1
+ beq t1, fusufault
+
+ lda t0, fusufault /* trap faults */
+ ldq t2, curproc
+ ldq t2, P_ADDR(t2)
+ stq t0, U_PCB_ONFAULT(t2)
+
+ ldq v0, 0(a0) /* try the fetch */
+
+ stq zero, U_PCB_ONFAULT(t2) /* clean up */
+
+ RET
+#endif
+ END(fuword)
+
+ LEAF(fubyte, 1)
+#if 0
+ LDGP(pv)
+
+ ldiq t0, VM_MAXUSER_ADDRESS /* verify address validity */
+ cmpult a0, t0, t1
+ beq t1, fusufault
+
+ lda t0, fusufault /* trap faults */
+ ldq t2, curproc
+ ldq t2, P_ADDR(t2)
+ stq t0, U_PCB_ONFAULT(t2)
+
+ ldq_u v0, 0(a0) /* get the word containing our byte */
+ extbl v0, a0, v0 /* extract the byte */
+
+ stq zero, U_PCB_ONFAULT(t2) /* clean up */
+
+ RET
+#endif
+ END(fubyte)
+
+ LEAF(suibyte, 2)
+#if 0
+ ldiq v0, -1
+ RET
+#endif
+ END(suibyte)
+
+ LEAF(fusufault, 0)
+#if 0
+ ldq t0, curproc
+ ldq t0, P_ADDR(t0)
+ stq zero, U_PCB_ONFAULT(t0)
+ ldiq v0, -1
+ RET
+#endif
+ END(fusufault)
+
+LEAF(fswintrberr, 0)
+XLEAF(fuswintr) /* XXX what is a 'word'? */
+XLEAF(suswintr) /* XXX what is a 'word'? */
+#if 0
+ LDGP(pv)
+ ldiq v0, -1
+ RET
+#endif
+ END(fswintrberr)
+
+/**************************************************************************/
+
+/*
+ * Copy a null-terminated string within the kernel's address space.
+ * If lenp is not NULL, store the number of chars copied in *lenp
+ *
+ * int copystr(char *from, char *to, size_t len, size_t *lenp);
+ */
+LEAF(copystr, 4)
+#if 0
+ LDGP(pv)
+
+ mov a2, t0 /* t0 = i = len */
+ beq a2, Lcopystr2 /* if (len == 0), bail out */
+
+Lcopystr1:
+ ldq_u t1, 0(a0) /* t1 = *from */
+ extbl t1, a0, t1
+ ldq_u t3, 0(a1) /* set up t2 with quad around *to */
+ insbl t1, a1, t2
+ mskbl t3, a1, t3
+ or t3, t2, t3 /* add *from to quad around *to */
+ stq_u t3, 0(a1) /* write out that quad */
+
+ subl a2, 1, a2 /* len-- */
+ beq t1, Lcopystr2 /* if (*from == 0), bail out */
+ addq a1, 1, a1 /* to++ */
+ addq a0, 1, a0 /* from++ */
+ bne a2, Lcopystr1 /* if (len != 0) copy more */
+
+Lcopystr2:
+ beq a3, Lcopystr3 /* if (lenp != NULL) */
+ subl t0, a2, t0 /* *lenp = (i - len) */
+ stq t0, 0(a3)
+Lcopystr3:
+ beq t1, Lcopystr4 /* *from == '\0'; leave quietly */
+
+ ldiq v0, ENAMETOOLONG /* *from != '\0'; error. */
+ RET
+
+Lcopystr4:
+ mov zero, v0 /* return 0. */
+ RET
+#endif
+ END(copystr)
+
+LEAF(copyinstr, 4)
+#if 0
+ LDGP(pv)
+ lda sp, -16(sp) /* set up stack frame */
+ stq ra, (16-8)(sp) /* save ra */
+ ldiq t0, VM_MAXUSER_ADDRESS /* make sure that src addr */
+ cmpult a0, t0, t1 /* is in user space. */
+ beq t1, copyerr /* if it's not, error out. */
+ lda v0, copyerr /* set up fault handler. */
+ .set noat
+ ldq at_reg, curproc
+ ldq at_reg, P_ADDR(at_reg)
+ stq v0, U_PCB_ONFAULT(at_reg)
+ .set at
+ CALL(copystr) /* do the copy. */
+ .set noat
+ ldq at_reg, curproc /* kill the fault handler. */
+ ldq at_reg, P_ADDR(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
+ .set at
+ ldq ra, (16-8)(sp) /* restore ra. */
+ lda sp, 16(sp) /* kill stack frame. */
+ RET /* v0 left over from copystr */
+#endif
+ END(copyinstr)
+
+LEAF(copyoutstr, 4)
+#if 0
+ LDGP(pv)
+ lda sp, -16(sp) /* set up stack frame */
+ stq ra, (16-8)(sp) /* save ra */
+ ldiq t0, VM_MAXUSER_ADDRESS /* make sure that dest addr */
+ cmpult a1, t0, t1 /* is in user space. */
+ beq t1, copyerr /* if it's not, error out. */
+ lda v0, copyerr /* set up fault handler. */
+ .set noat
+ ldq at_reg, curproc
+ ldq at_reg, P_ADDR(at_reg)
+ stq v0, U_PCB_ONFAULT(at_reg)
+ .set at
+ CALL(copystr) /* do the copy. */
+ .set noat
+ ldq at_reg, curproc /* kill the fault handler. */
+ ldq at_reg, P_ADDR(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
+ .set at
+ ldq ra, (16-8)(sp) /* restore ra. */
+ lda sp, 16(sp) /* kill stack frame. */
+ RET /* v0 left over from copystr */
+#endif
+ END(copyoutstr)
+
+/*
+ * Alternative memory mover
+ */
+ LEAF(memcpy,3)
+#if 0
+ mov a0,t0
+ mov a1,a0
+ mov t0,a1
+ br bcopy
+#endif
+ END(memcpy)
+
+/*
+ * Copy bytes within the kernel's address space.
+ *
+ * In the kernel, bcopy() doesn't have to handle the overlapping
+ * case; that's what ovbcopy() is for.  However, it doesn't hurt
+ * to do both in bcopy, and it does provide a measure of safety.
+ *
+ * void bcopy(char *from, char *to, size_t len);
+ * void ovbcopy(char *from, char *to, size_t len);
+ */
+LEAF(bcopy,3)
+XLEAF(ovbcopy)
+#if 0
+ /* Check for negative length */
+ ble a2,bcopy_done
+
+ /* Check for overlap */
+ subq a1,a0,t5
+ cmpult t5,a2,t5
+ bne t5,bcopy_overlap
+
+ /* a3 = end address */
+ addq a0,a2,a3
+
+ /* Get the first word */
+ ldq_u t2,0(a0)
+
+ /* Do they have the same alignment? */
+ xor a0,a1,t0
+ and t0,7,t0
+ and a1,7,t1
+ bne t0,bcopy_different_alignment
+
+ /* src & dst have same alignment */
+ beq t1,bcopy_all_aligned
+
+ ldq_u t3,0(a1)
+ addq a2,t1,a2
+ mskqh t2,a0,t2
+ mskql t3,a0,t3
+ or t2,t3,t2
+
+ /* Dst is 8-byte aligned */
+
+bcopy_all_aligned:
+ /* If less than 8 bytes,skip loop */
+ subq a2,1,t0
+ and a2,7,a2
+ bic t0,7,t0
+ beq t0,bcopy_samealign_lp_end
+
+bcopy_samealign_lp:
+ stq_u t2,0(a1)
+ addq a1,8,a1
+ ldq_u t2,8(a0)
+ subq t0,8,t0
+ addq a0,8,a0
+ bne t0,bcopy_samealign_lp
+
+bcopy_samealign_lp_end:
+ /* If we're done, exit */
+ bne a2,bcopy_small_left
+ stq_u t2,0(a1)
+ RET
+
+bcopy_small_left:
+ mskql t2,a2,t4
+ ldq_u t3,0(a1)
+ mskqh t3,a2,t3
+ or t4,t3,t4
+ stq_u t4,0(a1)
+ RET
+
+bcopy_different_alignment:
+ /*
+ * this is the fun part
+ */
+ addq a0,a2,a3
+ cmpule a2,8,t0
+ bne t0,bcopy_da_finish
+
+ beq t1,bcopy_da_noentry
+
+ /* Do the initial partial word */
+ subq zero,a1,t0
+ and t0,7,t0
+ ldq_u t3,7(a0)
+ extql t2,a0,t2
+ extqh t3,a0,t3
+ or t2,t3,t5
+ insql t5,a1,t5
+ ldq_u t6,0(a1)
+ mskql t6,a1,t6
+ or t5,t6,t5
+ stq_u t5,0(a1)
+ addq a0,t0,a0
+ addq a1,t0,a1
+ subq a2,t0,a2
+ ldq_u t2,0(a0)
+
+bcopy_da_noentry:
+ subq a2,1,t0
+ bic t0,7,t0
+ and a2,7,a2
+ beq t0,bcopy_da_finish2
+
+bcopy_da_lp:
+ ldq_u t3,7(a0)
+ addq a0,8,a0
+ extql t2,a0,t4
+ extqh t3,a0,t5
+ subq t0,8,t0
+ or t4,t5,t5
+ stq t5,0(a1)
+ addq a1,8,a1
+ beq t0,bcopy_da_finish1
+ ldq_u t2,7(a0)
+ addq a0,8,a0
+ extql t3,a0,t4
+ extqh t2,a0,t5
+ subq t0,8,t0
+ or t4,t5,t5
+ stq t5,0(a1)
+ addq a1,8,a1
+ bne t0,bcopy_da_lp
+
+bcopy_da_finish2:
+ /* Do the last new word */
+ mov t2,t3
+
+bcopy_da_finish1:
+ /* Do the last partial word */
+ ldq_u t2,-1(a3)
+ extql t3,a0,t3
+ extqh t2,a0,t2
+ or t2,t3,t2
+ br zero,bcopy_samealign_lp_end
+
+bcopy_da_finish:
+ /* Do the last word in the next source word */
+ ldq_u t3,-1(a3)
+ extql t2,a0,t2
+ extqh t3,a0,t3
+ or t2,t3,t2
+ insqh t2,a1,t3
+ insql t2,a1,t2
+ lda t4,-1(zero)
+ mskql t4,a2,t5
+ cmovne t5,t5,t4
+ insqh t4,a1,t5
+ insql t4,a1,t4
+ addq a1,a2,a4
+ ldq_u t6,0(a1)
+ ldq_u t7,-1(a4)
+ bic t6,t4,t6
+ bic t7,t5,t7
+ and t2,t4,t2
+ and t3,t5,t3
+ or t2,t6,t2
+ or t3,t7,t3
+ stq_u t3,-1(a4)
+ stq_u t2,0(a1)
+ RET
+
+bcopy_overlap:
+ /*
+ * Basically equivalent to previous case, only backwards.
+ * Not quite as highly optimized
+ */
+ addq a0,a2,a3
+ addq a1,a2,a4
+
+ /* less than 8 bytes - don't worry about overlap */
+ cmpule a2,8,t0
+ bne t0,bcopy_ov_short
+
+ /* Possibly do a partial first word */
+ and a4,7,t4
+ beq t4,bcopy_ov_nostart2
+ subq a3,t4,a3
+ subq a4,t4,a4
+ ldq_u t1,0(a3)
+ subq a2,t4,a2
+ ldq_u t2,7(a3)
+ ldq t3,0(a4)
+ extql t1,a3,t1
+ extqh t2,a3,t2
+ or t1,t2,t1
+ mskqh t3,t4,t3
+ mskql t1,t4,t1
+ or t1,t3,t1
+ stq t1,0(a4)
+
+bcopy_ov_nostart2:
+ bic a2,7,t4
+ and a2,7,a2
+ beq t4,bcopy_ov_lp_end
+
+bcopy_ov_lp:
+ /* This could be more pipelined, but it doesn't seem worth it */
+ ldq_u t0,-8(a3)
+ subq a4,8,a4
+ ldq_u t1,-1(a3)
+ subq a3,8,a3
+ extql t0,a3,t0
+ extqh t1,a3,t1
+ subq t4,8,t4
+ or t0,t1,t0
+ stq t0,0(a4)
+ bne t4,bcopy_ov_lp
+
+bcopy_ov_lp_end:
+ beq a2,bcopy_done
+
+ ldq_u t0,0(a0)
+ ldq_u t1,7(a0)
+ ldq_u t2,0(a1)
+ extql t0,a0,t0
+ extqh t1,a0,t1
+ or t0,t1,t0
+ insql t0,a1,t0
+ mskql t2,a1,t2
+ or t2,t0,t2
+ stq_u t2,0(a1)
+
+bcopy_done:
+ RET
+
+bcopy_ov_short:
+ ldq_u t2,0(a0)
+ br zero,bcopy_da_finish
+#endif
+ END(bcopy)
+
+LEAF(copyin, 3)
+#if 0
+ LDGP(pv)
+ lda sp, -16(sp) /* set up stack frame */
+ stq ra, (16-8)(sp) /* save ra */
+ ldiq t0, VM_MAXUSER_ADDRESS /* make sure that src addr */
+ cmpult a0, t0, t1 /* is in user space. */
+ beq t1, copyerr /* if it's not, error out. */
+ lda v0, copyerr /* set up fault handler. */
+ .set noat
+ ldq at_reg, curproc
+ ldq at_reg, P_ADDR(at_reg)
+ stq v0, U_PCB_ONFAULT(at_reg)
+ .set at
+ CALL(bcopy) /* do the copy. */
+ .set noat
+ ldq at_reg, curproc /* kill the fault handler. */
+ ldq at_reg, P_ADDR(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
+ .set at
+ ldq ra, (16-8)(sp) /* restore ra. */
+ lda sp, 16(sp) /* kill stack frame. */
+ mov zero, v0 /* return 0. */
+ RET
+#endif
+ END(copyin)
+
+LEAF(copyout, 3)
+#if 0
+ LDGP(pv)
+ lda sp, -16(sp) /* set up stack frame */
+ stq ra, (16-8)(sp) /* save ra */
+ ldiq t0, VM_MAXUSER_ADDRESS /* make sure that dest addr */
+ cmpult a1, t0, t1 /* is in user space. */
+ beq t1, copyerr /* if it's not, error out. */
+ lda v0, copyerr /* set up fault handler. */
+ .set noat
+ ldq at_reg, curproc
+ ldq at_reg, P_ADDR(at_reg)
+ stq v0, U_PCB_ONFAULT(at_reg)
+ .set at
+ CALL(bcopy) /* do the copy. */
+ .set noat
+ ldq at_reg, curproc /* kill the fault handler. */
+ ldq at_reg, P_ADDR(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
+ .set at
+ ldq ra, (16-8)(sp) /* restore ra. */
+ lda sp, 16(sp) /* kill stack frame. */
+ mov zero, v0 /* return 0. */
+ RET
+#endif
+ END(copyout)
+
+LEAF(copyerr, 0)
+#if 0
+ ldq t0, curproc
+ ldq t0, P_ADDR(t0)
+ stq zero, U_PCB_ONFAULT(t0) /* reset fault handler. */
+ ldq ra, (16-8)(sp) /* restore ra. */
+ lda sp, 16(sp) /* kill stack frame. */
+ ldiq v0, EFAULT /* return EFAULT. */
+ RET
+#endif
+ END(copyerr)
+
+/**************************************************************************/
+
+/*
+ * Kernel setjmp and longjmp. Rather minimalist.
+ *
+ * longjmp(label_t *a)
+ * will generate a "return (1)" from the last call to
+ * setjmp(label_t *a)
+ *	by restoring registers from the stack.
+ */
+
+
+LEAF(setjmp, 1)
+#if 0
+ LDGP(pv)
+
+ stq ra, (0 * 8)(a0) /* return address */
+ stq s0, (1 * 8)(a0) /* callee-saved registers */
+ stq s1, (2 * 8)(a0)
+ stq s2, (3 * 8)(a0)
+ stq s3, (4 * 8)(a0)
+ stq s4, (5 * 8)(a0)
+ stq s5, (6 * 8)(a0)
+ stq s6, (7 * 8)(a0)
+ stq sp, (8 * 8)(a0)
+
+ ldiq t0, 0xbeeffedadeadbabe /* set magic number */
+ stq t0, (9 * 8)(a0)
+
+ mov zero, v0 /* return zero */
+ RET
+#endif
+END(setjmp)
+
+LEAF(longjmp, 1)
+#if 0
+ LDGP(pv)
+
+ ldiq t0, 0xbeeffedadeadbabe /* check magic number */
+ ldq t1, (9 * 8)(a0)
+ cmpeq t0, t1, t0
+ beq t0, longjmp_botch /* if bad, punt */
+
+ ldq ra, (0 * 8)(a0) /* return address */
+ ldq s0, (1 * 8)(a0) /* callee-saved registers */
+ ldq s1, (2 * 8)(a0)
+ ldq s2, (3 * 8)(a0)
+ ldq s3, (4 * 8)(a0)
+ ldq s4, (5 * 8)(a0)
+ ldq s5, (6 * 8)(a0)
+ ldq s6, (7 * 8)(a0)
+ ldq sp, (8 * 8)(a0)
+
+ ldiq v0, 1
+ RET
+
+longjmp_botch:
+ lda a0, longjmp_botchmsg
+ mov ra, a1
+ CALL(panic)
+ call_pal PAL_bugchk
+
+ .data
+longjmp_botchmsg:
+ .asciz "longjmp botch from %p"
+ .text
+#endif
+END(longjmp)
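
Every entry point in this file is still a stub: the #if 0 bodies are evidently the old Alpha implementations, kept as a reference until ia64 versions are written, so fubyte(), copyin(), copystr() and friends currently do nothing useful. For orientation, the contract copystr() is expected to meet, sketched in plain C (a description of the interface, not the eventual ia64 implementation):

#include <sys/types.h>		/* size_t */
#include <sys/errno.h>		/* ENAMETOOLONG */

/* Illustrative only: the semantics the stubbed copystr() must provide. */
int
copystr(const char *from, char *to, size_t len, size_t *lenp)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if ((to[i] = from[i]) == '\0') {
			if (lenp != NULL)
				*lenp = i + 1;	/* count includes the NUL */
			return (0);
		}
	}
	if (lenp != NULL)
		*lenp = i;		/* ran out of room */
	return (ENAMETOOLONG);
}
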
diff --git a/sys/ia64/ia64/support.s b/sys/ia64/ia64/support.s
new file mode 100644
index 0000000..91e3856
--- /dev/null
+++ b/sys/ia64/ia64/support.s
@@ -0,0 +1,662 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <machine/asm.h>
+#include <assym.s>
+
+ .text
+
+/**************************************************************************/
+
+/*
+ * fu{byte,word}: fetch a byte (word) from user memory
+ * su{byte,word}: store a byte (word) to user memory
+ */
+
+ LEAF(suword, 1)
+#if 0
+ LDGP(pv)
+
+ ldiq t0, VM_MAXUSER_ADDRESS /* verify address validity */
+ cmpult a0, t0, t1
+ beq t1, fusufault
+
+ lda t0, fusufault /* trap faults */
+ ldq t2, curproc
+ ldq t2, P_ADDR(t2)
+ stq t0, U_PCB_ONFAULT(t2)
+
+ stq a1, 0(a0) /* try the store */
+
+ stq zero, U_PCB_ONFAULT(t2) /* clean up */
+
+ mov zero, v0
+ RET
+#endif
+ END(suword)
+
+ LEAF(subyte, 1)
+#if 0
+ LDGP(pv)
+
+ ldiq t0, VM_MAXUSER_ADDRESS /* verify address validity */
+ cmpult a0, t0, t1
+ beq t1, fusufault
+
+ lda t0, fusufault /* trap faults */
+ ldq t2, curproc
+ ldq t2, P_ADDR(t2)
+ stq t0, U_PCB_ONFAULT(t2)
+
+ zap a1, 0xfe, a1 /* mask off the byte to store */
+ insbl a1, a0, a1 /* shift it to the right place */
+ ldq_u t0, 0(a0) /* read the qword to store it in */
+ mskbl t0, a0, t0 /* make a place for our byte */
+ or a1, t0, a1 /* move it in */
+ stq_u a1, 0(a0) /* and put the byte back */
+
+ stq zero, U_PCB_ONFAULT(t2) /* clean up */
+
+ mov zero, v0
+ RET
+#endif
+ END(subyte)
+
+ LEAF(fuword, 1)
+#if 0
+ LDGP(pv)
+
+ ldiq t0, VM_MAXUSER_ADDRESS /* verify address validity */
+ cmpult a0, t0, t1
+ beq t1, fusufault
+
+ lda t0, fusufault /* trap faults */
+ ldq t2, curproc
+ ldq t2, P_ADDR(t2)
+ stq t0, U_PCB_ONFAULT(t2)
+
+ ldq v0, 0(a0) /* try the fetch */
+
+ stq zero, U_PCB_ONFAULT(t2) /* clean up */
+
+ RET
+#endif
+ END(fuword)
+
+ LEAF(fubyte, 1)
+#if 0
+ LDGP(pv)
+
+ ldiq t0, VM_MAXUSER_ADDRESS /* verify address validity */
+ cmpult a0, t0, t1
+ beq t1, fusufault
+
+ lda t0, fusufault /* trap faults */
+ ldq t2, curproc
+ ldq t2, P_ADDR(t2)
+ stq t0, U_PCB_ONFAULT(t2)
+
+ ldq_u v0, 0(a0) /* get the word containing our byte */
+ extbl v0, a0, v0 /* extract the byte */
+
+ stq zero, U_PCB_ONFAULT(t2) /* clean up */
+
+ RET
+#endif
+ END(fubyte)
+
+ LEAF(suibyte, 2)
+#if 0
+ ldiq v0, -1
+ RET
+#endif
+ END(suibyte)
+
+ LEAF(fusufault, 0)
+#if 0
+ ldq t0, curproc
+ ldq t0, P_ADDR(t0)
+ stq zero, U_PCB_ONFAULT(t0)
+ ldiq v0, -1
+ RET
+#endif
+ END(fusufault)
+
+LEAF(fswintrberr, 0)
+XLEAF(fuswintr) /* XXX what is a 'word'? */
+XLEAF(suswintr) /* XXX what is a 'word'? */
+#if 0
+ LDGP(pv)
+ ldiq v0, -1
+ RET
+#endif
+ END(fswintrberr)
+
+/**************************************************************************/
+
+/*
+ * Copy a null-terminated string within the kernel's address space.
+ * If lenp is not NULL, store the number of chars copied in *lenp
+ *
+ * int copystr(char *from, char *to, size_t len, size_t *lenp);
+ */
+LEAF(copystr, 4)
+#if 0
+ LDGP(pv)
+
+ mov a2, t0 /* t0 = i = len */
+ beq a2, Lcopystr2 /* if (len == 0), bail out */
+
+Lcopystr1:
+ ldq_u t1, 0(a0) /* t1 = *from */
+ extbl t1, a0, t1
+ ldq_u t3, 0(a1) /* set up t2 with quad around *to */
+ insbl t1, a1, t2
+ mskbl t3, a1, t3
+ or t3, t2, t3 /* add *from to quad around *to */
+ stq_u t3, 0(a1) /* write out that quad */
+
+ subl a2, 1, a2 /* len-- */
+ beq t1, Lcopystr2 /* if (*from == 0), bail out */
+ addq a1, 1, a1 /* to++ */
+ addq a0, 1, a0 /* from++ */
+ bne a2, Lcopystr1 /* if (len != 0) copy more */
+
+Lcopystr2:
+ beq a3, Lcopystr3 /* if (lenp != NULL) */
+ subl t0, a2, t0 /* *lenp = (i - len) */
+ stq t0, 0(a3)
+Lcopystr3:
+ beq t1, Lcopystr4 /* *from == '\0'; leave quietly */
+
+ ldiq v0, ENAMETOOLONG /* *from != '\0'; error. */
+ RET
+
+Lcopystr4:
+ mov zero, v0 /* return 0. */
+ RET
+#endif
+ END(copystr)
+
+LEAF(copyinstr, 4)
+#if 0
+ LDGP(pv)
+ lda sp, -16(sp) /* set up stack frame */
+ stq ra, (16-8)(sp) /* save ra */
+ ldiq t0, VM_MAXUSER_ADDRESS /* make sure that src addr */
+ cmpult a0, t0, t1 /* is in user space. */
+ beq t1, copyerr /* if it's not, error out. */
+ lda v0, copyerr /* set up fault handler. */
+ .set noat
+ ldq at_reg, curproc
+ ldq at_reg, P_ADDR(at_reg)
+ stq v0, U_PCB_ONFAULT(at_reg)
+ .set at
+ CALL(copystr) /* do the copy. */
+ .set noat
+ ldq at_reg, curproc /* kill the fault handler. */
+ ldq at_reg, P_ADDR(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
+ .set at
+ ldq ra, (16-8)(sp) /* restore ra. */
+ lda sp, 16(sp) /* kill stack frame. */
+ RET /* v0 left over from copystr */
+#endif
+ END(copyinstr)
+
+LEAF(copyoutstr, 4)
+#if 0
+ LDGP(pv)
+ lda sp, -16(sp) /* set up stack frame */
+ stq ra, (16-8)(sp) /* save ra */
+ ldiq t0, VM_MAXUSER_ADDRESS /* make sure that dest addr */
+ cmpult a1, t0, t1 /* is in user space. */
+ beq t1, copyerr /* if it's not, error out. */
+ lda v0, copyerr /* set up fault handler. */
+ .set noat
+ ldq at_reg, curproc
+ ldq at_reg, P_ADDR(at_reg)
+ stq v0, U_PCB_ONFAULT(at_reg)
+ .set at
+ CALL(copystr) /* do the copy. */
+ .set noat
+ ldq at_reg, curproc /* kill the fault handler. */
+ ldq at_reg, P_ADDR(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
+ .set at
+ ldq ra, (16-8)(sp) /* restore ra. */
+ lda sp, 16(sp) /* kill stack frame. */
+ RET /* v0 left over from copystr */
+#endif
+ END(copyoutstr)
+
+/*
+ * Alternative memory mover
+ */
+ LEAF(memcpy,3)
+#if 0
+ mov a0,t0
+ mov a1,a0
+ mov t0,a1
+ br bcopy
+#endif
+ END(memcpy)
+
+/*
+ * Copy bytes within the kernel's address space.
+ *
+ * In the kernel, bcopy() doesn't have to handle the overlapping
+ * case; that's what ovbcopy() is for.  However, it doesn't hurt
+ * to do both in bcopy, and it does provide a measure of safety.
+ *
+ * void bcopy(char *from, char *to, size_t len);
+ * void ovbcopy(char *from, char *to, size_t len);
+ */
+LEAF(bcopy,3)
+XLEAF(ovbcopy)
+#if 0
+ /* Check for negative length */
+ ble a2,bcopy_done
+
+ /* Check for overlap */
+ subq a1,a0,t5
+ cmpult t5,a2,t5
+ bne t5,bcopy_overlap
+
+ /* a3 = end address */
+ addq a0,a2,a3
+
+ /* Get the first word */
+ ldq_u t2,0(a0)
+
+ /* Do they have the same alignment? */
+ xor a0,a1,t0
+ and t0,7,t0
+ and a1,7,t1
+ bne t0,bcopy_different_alignment
+
+ /* src & dst have same alignment */
+ beq t1,bcopy_all_aligned
+
+ ldq_u t3,0(a1)
+ addq a2,t1,a2
+ mskqh t2,a0,t2
+ mskql t3,a0,t3
+ or t2,t3,t2
+
+ /* Dst is 8-byte aligned */
+
+bcopy_all_aligned:
+ /* If less than 8 bytes,skip loop */
+ subq a2,1,t0
+ and a2,7,a2
+ bic t0,7,t0
+ beq t0,bcopy_samealign_lp_end
+
+bcopy_samealign_lp:
+ stq_u t2,0(a1)
+ addq a1,8,a1
+ ldq_u t2,8(a0)
+ subq t0,8,t0
+ addq a0,8,a0
+ bne t0,bcopy_samealign_lp
+
+bcopy_samealign_lp_end:
+ /* If we're done, exit */
+ bne a2,bcopy_small_left
+ stq_u t2,0(a1)
+ RET
+
+bcopy_small_left:
+ mskql t2,a2,t4
+ ldq_u t3,0(a1)
+ mskqh t3,a2,t3
+ or t4,t3,t4
+ stq_u t4,0(a1)
+ RET
+
+bcopy_different_alignment:
+ /*
+ * this is the fun part
+ */
+ addq a0,a2,a3
+ cmpule a2,8,t0
+ bne t0,bcopy_da_finish
+
+ beq t1,bcopy_da_noentry
+
+ /* Do the initial partial word */
+ subq zero,a1,t0
+ and t0,7,t0
+ ldq_u t3,7(a0)
+ extql t2,a0,t2
+ extqh t3,a0,t3
+ or t2,t3,t5
+ insql t5,a1,t5
+ ldq_u t6,0(a1)
+ mskql t6,a1,t6
+ or t5,t6,t5
+ stq_u t5,0(a1)
+ addq a0,t0,a0
+ addq a1,t0,a1
+ subq a2,t0,a2
+ ldq_u t2,0(a0)
+
+bcopy_da_noentry:
+ subq a2,1,t0
+ bic t0,7,t0
+ and a2,7,a2
+ beq t0,bcopy_da_finish2
+
+bcopy_da_lp:
+ ldq_u t3,7(a0)
+ addq a0,8,a0
+ extql t2,a0,t4
+ extqh t3,a0,t5
+ subq t0,8,t0
+ or t4,t5,t5
+ stq t5,0(a1)
+ addq a1,8,a1
+ beq t0,bcopy_da_finish1
+ ldq_u t2,7(a0)
+ addq a0,8,a0
+ extql t3,a0,t4
+ extqh t2,a0,t5
+ subq t0,8,t0
+ or t4,t5,t5
+ stq t5,0(a1)
+ addq a1,8,a1
+ bne t0,bcopy_da_lp
+
+bcopy_da_finish2:
+ /* Do the last new word */
+ mov t2,t3
+
+bcopy_da_finish1:
+ /* Do the last partial word */
+ ldq_u t2,-1(a3)
+ extql t3,a0,t3
+ extqh t2,a0,t2
+ or t2,t3,t2
+ br zero,bcopy_samealign_lp_end
+
+bcopy_da_finish:
+ /* Do the last word in the next source word */
+ ldq_u t3,-1(a3)
+ extql t2,a0,t2
+ extqh t3,a0,t3
+ or t2,t3,t2
+ insqh t2,a1,t3
+ insql t2,a1,t2
+ lda t4,-1(zero)
+ mskql t4,a2,t5
+ cmovne t5,t5,t4
+ insqh t4,a1,t5
+ insql t4,a1,t4
+ addq a1,a2,a4
+ ldq_u t6,0(a1)
+ ldq_u t7,-1(a4)
+ bic t6,t4,t6
+ bic t7,t5,t7
+ and t2,t4,t2
+ and t3,t5,t3
+ or t2,t6,t2
+ or t3,t7,t3
+ stq_u t3,-1(a4)
+ stq_u t2,0(a1)
+ RET
+
+bcopy_overlap:
+ /*
+ * Basically equivalent to previous case, only backwards.
+ * Not quite as highly optimized
+ */
+ addq a0,a2,a3
+ addq a1,a2,a4
+
+ /* less than 8 bytes - don't worry about overlap */
+ cmpule a2,8,t0
+ bne t0,bcopy_ov_short
+
+ /* Possibly do a partial first word */
+ and a4,7,t4
+ beq t4,bcopy_ov_nostart2
+ subq a3,t4,a3
+ subq a4,t4,a4
+ ldq_u t1,0(a3)
+ subq a2,t4,a2
+ ldq_u t2,7(a3)
+ ldq t3,0(a4)
+ extql t1,a3,t1
+ extqh t2,a3,t2
+ or t1,t2,t1
+ mskqh t3,t4,t3
+ mskql t1,t4,t1
+ or t1,t3,t1
+ stq t1,0(a4)
+
+bcopy_ov_nostart2:
+ bic a2,7,t4
+ and a2,7,a2
+ beq t4,bcopy_ov_lp_end
+
+bcopy_ov_lp:
+ /* This could be more pipelined, but it doesn't seem worth it */
+ ldq_u t0,-8(a3)
+ subq a4,8,a4
+ ldq_u t1,-1(a3)
+ subq a3,8,a3
+ extql t0,a3,t0
+ extqh t1,a3,t1
+ subq t4,8,t4
+ or t0,t1,t0
+ stq t0,0(a4)
+ bne t4,bcopy_ov_lp
+
+bcopy_ov_lp_end:
+ beq a2,bcopy_done
+
+ ldq_u t0,0(a0)
+ ldq_u t1,7(a0)
+ ldq_u t2,0(a1)
+ extql t0,a0,t0
+ extqh t1,a0,t1
+ or t0,t1,t0
+ insql t0,a1,t0
+ mskql t2,a1,t2
+ or t2,t0,t2
+ stq_u t2,0(a1)
+
+bcopy_done:
+ RET
+
+bcopy_ov_short:
+ ldq_u t2,0(a0)
+ br zero,bcopy_da_finish
+#endif
+ END(bcopy)
+
+LEAF(copyin, 3)
+#if 0
+ LDGP(pv)
+ lda sp, -16(sp) /* set up stack frame */
+ stq ra, (16-8)(sp) /* save ra */
+ ldiq t0, VM_MAXUSER_ADDRESS /* make sure that src addr */
+ cmpult a0, t0, t1 /* is in user space. */
+ beq t1, copyerr /* if it's not, error out. */
+ lda v0, copyerr /* set up fault handler. */
+ .set noat
+ ldq at_reg, curproc
+ ldq at_reg, P_ADDR(at_reg)
+ stq v0, U_PCB_ONFAULT(at_reg)
+ .set at
+ CALL(bcopy) /* do the copy. */
+ .set noat
+ ldq at_reg, curproc /* kill the fault handler. */
+ ldq at_reg, P_ADDR(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
+ .set at
+ ldq ra, (16-8)(sp) /* restore ra. */
+ lda sp, 16(sp) /* kill stack frame. */
+ mov zero, v0 /* return 0. */
+ RET
+#endif
+ END(copyin)
+
+LEAF(copyout, 3)
+#if 0
+ LDGP(pv)
+ lda sp, -16(sp) /* set up stack frame */
+ stq ra, (16-8)(sp) /* save ra */
+ ldiq t0, VM_MAXUSER_ADDRESS /* make sure that dest addr */
+ cmpult a1, t0, t1 /* is in user space. */
+ beq t1, copyerr /* if it's not, error out. */
+ lda v0, copyerr /* set up fault handler. */
+ .set noat
+ ldq at_reg, curproc
+ ldq at_reg, P_ADDR(at_reg)
+ stq v0, U_PCB_ONFAULT(at_reg)
+ .set at
+ CALL(bcopy) /* do the copy. */
+ .set noat
+ ldq at_reg, curproc /* kill the fault handler. */
+ ldq at_reg, P_ADDR(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
+ .set at
+ ldq ra, (16-8)(sp) /* restore ra. */
+ lda sp, 16(sp) /* kill stack frame. */
+ mov zero, v0 /* return 0. */
+ RET
+#endif
+ END(copyout)
+
+LEAF(copyerr, 0)
+#if 0
+ ldq t0, curproc
+ ldq t0, P_ADDR(t0)
+ stq zero, U_PCB_ONFAULT(t0) /* reset fault handler. */
+ ldq ra, (16-8)(sp) /* restore ra. */
+ lda sp, 16(sp) /* kill stack frame. */
+ ldiq v0, EFAULT /* return EFAULT. */
+ RET
+#endif
+ END(copyerr)
+
+/**************************************************************************/
+
+/*
+ * Kernel setjmp and longjmp. Rather minimalist.
+ *
+ * longjmp(label_t *a)
+ * will generate a "return (1)" from the last call to
+ * setjmp(label_t *a)
+ *	by restoring registers from the stack.
+ */
+
+
+LEAF(setjmp, 1)
+#if 0
+ LDGP(pv)
+
+ stq ra, (0 * 8)(a0) /* return address */
+ stq s0, (1 * 8)(a0) /* callee-saved registers */
+ stq s1, (2 * 8)(a0)
+ stq s2, (3 * 8)(a0)
+ stq s3, (4 * 8)(a0)
+ stq s4, (5 * 8)(a0)
+ stq s5, (6 * 8)(a0)
+ stq s6, (7 * 8)(a0)
+ stq sp, (8 * 8)(a0)
+
+ ldiq t0, 0xbeeffedadeadbabe /* set magic number */
+ stq t0, (9 * 8)(a0)
+
+ mov zero, v0 /* return zero */
+ RET
+#endif
+END(setjmp)
+
+LEAF(longjmp, 1)
+#if 0
+ LDGP(pv)
+
+ ldiq t0, 0xbeeffedadeadbabe /* check magic number */
+ ldq t1, (9 * 8)(a0)
+ cmpeq t0, t1, t0
+ beq t0, longjmp_botch /* if bad, punt */
+
+ ldq ra, (0 * 8)(a0) /* return address */
+ ldq s0, (1 * 8)(a0) /* callee-saved registers */
+ ldq s1, (2 * 8)(a0)
+ ldq s2, (3 * 8)(a0)
+ ldq s3, (4 * 8)(a0)
+ ldq s4, (5 * 8)(a0)
+ ldq s5, (6 * 8)(a0)
+ ldq s6, (7 * 8)(a0)
+ ldq sp, (8 * 8)(a0)
+
+ ldiq v0, 1
+ RET
+
+longjmp_botch:
+ lda a0, longjmp_botchmsg
+ mov ra, a1
+ CALL(panic)
+ call_pal PAL_bugchk
+
+ .data
+longjmp_botchmsg:
+ .asciz "longjmp botch from %p"
+ .text
+#endif
+END(longjmp)
diff --git a/sys/ia64/ia64/swtch.s b/sys/ia64/ia64/swtch.s
new file mode 100644
index 0000000..b7f4de1
--- /dev/null
+++ b/sys/ia64/ia64/swtch.s
@@ -0,0 +1,155 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asm.h>
+#include <machine/mutex.h>
+#include "assym.s"
+
+/**************************************************************************/
+
+/*
+ * savectx: save process context, i.e. callee-saved registers
+ *
+ * Arguments:
+ * in0 'struct pcb *' of the process that needs its context saved
+ *
+ * Return:
+ * ret0 0. (note that for child processes, it seems
+ * like savectx() returns 1, because the return address
+ * in the PCB is set to the return address from savectx().)
+ */
+
+LEAF(savectx, 1)
+ alloc r2=ar.pfs,1,0,0,0
+ ;;
+ flushrs // push out caller's dirty regs
+ mov r3=ar.unat // caller's value for ar.unat
+ ;;
+ mov ar.rsc=r0 // stop the RSE after the flush
+ ;;
+ mov r16=ar.rnat // read RSE's NaT collection
+ mov r17=in0
+ mov r18=ar.bspstore
+ ;;
+ st8.spill [r17]=r4,8 ;; // save r4..r6
+ st8.spill [r17]=r5,8 ;; // and accumulate NaT bits
+ st8.spill [r17]=r6,8 ;;
+ st8.spill [r17]=r7,8 ;;
+
+ stf.spill [r17]=f2,16 ;; // save f2..f5 with NaTVals
+ stf.spill [r17]=f3,16 ;;
+ stf.spill [r17]=f4,16 ;;
+ stf.spill [r17]=f5,16 ;;
+
+ mov r19=ar.unat // NaT bits for r4..r6
+ mov r20=pr
+ mov r21=rp
+ mov ret0=r0 // return zero
+
+ st8 [r17]=r3,8 ;; // save caller's ar.unat
+ st8 [r17]=sp,8 ;; // stack pointer
+ st8 [r17]=r2,8 ;; // ar.pfs
+ st8 [r17]=r18,8 ;; // ar.bspstore
+ st8 [r17]=r19,8 ;; // our NaT bits
+ st8 [r17]=r16,8 ;; // ar.rnat
+ st8 [r17]=r20,8 ;; // pr
+ st8 [r17]=r21,8 ;; // return address
+
+ mov ar.rsc=3 // turn RSE back on
+
+ br.ret.sptk.few rp
+ END(savectx)
+
+/*
+ * restorectx: restore process context, i.e. callee-saved registers
+ *
+ * Arguments:
+ * in0 'struct pcb *' of the process being restored
+ *
+ * Return:
+ * Does not return. We arrange things so that savectx appears to
+ * return a second time with a non-zero return value.
+ */
+
+LEAF(restorectx, 1)
+ alloc r2=ar.pfs,1,0,0,0
+
+ add r3=U_PCB_UNAT,in0 // point at NaT for r4..r7
+ mov ar.rsc=r0 ;; // switch off the RSE
+ ld8 r16=[r3] // load NaT for r4..r7
+ ;;
+ ld8.fill r4=[in0],8 ;; // restore r4
+ ld8.fill r5=[in0],8 ;; // restore r5
+ ld8.fill r6=[in0],8 ;; // restore r6
+ ld8.fill r7=[in0],8 ;; // restore r7
+
+ ldf.fill f2=[in0],8 ;; // restore f2
+ ldf.fill f3=[in0],8 ;; // restore f3
+ ldf.fill f4=[in0],8 ;; // restore f4
+ ldf.fill f5=[in0],8 ;; // restore f5
+
+ ld8 r16=[in0],8 ;; // caller's ar.unat
+ ld8 sp=[in0],8 ;; // stack pointer
+ ld8 r17=[in0],8 ;; // ar.pfs
+ ld8 r18=[in0],16 ;; // ar.bspstore, skip ar.unat
+ ld8 r19=[in0],8 ;; // ar.rnat
+ ld8 r20=[in0],8 ;; // pr
+ ld8 r21=[in0],8 ;; // iip
+
+ mov ar.unat=r16
+ mov ar.pfs=r17
+ mov ar.bspstore=r18 ;;
+ mov ar.rnat=r19
+ mov pr=r20,0x1ffff
+ mov rp=r21
+ mov ret0=r21 // non-zero return
+ ;;
+ loadrs
+ mov ar.rsc=3 // restart RSE
+ ;;
+ br.ret.sptk.few rp
+ END(restorectx)
+
+/**************************************************************************/
+
+IMPORT(want_resched, 4)
+
+/*
+ * switch_trampoline()
+ *
+ * Arrange for a function to be invoked neatly, after a cpu_switch().
+ *
+ * Invokes the function specified by the s0 register with the return
+ * address specified by the s1 register and with one argument, a
+ * pointer to the executing process's proc structure.
+ */
+LEAF(switch_trampoline, 0)
+ MTX_EXIT(sched_lock#, r14, r15)
+ /* XXX write this */
+ END(switch_trampoline)
+
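savectx() and restorectx() above follow a setjmp()/longjmp()-style convention: savectx() returns 0 from the direct call and appears to return a second time, with a non-zero value, when restorectx() later reloads the saved callee-saved state and register-stack backing store. A minimal sketch of the usage this enables, mirroring the cpu_switch() in synch_machdep.c below (the function name here is invented):

/* Illustrative only: the save/restore convention used by cpu_switch(). */
static void
example_switch_to(struct pcb *oldpcb, struct pcb *newpcb)
{
	if (savectx(oldpcb) != 0)
		return;			/* resumed later via restorectx() */
	restorectx(newpcb);		/* switches context; does not return */
}
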
diff --git a/sys/ia64/ia64/synch_machdep.c b/sys/ia64/ia64/synch_machdep.c
new file mode 100644
index 0000000..ebcd7a4
--- /dev/null
+++ b/sys/ia64/ia64/synch_machdep.c
@@ -0,0 +1,562 @@
+/*-
+ * Copyright (c) 1997, 1998 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
+ * $FreeBSD$
+ */
+
+#define MTX_STRS /* define common strings */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_map.h>
+#include <sys/user.h>
+#include <ddb/ddb.h>
+#include <machine/atomic.h>
+#include <machine/clock.h>
+#include <machine/cpu.h>
+#include <machine/mutex.h>
+
+/* All mutexes in system (used for debug/panic) */
+struct mtx all_mtx = { MTX_UNOWNED, 0, 0, "All mutexes queue head",
+ TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
+ { NULL, NULL }, &all_mtx, &all_mtx
+#ifdef SMP_DEBUG
+ , NULL, { NULL, NULL }, NULL, 0
+#endif
+};
+
+int mtx_cur_cnt;
+int mtx_max_cnt;
+
+extern void _mtx_enter_giant_def(void);
+extern void _mtx_exit_giant_def(void);
+
+static void propagate_priority(struct proc *) __unused;
+
+#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
+#define mtx_owner(m) (mtx_unowned(m) ? NULL \
+ : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
+
+#define RETIP(x) *(((u_int64_t *)(&x)) - 1)
+#define SET_PRIO(p, pri) (p)->p_priority = (pri)
+
+/*
+ * XXX Temporary, for use from assembly language
+ */
+
+void
+_mtx_enter_giant_def(void)
+{
+
+ mtx_enter(&Giant, MTX_DEF);
+}
+
+void
+_mtx_exit_giant_def(void)
+{
+
+ mtx_exit(&Giant, MTX_DEF);
+}
+
+static void
+propagate_priority(struct proc *p)
+{
+ int pri = p->p_priority;
+ struct mtx *m = p->p_blocked;
+
+ for (;;) {
+ struct proc *p1;
+
+ p = mtx_owner(m);
+
+ if (p == NULL) {
+ /*
+ * This really isn't quite right. Really
+ * ought to bump priority of process that
+ * next acquires the mutex.
+ */
+ MPASS(m->mtx_lock == MTX_CONTESTED);
+ return;
+ }
+ MPASS(p->p_magic == P_MAGIC);
+ if (p->p_priority <= pri)
+ return;
+ /*
+ * If lock holder is actually running, just bump priority.
+ */
+ if (TAILQ_NEXT(p, p_procq) == NULL) {
+ MPASS(p->p_stat == SRUN || p->p_stat == SZOMB);
+ SET_PRIO(p, pri);
+ return;
+ }
+ /*
+ * If on run queue move to new run queue, and
+ * quit.
+ */
+ if (p->p_stat == SRUN) {
+ MPASS(p->p_blocked == NULL);
+ remrunqueue(p);
+ SET_PRIO(p, pri);
+ setrunqueue(p);
+ return;
+ }
+
+ /*
+ * If we aren't blocked on a mutex, give up and quit.
+ */
+ if (p->p_stat != SMTX) {
+ return;
+ }
+
+ /*
+ * Pick up the mutex that p is blocked on.
+ */
+ m = p->p_blocked;
+ MPASS(m != NULL);
+
+ /*
+ * Check if the proc needs to be moved up on
+ * the blocked chain
+ */
+ if ((p1 = TAILQ_PREV(p, rq, p_procq)) == NULL ||
+ p1->p_priority <= pri)
+ continue;
+
+ /*
+ * Remove proc from blocked chain
+ */
+ TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
+ MPASS(p1->p_magic == P_MAGIC);
+ if (p1->p_priority > pri)
+ break;
+ }
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ CTR4(KTR_LOCK,
+ "propagate priority: p 0x%p moved before 0x%p on [0x%p] %s",
+ p, p1, m, m->mtx_description);
+ }
+}
+
+void
+mtx_enter_hard(struct mtx *m, int type, int psr)
+{
+ struct proc *p = CURPROC;
+
+ KASSERT(p != NULL, ("curproc is NULL in mutex"));
+
+ switch (type) {
+ case MTX_DEF:
+ if ((m->mtx_lock & MTX_FLAGMASK) == (u_int64_t)p) {
+ m->mtx_recurse++;
+ atomic_set_64(&m->mtx_lock, MTX_RECURSE);
+ CTR1(KTR_LOCK, "mtx_enter: 0x%p recurse", m);
+ return;
+ }
+ CTR3(KTR_LOCK, "mtx_enter: 0x%p contested (lock=%lx) [0x%lx]",
+ m, m->mtx_lock, RETIP(m));
+ while (!atomic_cmpset_64(&m->mtx_lock, MTX_UNOWNED,
+ (u_int64_t)p)) {
+ int v;
+ struct proc *p1;
+
+ mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
+ /*
+ * check if the lock has been released while
+ * waiting for the schedlock.
+ */
+ if ((v = m->mtx_lock) == MTX_UNOWNED) {
+ mtx_exit(&sched_lock, MTX_SPIN);
+ continue;
+ }
+ /*
+ * The mutex was marked contested on release. This
+ * means that there are processes blocked on it.
+ */
+ if (v == MTX_CONTESTED) {
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ KASSERT(p1 != NULL, ("contested mutex has no contesters"));
+ KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
+ m->mtx_lock = (u_int64_t)p | MTX_CONTESTED;
+ if (p1->p_priority < p->p_priority) {
+ SET_PRIO(p, p1->p_priority);
+ }
+ mtx_exit(&sched_lock, MTX_SPIN);
+ return;
+ }
+ /*
+			 * If the mutex isn't already contested and
+			 * a failure occurs setting the contested bit, the
+			 * mutex was either released or the
+			 * state of the RECURSION bit changed.
+ */
+ if ((v & MTX_CONTESTED) == 0 &&
+ !atomic_cmpset_64(&m->mtx_lock, v,
+ v | MTX_CONTESTED)) {
+ mtx_exit(&sched_lock, MTX_SPIN);
+ continue;
+ }
+
+ /* We definitely have to sleep for this lock */
+ mtx_assert(m, MA_NOTOWNED);
+
+#ifdef notyet
+ /*
+ * If we're borrowing an interrupted thread's VM
+			 * If we're borrowing an interrupted thread's VM
+			 * context, we must clean up before going to sleep.
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ CTR2(KTR_LOCK,
+ "mtx_enter: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
+ }
+ }
+#endif
+
+ /* Put us on the list of procs blocked on this mutex */
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ p1 = (struct proc *)(m->mtx_lock &
+ MTX_FLAGMASK);
+ LIST_INSERT_HEAD(&p1->p_contested, m,
+ mtx_contested);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ } else {
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
+ if (p1->p_priority > p->p_priority)
+ break;
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
+ p_procq);
+ }
+
+ p->p_blocked = m; /* Who we're blocked on */
+ p->p_stat = SMTX;
+#if 0
+ propagate_priority(p);
+#endif
+ CTR3(KTR_LOCK, "mtx_enter: p 0x%p blocked on [0x%p] %s",
+ p, m, m->mtx_description);
+ mi_switch();
+ CTR3(KTR_LOCK,
+ "mtx_enter: p 0x%p free from blocked on [0x%p] %s",
+ p, m, m->mtx_description);
+ mtx_exit(&sched_lock, MTX_SPIN);
+ }
+ ia64_mf();
+ return;
+ case MTX_SPIN:
+ case MTX_SPIN | MTX_FIRST:
+ case MTX_SPIN | MTX_TOPHALF:
+ {
+ int i = 0;
+
+ if (m->mtx_lock == (u_int64_t)p) {
+ m->mtx_recurse++;
+ return;
+ }
+ CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
+ for (;;) {
+ if (atomic_cmpset_64(&m->mtx_lock, MTX_UNOWNED,
+ (u_int64_t)p)) {
+ ia64_mf();
+ break;
+ }
+ while (m->mtx_lock != MTX_UNOWNED) {
+ if (i++ < 1000000)
+ continue;
+ if (i++ < 6000000)
+ DELAY (1);
+#ifdef DDB
+ else if (!db_active)
+#else
+ else
+#endif
+ panic(
+ "spin lock %s held by 0x%lx for > 5 seconds",
+ m->mtx_description, m->mtx_lock);
+ }
+ }
+
+#ifdef SMP_DEBUG
+ if (type != MTX_SPIN)
+ m->mtx_saveipl = 0xbeefface;
+ else
+#endif
+ m->mtx_savepsr = psr;
+ CTR1(KTR_LOCK, "mtx_enter: 0x%p spin done", m);
+ return;
+ }
+ }
+}
+
+void
+mtx_exit_hard(struct mtx *m, int type)
+{
+ struct proc *p, *p1;
+ struct mtx *m1;
+ int pri;
+
+ switch (type) {
+ case MTX_DEF:
+ case MTX_DEF | MTX_NOSWITCH:
+ if (m->mtx_recurse != 0) {
+ if (--(m->mtx_recurse) == 0)
+ atomic_clear_64(&m->mtx_lock, MTX_RECURSE);
+ CTR1(KTR_LOCK, "mtx_exit: 0x%p unrecurse", m);
+ return;
+ }
+ mtx_enter(&sched_lock, MTX_SPIN);
+ CTR1(KTR_LOCK, "mtx_exit: 0x%p contested", m);
+ p = CURPROC;
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(p->p_magic == P_MAGIC);
+ MPASS(p1->p_magic == P_MAGIC);
+ TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ LIST_REMOVE(m, mtx_contested);
+ atomic_cmpset_64(&m->mtx_lock, m->mtx_lock,
+ MTX_UNOWNED);
+ CTR1(KTR_LOCK, "mtx_exit: 0x%p not held", m);
+ } else
+ m->mtx_lock = MTX_CONTESTED;
+ pri = MAXPRI;
+ LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
+ int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
+ if (cp < pri)
+ pri = cp;
+ }
+ if (pri > p->p_nativepri)
+ pri = p->p_nativepri;
+ SET_PRIO(p, pri);
+ CTR2(KTR_LOCK, "mtx_exit: 0x%p contested setrunqueue 0x%p",
+ m, p1);
+ p1->p_blocked = NULL;
+ p1->p_stat = SRUN;
+ setrunqueue(p1);
+ if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
+#ifdef notyet
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ CTR2(KTR_LOCK,
+					    "mtx_exit: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
+ }
+ }
+#endif
+ setrunqueue(p);
+ CTR2(KTR_LOCK, "mtx_exit: 0x%p switching out lock=0x%lx",
+ m, m->mtx_lock);
+ mi_switch();
+ CTR2(KTR_LOCK, "mtx_exit: 0x%p resuming lock=0x%lx",
+ m, m->mtx_lock);
+ }
+ mtx_exit(&sched_lock, MTX_SPIN);
+ return;
+ case MTX_SPIN:
+ case MTX_SPIN | MTX_FIRST:
+ if (m->mtx_recurse != 0) {
+ m->mtx_recurse--;
+ return;
+ }
+ ia64_mf();
+ if (atomic_cmpset_64(&m->mtx_lock, CURTHD, MTX_UNOWNED)) {
+ if (type & MTX_FIRST)
+ enable_intr(); /* XXX is this kosher? */
+ else {
+ MPASS(m->mtx_saveipl != 0xbeefface);
+ restore_intr(m->mtx_savepsr);
+ }
+ return;
+ }
+		panic("unsuccessful release of spin lock");
+ case MTX_SPIN | MTX_TOPHALF:
+ if (m->mtx_recurse != 0) {
+ m->mtx_recurse--;
+ return;
+ }
+ ia64_mf();
+ if (atomic_cmpset_64(&m->mtx_lock, CURTHD, MTX_UNOWNED))
+ return;
+		panic("unsuccessful release of spin lock");
+ default:
+ panic("mtx_exit_hard: unsupported type 0x%x\n", type);
+ }
+}
+
+#define MV_DESTROY	0	/* validate before destroy */
+#define MV_INIT 1 /* validate before init */
+
+#ifdef SMP_DEBUG
+
+int mtx_validate __P((struct mtx *, int));
+
+int
+mtx_validate(struct mtx *m, int when)
+{
+ struct mtx *mp;
+ int i;
+ int retval = 0;
+
+ if (m == &all_mtx || cold)
+ return 0;
+
+ mtx_enter(&all_mtx, MTX_DEF);
+ ASS(kernacc((caddr_t)all_mtx.mtx_next, 4, 1) == 1);
+ ASS(all_mtx.mtx_next->mtx_prev == &all_mtx);
+ for (i = 0, mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
+ if (kernacc((caddr_t)mp->mtx_next, 4, 1) != 1) {
+ panic("mtx_validate: mp=%p mp->mtx_next=%p",
+ mp, mp->mtx_next);
+ }
+ i++;
+ if (i > mtx_cur_cnt) {
+ panic("mtx_validate: too many in chain, known=%d\n",
+ mtx_cur_cnt);
+ }
+ }
+ ASS(i == mtx_cur_cnt);
+ switch (when) {
+ case MV_DESTROY:
+ for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
+ if (mp == m)
+ break;
+ ASS(mp == m);
+ break;
+ case MV_INIT:
+ for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
+ if (mp == m) {
+ /*
+				 * Not good. This mutex already exists
+ */
+ retval = 1;
+#if 1
+ printf("re-initing existing mutex %s\n",
+ m->mtx_description);
+ ASS(m->mtx_lock == MTX_UNOWNED);
+ retval = 1;
+#else
+ panic("re-initing existing mutex %s",
+ m->mtx_description);
+#endif
+ }
+ }
+ mtx_exit(&all_mtx, MTX_DEF);
+ return (retval);
+}
+#endif
+
+void
+mtx_init(struct mtx *m, char *t, int flag)
+{
+
+ CTR2(KTR_LOCK, "mtx_init 0x%p (%s)", m, t);
+#ifdef SMP_DEBUG
+ if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
+ return;
+#endif
+ bzero((void *)m, sizeof *m);
+ TAILQ_INIT(&m->mtx_blocked);
+ m->mtx_description = t;
+ m->mtx_lock = MTX_UNOWNED;
+ /* Put on all mutex queue */
+ mtx_enter(&all_mtx, MTX_DEF);
+ m->mtx_next = &all_mtx;
+ m->mtx_prev = all_mtx.mtx_prev;
+ m->mtx_prev->mtx_next = m;
+ all_mtx.mtx_prev = m;
+ if (++mtx_cur_cnt > mtx_max_cnt)
+ mtx_max_cnt = mtx_cur_cnt;
+ mtx_exit(&all_mtx, MTX_DEF);
+ witness_init(m, flag);
+}
+
+void
+mtx_destroy(struct mtx *m)
+{
+
+ CTR2(KTR_LOCK, "mtx_destroy 0x%p (%s)", m, m->mtx_description);
+#ifdef SMP_DEBUG
+ if (m->mtx_next == NULL)
+ panic("mtx_destroy: %p (%s) already destroyed",
+ m, m->mtx_description);
+
+ if (!mtx_owned(m)) {
+ ASS(m->mtx_lock == MTX_UNOWNED);
+ } else {
+ ASS((m->mtx_lock & (MTX_RECURSE|MTX_CONTESTED)) == 0);
+ }
+ mtx_validate(m, MV_DESTROY); /* diagnostic */
+#endif
+
+#ifdef WITNESS
+ if (m->mtx_witness)
+ witness_destroy(m);
+#endif /* WITNESS */
+
+ /* Remove from the all mutex queue */
+ mtx_enter(&all_mtx, MTX_DEF);
+ m->mtx_next->mtx_prev = m->mtx_prev;
+ m->mtx_prev->mtx_next = m->mtx_next;
+#ifdef SMP_DEBUG
+ m->mtx_next = m->mtx_prev = NULL;
+#endif
+ mtx_cur_cnt--;
+ mtx_exit(&all_mtx, MTX_DEF);
+}
+
+void
+cpu_switch()
+{
+ struct proc *p = curproc;
+
+ if (savectx(&p->p_addr->u_pcb))
+ return;
+
+ p = chooseproc();
+ curproc = p;
+ restorectx(&p->p_addr->u_pcb);
+}
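This file is the ia64 port of BSDI's mutex machinery (the early SMPng API): mtx_init() registers a lock on the global all_mtx list, mtx_enter()/mtx_exit() take a type argument, MTX_DEF mutexes put a contending process to sleep in mtx_enter_hard(), and MTX_SPIN mutexes busy-wait with the caller's PSR saved in mtx_savepsr. A hedged usage sketch (the lock and the counter it protects are invented for illustration):

/* Illustrative only: typical use of the mutex API implemented above. */
static struct mtx example_mtx;
static int example_count;

static void
example_setup(void)
{
	mtx_init(&example_mtx, "example counter lock", MTX_DEF);
}

static void
example_bump(void)
{
	mtx_enter(&example_mtx, MTX_DEF);	/* may sleep if contested */
	example_count++;
	mtx_exit(&example_mtx, MTX_DEF);
}
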
diff --git a/sys/ia64/ia64/sys_machdep.c b/sys/ia64/ia64/sys_machdep.c
new file mode 100644
index 0000000..84e7620
--- /dev/null
+++ b/sys/ia64/ia64/sys_machdep.c
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
+ * $FreeBSD$
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysproto.h>
+#include <sys/sysent.h>
+#include <sys/proc.h>
+
+#include <vm/vm.h>
+#include <sys/lock.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+
+#include <sys/user.h>
+
+#include <machine/cpu.h>
+#include <machine/sysarch.h>
+
+#include <vm/vm_kern.h> /* for kernel_map */
+
+#include <machine/fpu.h>
+
+#ifndef _SYS_SYSPROTO_H_
+struct sysarch_args {
+ int op;
+ char *parms;
+};
+#endif
+
+int
+sysarch(p, uap)
+ struct proc *p;
+ register struct sysarch_args *uap;
+{
+ int error = 0;
+
+ switch(SCARG(uap,op)) {
+ default:
+ error = EINVAL;
+ break;
+ }
+ return (error);
+}
diff --git a/sys/ia64/ia64/timerreg.h b/sys/ia64/ia64/timerreg.h
new file mode 100644
index 0000000..0bfd7fc
--- /dev/null
+++ b/sys/ia64/ia64/timerreg.h
@@ -0,0 +1,110 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Header: timerreg.h,v 1.2 93/02/28 15:08:58 mccanne Exp
+ * $FreeBSD$
+ */
+
+/*
+ *
+ * Register definitions for the Intel 8253 Programmable Interval Timer.
+ *
+ * This chip has three independent 16-bit down counters that can be
+ * read on the fly. There are three mode registers and three countdown
+ * registers. The countdown registers are addressed directly, via the
+ * first three I/O ports. The three mode registers are accessed via
+ * the fourth I/O port, with two bits in the mode byte indicating the
+ * register. (Why are hardware interfaces always so braindead?).
+ *
+ * To write a value into the countdown register, the mode register
+ * is first programmed with a command indicating which byte of
+ * the two-byte register is to be modified. The three possibilities
+ * are load msb (TIMER_MSB), load lsb (TIMER_LSB), or load lsb then
+ * msb (TIMER_16BIT).
+ *
+ * To read the current value ("on the fly") from the countdown register,
+ * you write a "latch" command into the mode register, then read the stable
+ * value from the corresponding I/O port. For example, you write
+ * TIMER_LATCH into the corresponding mode register. Presumably,
+ * after doing this, a write operation to the I/O port would result
+ * in undefined behavior (but hopefully not fry the chip).
+ * Reading in this manner has no side effects.
+ *
+ * [IBM-PC]
+ * The outputs of the three timers are connected as follows:
+ *
+ * timer 0 -> irq 0
+ * timer 1 -> dma chan 0 (for dram refresh)
+ * timer 2 -> speaker (via keyboard controller)
+ *
+ * Timer 0 is used to call hardclock.
+ * Timer 2 is used to generate console beeps.
+ *
+ * [PC-9801]
+ * The outputs of the three timers are connected as follows:
+ *
+ * timer 0 -> irq 0
+ * timer 1 -> speaker (via keyboard controller)
+ * timer 2 -> RS232C
+ *
+ * Timer 0 is used to call hardclock.
+ * Timer 1 is used to generate console beeps.
+ */
+
+/*
+ * I/O port addresses for the timer registers, and macros for specifying
+ * values to be written into a mode register.
+ */
+#define TIMER_CNTR0 (IO_TIMER1 + 0) /* timer 0 counter port */
+#ifdef PC98
+#define TIMER_CNTR1 0x3fdb /* timer 1 counter port */
+#define TIMER_CNTR2 (IO_TIMER1 + 4) /* timer 2 counter port */
+#define TIMER_MODE (IO_TIMER1 + 6) /* timer mode port */
+#else
+#define TIMER_CNTR1 (IO_TIMER1 + 1) /* timer 1 counter port */
+#define TIMER_CNTR2 (IO_TIMER1 + 2) /* timer 2 counter port */
+#define TIMER_MODE (IO_TIMER1 + 3) /* timer mode port */
+#endif
+#define TIMER_SEL0 0x00 /* select counter 0 */
+#define TIMER_SEL1 0x40 /* select counter 1 */
+#define TIMER_SEL2 0x80 /* select counter 2 */
+#define TIMER_INTTC 0x00 /* mode 0, intr on terminal cnt */
+#define TIMER_ONESHOT 0x02 /* mode 1, one shot */
+#define TIMER_RATEGEN 0x04 /* mode 2, rate generator */
+#define TIMER_SQWAVE 0x06 /* mode 3, square wave */
+#define TIMER_SWSTROBE 0x08 /* mode 4, s/w triggered strobe */
+#define TIMER_HWSTROBE 0x0a /* mode 5, h/w triggered strobe */
+#define TIMER_LATCH 0x00 /* latch counter for reading */
+#define TIMER_LSB 0x10 /* r/w counter LSB */
+#define TIMER_MSB 0x20 /* r/w counter MSB */
+#define TIMER_16BIT 0x30 /* r/w counter 16 bits, LSB first */
+#define TIMER_BCD 0x01 /* count in BCD */
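+
+/*
+ * Illustrative sketch (not part of the original header): one plausible
+ * way to program timer 0 as a rate generator and then latch and read it
+ * back, using the macros above.  It assumes outb()/inb() from
+ * <machine/cpufunc.h>, an IO_TIMER1 base from the ISA headers, and a
+ * hypothetical divisor argument `count'.
+ */
+#if 0
+static __inline u_int16_t
+example_timer0_program(u_int16_t count)
+{
+	u_int16_t cur;
+
+	/* Mode 2 (rate generator), then write the divisor LSB first. */
+	outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
+	outb(TIMER_CNTR0, count & 0xff);
+	outb(TIMER_CNTR0, count >> 8);
+
+	/* Latch counter 0 and read back the stable value, LSB first. */
+	outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
+	cur = inb(TIMER_CNTR0);
+	cur |= inb(TIMER_CNTR0) << 8;
+	return (cur);
+}
+#endif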
+
diff --git a/sys/ia64/ia64/trap.c b/sys/ia64/ia64/trap.c
new file mode 100644
index 0000000..1c66e63
--- /dev/null
+++ b/sys/ia64/ia64/trap.c
@@ -0,0 +1,782 @@
+/* $FreeBSD$ */
+/* From: src/sys/alpha/alpha/trap.c,v 1.33 */
+/* $NetBSD: trap.c,v 1.31 1998/03/26 02:21:46 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include "opt_ddb.h"
+#include "opt_ktrace.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <machine/mutex.h>
+#include <sys/ktr.h>
+#include <sys/sysproto.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/exec.h>
+#include <sys/lock.h>
+#include <sys/vmmeter.h>
+#include <sys/sysent.h>
+#include <sys/syscall.h>
+#include <sys/pioctl.h>
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_param.h>
+#include <sys/user.h>
+#include <sys/ptrace.h>
+#include <machine/clock.h>
+#include <machine/md_var.h>
+#include <machine/reg.h>
+#include <machine/pal.h>
+#include <machine/fpu.h>
+#include <machine/smp.h>
+#include <machine/mutex.h>
+
+#ifdef KTRACE
+#include <sys/uio.h>
+#include <sys/ktrace.h>
+#endif
+
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
+u_int32_t want_resched;
+
+static int unaligned_fixup(struct trapframe *framep, struct proc *p);
+
+/*
+ * Define the code needed before returning to user mode, for
+ * trap and syscall.
+ */
+static int
+userret(register struct proc *p, u_int64_t pc, u_quad_t oticks, int have_giant)
+{
+ int sig, s;
+
+ /* take pending signals */
+ while ((sig = CURSIG(p)) != 0) {
+ if (have_giant == 0) {
+ mtx_enter(&Giant, MTX_DEF);
+ have_giant = 1;
+ }
+ postsig(sig);
+ }
+ p->p_priority = p->p_usrpri;
+ if (want_resched) {
+ /*
+ * Since we are curproc, a clock interrupt could
+ * change our priority without changing run queues
+ * (the running process is not kept on a run queue).
+ * If this happened after we setrunqueue ourselves but
+ * before we switch()'ed, we might not be on the queue
+ * indicated by our priority.
+ */
+ s = splstatclock();
+ mtx_enter(&sched_lock, MTX_SPIN);
+ setrunqueue(p);
+ p->p_stats->p_ru.ru_nivcsw++;
+ mi_switch();
+ mtx_exit(&sched_lock, MTX_SPIN);
+ splx(s);
+ while ((sig = CURSIG(p)) != 0) {
+ if (have_giant == 0) {
+ mtx_enter(&Giant, MTX_DEF);
+ have_giant = 1;
+ }
+ postsig(sig);
+ }
+ }
+
+ /*
+ * If profiling, charge recent system time to the trapped pc.
+ */
+ if (p->p_flag & P_PROFIL) {
+ if (have_giant == 0) {
+ mtx_enter(&Giant, MTX_DEF);
+ have_giant = 1;
+ }
+ addupc_task(p, pc, (int)(p->p_sticks - oticks) * psratio);
+ }
+
+ curpriority = p->p_priority;
+ return (have_giant);
+}
+
+static const char *ia64_vector_names[] = {
+ "VHPT Translation", /* 0 */
+ "Instruction TLB", /* 1 */
+ "Data TLB", /* 2 */
+ "Alternate Instruction TLB", /* 3 */
+ "Alternate Data TLB", /* 4 */
+ "Data Nested TLB", /* 5 */
+ "Instruction Key Miss", /* 6 */
+ "Data Key Miss", /* 7 */
+ "Dirty-Bit", /* 8 */
+ "Instruction Access-Bit", /* 9 */
+ "Data Access-Bit", /* 10 */
+ "Break Instruction", /* 11 */
+ "External Interrupt", /* 12 */
+ "Reserved 13", /* 13 */
+ "Reserved 14", /* 14 */
+ "Reserved 15", /* 15 */
+ "Reserved 16", /* 16 */
+ "Reserved 17", /* 17 */
+ "Reserved 18", /* 18 */
+ "Reserved 19", /* 19 */
+ "Page Not Present", /* 20 */
+ "Key Permission", /* 21 */
+ "Instruction Access Rights", /* 22 */
+ "Data Access Rights", /* 23 */
+ "General Exception", /* 24 */
+ "Disabled FP-Register", /* 25 */
+ "NaT Consumption", /* 26 */
+ "Speculation", /* 27 */
+ "Reserved 28", /* 28 */
+ "Debug", /* 29 */
+ "Unaligned Reference", /* 30 */
+ "Unsupported Data Reference", /* 31 */
+ "Floating-point Fault", /* 32 */
+ "Floating-point Trap", /* 33 */
+ "Lower-Privilege Transfer Trap", /* 34 */
+ "Taken Branch Trap", /* 35 */
+ "Single Step Trap", /* 36 */
+ "Reserved 37", /* 37 */
+ "Reserved 38", /* 38 */
+ "Reserved 39", /* 39 */
+ "Reserved 40", /* 40 */
+ "Reserved 41", /* 41 */
+ "Reserved 42", /* 42 */
+ "Reserved 43", /* 43 */
+ "Reserved 44", /* 44 */
+ "IA-32 Exception", /* 45 */
+ "IA-32 Intercept", /* 46 */
+ "IA-32 Interrupt", /* 47 */
+ "Reserved 48", /* 48 */
+ "Reserved 49", /* 49 */
+ "Reserved 50", /* 50 */
+ "Reserved 51", /* 51 */
+ "Reserved 52", /* 52 */
+ "Reserved 53", /* 53 */
+ "Reserved 54", /* 54 */
+ "Reserved 55", /* 55 */
+ "Reserved 56", /* 56 */
+ "Reserved 57", /* 57 */
+ "Reserved 58", /* 58 */
+ "Reserved 59", /* 59 */
+ "Reserved 60", /* 60 */
+ "Reserved 61", /* 61 */
+ "Reserved 62", /* 62 */
+ "Reserved 63", /* 63 */
+ "Reserved 64", /* 64 */
+ "Reserved 65", /* 65 */
+ "Reserved 66", /* 66 */
+ "Reserved 67", /* 67 */
+};
+
+static void
+printtrap(int vector, struct trapframe *framep, int isfatal, int user)
+{
+ printf("\n");
+ printf("%s %s trap:\n", isfatal? "fatal" : "handled",
+ user ? "user" : "kernel");
+ printf("\n");
+ printf(" trap vector = 0x%x (%s)\n",
+ vector, ia64_vector_names[vector]);
+ printf(" iip = 0x%lx\n", framep->tf_cr_iip);
+ printf(" ipsr = 0x%lx\n", framep->tf_cr_ipsr);
+ printf(" isr = 0x%lx\n", framep->tf_cr_isr);
+ printf(" ifa = 0x%lx\n", framep->tf_cr_ifa);
+ printf(" curproc = %p\n", curproc);
+ if (curproc != NULL)
+ printf(" pid = %d, comm = %s\n", curproc->p_pid,
+ curproc->p_comm);
+ printf("\n");
+}
+
+/*
+ * Trap is called from exception.s to handle most types of processor traps.
+ * System calls are broken out for efficiency and ASTs are broken out
+ * to make the code a bit cleaner and more representative of the
+ * architecture.
+ */
+/*ARGSUSED*/
+void
+trap(int vector, struct trapframe *framep)
+{
+ register struct proc *p;
+ register int i;
+ u_int64_t ucode;
+ u_quad_t sticks;
+ int user;
+
+#if 0
+ /*
+ * Find our per-cpu globals.
+ */
+ globalp = (struct globaldata *) alpha_pal_rdval(); /* XXx */
+#endif
+
+ cnt.v_trap++;
+ p = curproc;
+ ucode = 0;
+
+ user = ((framep->tf_cr_ipsr & IA64_PSR_CPL) == IA64_PSR_CPL_USER);
+ if (user) {
+ sticks = p->p_sticks;
+ p->p_md.md_tf = framep;
+ } else {
+ sticks = 0; /* XXX bogus -Wuninitialized warning */
+ }
+
+ switch (vector) {
+ case IA64_VEC_UNALIGNED_REFERENCE:
+ /*
+ * If user-land, do whatever fixups, printing, and
+ * signalling is appropriate (based on system-wide
+ * and per-process unaligned-access-handling flags).
+ */
+ if (user) {
+ mtx_enter(&Giant, MTX_DEF);
+ if ((i = unaligned_fixup(framep, p)) == 0) {
+ mtx_exit(&Giant, MTX_DEF);
+ goto out;
+ }
+ mtx_exit(&Giant, MTX_DEF);
+ ucode = framep->tf_cr_ifa; /* VA */
+ break;
+ }
+
+ /*
+ * Unaligned access from kernel mode is always an error,
+ * EVEN IF A COPY FAULT HANDLER IS SET!
+ *
+ * It's an error if a copy fault handler is set because
+ * the various routines which do user-initiated copies
+ * do so in a bcopy-like manner. In other words, the
+ * kernel never assumes that pointers provided by the
+ * user are properly aligned, and so if the kernel
+ * does cause an unaligned access it's a kernel bug.
+ */
+ goto dopanic;
+
+ case IA64_VEC_FLOATING_POINT_FAULT:
+ case IA64_VEC_FLOATING_POINT_TRAP:
+ /*
+ * If user-land, give a SIGFPE if software completion
+ * is not requested or if the completion fails.
+ */
+ if (user) {
+ i = SIGFPE;
+ ucode = /*a0*/ 0; /* exception summary */
+ break;
+ }
+
+ /* Always fatal in kernel. Should never happen. */
+ goto dopanic;
+
+ case IA64_VEC_BREAK:
+ /*
+ * This should never happen. Breaks enter the kernel
+ * via break().
+ */
+ goto dopanic;
+
+
+ case IA64_VEC_DISABLED_FP:
+ /*
+ * on exit from the kernel, if proc == fpcurproc,
+ * FP is enabled.
+ */
+ if (fpcurproc == p) {
+ printf("trap: fp disabled for fpcurproc == %p", p);
+ goto dopanic;
+ }
+
+ ia64_fpstate_switch(p);
+ goto out;
+ break;
+
+ case IA64_VEC_PAGE_NOT_PRESENT:
+ {
+ vm_offset_t va = framep->tf_cr_ifa;
+ struct vmspace *vm = NULL;
+ vm_map_t map;
+ vm_prot_t ftype = 0;
+ int rv;
+
+ mtx_enter(&Giant, MTX_DEF);
+ /*
+ * If it was caused by fuswintr or suswintr,
+ * just punt. Note that we check the faulting
+ * address against the address accessed by
+ * [fs]uswintr, in case another fault happens
+ * when they are running.
+ */
+ if (!user &&
+ p != NULL &&
+ p->p_addr->u_pcb.pcb_onfault ==
+ (unsigned long)fswintrberr &&
+ p->p_addr->u_pcb.pcb_accessaddr == va) {
+ framep->tf_cr_iip = p->p_addr->u_pcb.pcb_onfault;
+ p->p_addr->u_pcb.pcb_onfault = 0;
+ mtx_exit(&Giant, MTX_DEF);
+ goto out;
+ }
+
+ /*
+ * It is only a kernel address space fault iff:
+ * 1. !user and
+ * 2. pcb_onfault not set or
+ * 3. pcb_onfault set but kernel space data fault
+ * The last can occur during an exec() copyin where the
+ * argument space is lazy-allocated.
+ *
+ * For the purposes of the Linux emulator, we allow
+ * kernel accesses to a small region of the
+ * user stack which the emulator uses to
+ * translate syscall arguments.
+ */
+ if (!user
+ && ((va >= VM_MIN_KERNEL_ADDRESS)
+ || (p == NULL)
+ || (p->p_addr->u_pcb.pcb_onfault == 0))) {
+ if (va >= trunc_page(PS_STRINGS
+ - szsigcode
+ - SPARE_USRSPACE)
+ && va < round_page(PS_STRINGS
+ - szsigcode)) {
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+ } else {
+ map = kernel_map;
+ }
+ } else {
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+ }
+
+ if (framep->tf_cr_isr & (IA64_ISR_X | IA64_ISR_R))
+ ftype = VM_PROT_READ;
+ else
+ ftype = VM_PROT_WRITE;
+
+ va = trunc_page((vm_offset_t)va);
+
+ if (map != kernel_map) {
+ /*
+ * Keep swapout from messing with us
+			 * during this critical time.
+ */
+ ++p->p_lock;
+
+ /*
+ * Grow the stack if necessary
+ */
+ /* grow_stack returns false only if va falls into
+ * a growable stack region and the stack growth
+ * fails. It returns true if va was not within
+ * a growable stack region, or if the stack
+ * growth succeeded.
+ */
+ if (!grow_stack (p, va)) {
+ rv = KERN_FAILURE;
+ --p->p_lock;
+ goto nogo;
+ }
+
+
+ /* Fault in the user page: */
+ rv = vm_fault(map, va, ftype,
+ (ftype & VM_PROT_WRITE)
+ ? VM_FAULT_DIRTY
+ : VM_FAULT_NORMAL);
+
+ --p->p_lock;
+ } else {
+ /*
+ * Don't have to worry about process
+ * locking or stacks in the kernel.
+ */
+ rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
+ }
+
+ nogo:;
+ /*
+ * If this was a stack access we keep track of the
+ * maximum accessed stack size. Also, if vm_fault
+ * gets a protection failure it is due to accessing
+ * the stack region outside the current limit and
+ * we need to reflect that as an access error.
+ */
+ if (map != kernel_map &&
+ (caddr_t)va >= vm->vm_maxsaddr
+ && (caddr_t)va < (caddr_t)USRSTACK) {
+ if (rv == KERN_SUCCESS) {
+ unsigned nss;
+
+ nss = ia64_btop(round_page(USRSTACK - va));
+ if (nss > vm->vm_ssize)
+ vm->vm_ssize = nss;
+ } else if (rv == KERN_PROTECTION_FAILURE)
+ rv = KERN_INVALID_ADDRESS;
+ }
+ if (rv == KERN_SUCCESS) {
+ mtx_exit(&Giant, MTX_DEF);
+ goto out;
+ }
+
+ mtx_exit(&Giant, MTX_DEF);
+ ucode = va;
+ i = SIGSEGV;
+#ifdef DEBUG
+ printtrap(vector, framep, 1, user);
+#endif
+ break;
+ }
+
+ default:
+ goto dopanic;
+ }
+
+#ifdef DEBUG
+ printtrap(vector, framep, 1, user);
+#endif
+ trapsignal(p, i, ucode);
+out:
+ if (user) {
+ if (userret(p, framep->tf_cr_iip, sticks, 0))
+ mtx_exit(&Giant, MTX_DEF);
+ }
+ return;
+
+dopanic:
+ printtrap(vector, framep, 1, user);
+
+ /* XXX dump registers */
+
+#ifdef DDB
+ kdb_trap(vector, framep);
+#endif
+
+ panic("trap");
+}
+
+/*
+ * Process a system call.
+ *
+ * System calls are strange beasts. They are passed the syscall number
+ * in v0, and the arguments in the registers (as normal). They return
+ * an error flag in a3 (if a3 != 0 on return, the syscall had an error),
+ * and the return value (if any) in v0.
+ *
+ * The assembly stub takes care of moving the call number into a register
+ * we can get to, and moves all of the argument registers into their places
+ * in the trap frame. On return, it restores the callee-saved registers,
+ * a3, and v0 from the frame before returning to the user process.
+ */
+void
+syscall(code, framep)
+ u_int64_t code;
+ struct trapframe *framep;
+{
+#if 0
+ struct sysent *callp;
+ struct proc *p;
+ int error = 0;
+ u_int64_t opc;
+ u_quad_t sticks;
+ u_int64_t args[10]; /* XXX */
+ u_int hidden = 0, nargs;
+
+ /*
+ * Find our per-cpu globals.
+ */
+#if 0
+ globalp = (struct globaldata *) alpha_pal_rdval();
+#endif
+ mtx_enter(&Giant, MTX_DEF);
+
+#if notdef /* can't happen, ever. */
+ if ((framep->tf_cr_ipsr & IA64_PSR_CPL) == IA64_PSR_CPL_KERN)
+ panic("syscall");
+#endif
+
+ cnt.v_syscall++;
+ p = curproc;
+ p->p_md.md_tf = framep;
+ opc = framep->tf_cr_iip;
+ sticks = p->p_sticks;
+
+#ifdef DIAGNOSTIC
+ alpha_fpstate_check(p);
+#endif
+
+ if (p->p_sysent->sv_prepsyscall) {
+ /* (*p->p_sysent->sv_prepsyscall)(framep, args, &code, &params); */
+ panic("prepsyscall");
+ } else {
+ /*
+ * syscall() and __syscall() are handled the same on
+ * the ia64, as everything is 64-bit aligned, anyway.
+ */
+ if (code == SYS_syscall || code == SYS___syscall) {
+ /*
+ * Code is first argument, followed by actual args.
+ */
+ code = framep->tf_regs[FRAME_A0];
+ hidden = 1;
+ }
+ }
+
+ if (p->p_sysent->sv_mask)
+ code &= p->p_sysent->sv_mask;
+
+ if (code >= p->p_sysent->sv_size)
+ callp = &p->p_sysent->sv_table[0];
+ else
+ callp = &p->p_sysent->sv_table[code];
+
+ nargs = (callp->sy_narg & SYF_ARGMASK) + hidden;
+ switch (nargs) {
+ default:
+ if (nargs > 10) /* XXX */
+ panic("syscall: too many args (%d)", nargs);
+ error = copyin((caddr_t)(alpha_pal_rdusp()), &args[6],
+ (nargs - 6) * sizeof(u_int64_t));
+ case 6:
+ args[5] = framep->tf_regs[FRAME_A5];
+ case 5:
+ args[4] = framep->tf_regs[FRAME_A4];
+ case 4:
+ args[3] = framep->tf_regs[FRAME_A3];
+ case 3:
+ args[2] = framep->tf_regs[FRAME_A2];
+ case 2:
+ args[1] = framep->tf_regs[FRAME_A1];
+ case 1:
+ args[0] = framep->tf_regs[FRAME_A0];
+ case 0:
+ break;
+ }
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p->p_tracep, code, (callp->sy_narg & SYF_ARGMASK), args + hidden);
+#endif
+ if (error == 0) {
+ p->p_retval[0] = 0;
+ p->p_retval[1] = 0;
+
+ STOPEVENT(p, S_SCE, (callp->sy_narg & SYF_ARGMASK));
+
+ error = (*callp->sy_call)(p, args + hidden);
+ }
+
+
+ switch (error) {
+ case 0:
+ framep->tf_regs[FRAME_V0] = p->p_retval[0];
+ framep->tf_regs[FRAME_A4] = p->p_retval[1];
+ framep->tf_regs[FRAME_A3] = 0;
+ break;
+ case ERESTART:
+ framep->tf_regs[FRAME_PC] = opc;
+ break;
+ case EJUSTRETURN:
+ break;
+ default:
+ if (p->p_sysent->sv_errsize) {
+ if (error >= p->p_sysent->sv_errsize)
+ error = -1; /* XXX */
+ else
+ error = p->p_sysent->sv_errtbl[error];
+ }
+ framep->tf_regs[FRAME_V0] = error;
+ framep->tf_regs[FRAME_A3] = 1;
+ break;
+ }
+
+ /*
+ * Reinitialize proc pointer `p' as it may be different
+ * if this is a child returning from fork syscall.
+ */
+ p = curproc;
+
+ userret(p, framep->tf_regs[FRAME_PC], sticks, 1);
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
+#endif
+
+ /*
+ * This works because errno is findable through the
+ * register set. If we ever support an emulation where this
+ * is not the case, this code will need to be revisited.
+ */
+ STOPEVENT(p, S_SCX, code);
+ mtx_exit(&Giant, MTX_DEF);
+#endif
+}
+
+/*
+ * Process the tail end of a fork() for the child.
+ */
+void
+child_return(p)
+ struct proc *p;
+{
+ int have_giant;
+
+ /*
+ * Return values in the frame set by cpu_fork().
+ */
+
+ have_giant = userret(p, p->p_md.md_tf->tf_cr_iip, 0,
+ mtx_owned(&Giant));
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSRET)) {
+ if (have_giant == 0) {
+ mtx_enter(&Giant, MTX_DEF);
+ have_giant = 1;
+ }
+ ktrsysret(p->p_tracep, SYS_fork, 0, 0);
+ }
+#endif
+
+ if (have_giant)
+ mtx_exit(&Giant, MTX_DEF);
+}
+
+/*
+ * Process an asynchronous software trap.
+ * This is relatively easy.
+ */
+void
+ast(framep)
+ struct trapframe *framep;
+{
+ register struct proc *p;
+ u_quad_t sticks;
+
+ mtx_enter(&Giant, MTX_DEF);
+
+ p = curproc;
+ sticks = p->p_sticks;
+ p->p_md.md_tf = framep;
+
+ if ((framep->tf_cr_ipsr & IA64_PSR_CPL) != IA64_PSR_CPL_USER)
+ panic("ast and not user");
+
+ cnt.v_soft++;
+
+ PCPU_SET(astpending, 0);
+ if (p->p_flag & P_OWEUPC) {
+ p->p_flag &= ~P_OWEUPC;
+ addupc_task(p, p->p_stats->p_prof.pr_addr,
+ p->p_stats->p_prof.pr_ticks);
+ }
+
+ userret(p, framep->tf_cr_iip, sticks, 1);
+
+ mtx_exit(&Giant, MTX_DEF);
+}
+
+extern int ia64_unaligned_print, ia64_unaligned_fix;
+extern int ia64_unaligned_sigbus;
+
+static int
+unaligned_fixup(struct trapframe *framep, struct proc *p)
+{
+ vm_offset_t va = framep->tf_cr_ifa;
+ int doprint, dofix, dosigbus;
+ int signal, size = 0;
+ unsigned long uac;
+
+ /*
+ * Figure out what actions to take.
+ */
+
+ if (p)
+ uac = p->p_md.md_flags & MDP_UAC_MASK;
+ else
+ uac = 0;
+
+ doprint = ia64_unaligned_print && !(uac & MDP_UAC_NOPRINT);
+ dofix = ia64_unaligned_fix && !(uac & MDP_UAC_NOFIX);
+ dosigbus = ia64_unaligned_sigbus | (uac & MDP_UAC_SIGBUS);
+
+ /*
+ * See if the user can access the memory in question.
+ * Even if it's an unknown opcode, SEGV if the access
+ * should have failed.
+ */
+ if (!useracc((caddr_t)va, size ? size : 1, VM_PROT_WRITE)) {
+ signal = SIGSEGV;
+ goto out;
+ }
+
+ /*
+ * If we're supposed to be noisy, squawk now.
+ */
+ if (doprint) {
+ uprintf("pid %d (%s): unaligned access: va=0x%lx pc=0x%lx\n",
+ p->p_pid, p->p_comm, va, p->p_md.md_tf->tf_cr_iip);
+ }
+
+ /*
+ * If we should try to fix it and know how, give it a shot.
+ *
+ * We never allow bad data to be unknowingly used by the
+ * user process. That is, if we decide not to fix up an
+ * access we cause a SIGBUS rather than letting the user
+ * process go on without warning.
+ *
+ * If we're trying to do a fixup, we assume that things
+ * will be botched. If everything works out OK,
+ * unaligned_{load,store}_* clears the signal flag.
+ */
+ signal = SIGBUS;
+ if (dofix && size != 0) {
+ /*
+ * XXX not done yet.
+ */
+ }
+
+ /*
+ * Force SIGBUS if requested.
+ */
+ if (dosigbus)
+ signal = SIGBUS;
+
+out:
+ return (signal);
+}
diff --git a/sys/ia64/ia64/vm_machdep.c b/sys/ia64/ia64/vm_machdep.c
new file mode 100644
index 0000000..53682b7
--- /dev/null
+++ b/sys/ia64/ia64/vm_machdep.c
@@ -0,0 +1,494 @@
+/*-
+ * Copyright (c) 1982, 1986 The Regents of the University of California.
+ * Copyright (c) 1989, 1990 William Jolitz
+ * Copyright (c) 1994 John Dyson
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
+ * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
+ * $FreeBSD$
+ */
+/*
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/vnode.h>
+#include <sys/vmmeter.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/unistd.h>
+
+#include <machine/clock.h>
+#include <machine/cpu.h>
+#include <machine/fpu.h>
+#include <machine/md_var.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <sys/lock.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+
+#include <sys/user.h>
+
+/*
+ * quick version of vm_fault
+ */
+int
+vm_fault_quick(v, prot)
+ caddr_t v;
+ int prot;
+{
+ int r;
+ if (prot & VM_PROT_WRITE)
+ r = subyte(v, fubyte(v));
+ else
+ r = fubyte(v);
+ return(r);
+}
+
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ * Copy and update the pcb, and set up the stack so that the child is
+ * ready to run and return to user mode.
+ */
+void
+cpu_fork(p1, p2, flags)
+ register struct proc *p1, *p2;
+ int flags;
+{
+ if ((flags & RFPROC) == 0)
+ return;
+
+ p2->p_md.md_tf = p1->p_md.md_tf;
+ p2->p_md.md_flags = p1->p_md.md_flags & (MDP_FPUSED | MDP_UAC_MASK);
+
+ /*
+ * Cache the physical address of the pcb, so we can
+ * swap to it easily.
+ */
+ p2->p_md.md_pcbpaddr = (void*)vtophys((vm_offset_t)&p2->p_addr->u_pcb);
+
+ /*
+ * Copy floating point state from the FP chip to the PCB
+ * if this process has state stored there.
+ */
+ ia64_fpstate_save(p1, 0);
+
+ /*
+ * Copy pcb and stack from proc p1 to p2. We do this as
+ * cheaply as possible, copying only the active part of the
+ * stack. The stack and pcb need to agree. Make sure that the
+ * new process has FEN disabled.
+ */
+ p2->p_addr->u_pcb = p1->p_addr->u_pcb;
+#if 0
+ p2->p_addr->u_pcb.pcb_hw.apcb_usp = ia64_pal_rdusp();
+ p2->p_addr->u_pcb.pcb_hw.apcb_flags &= ~IA64_PCB_FLAGS_FEN;
+#endif
+
+ /*
+ * Set the floating point state.
+ */
+#if 0
+ if ((p2->p_addr->u_pcb.pcb_fp_control & IEEE_INHERIT) == 0) {
+ p2->p_addr->u_pcb.pcb_fp_control = 0;
+ p2->p_addr->u_pcb.pcb_fp.fpr_cr = (FPCR_DYN_NORMAL
+ | FPCR_INVD | FPCR_DZED
+ | FPCR_OVFD | FPCR_INED
+ | FPCR_UNFD);
+ }
+#endif
+
+ /*
+ * Arrange for a non-local goto when the new process
+ * is started, to resume here, returning nonzero from setjmp.
+ */
+#ifdef DIAGNOSTIC
+ if (p1 != curproc)
+ panic("cpu_fork: curproc");
+ ia64_fpstate_check(p1);
+#endif
+
+ /*
+ * create the child's kernel stack, from scratch.
+ */
+ {
+ struct user *up = p2->p_addr;
+ struct trapframe *p2tf;
+
+ /*
+ * Pick a stack pointer, leaving room for a trapframe;
+ * copy trapframe from parent so return to user mode
+ * will be to right address, with correct registers.
+ */
+ p2tf = p2->p_md.md_tf = (struct trapframe *)
+ ((char *)p2->p_addr + USPACE - sizeof(struct trapframe));
+ bcopy(p1->p_md.md_tf, p2->p_md.md_tf,
+ sizeof(struct trapframe));
+
+ /*
+ * Set up return-value registers as fork() libc stub expects.
+ */
+#if 0
+ p2tf->tf_regs[FRAME_V0] = 0; /* child's pid (linux) */
+ p2tf->tf_regs[FRAME_A3] = 0; /* no error */
+ p2tf->tf_regs[FRAME_A4] = 1; /* is child (FreeBSD) */
+
+ /*
+ * Arrange for continuation at child_return(), which
+ * will return to exception_return(). Note that the child
+ * process doesn't stay in the kernel for long!
+ *
+ * This is an inlined version of cpu_set_kpc.
+ */
+ up->u_pcb.pcb_hw.apcb_ksp = (u_int64_t)p2tf;
+ up->u_pcb.pcb_context[0] =
+ (u_int64_t)child_return; /* s0: pc */
+ up->u_pcb.pcb_context[1] =
+ (u_int64_t)exception_return; /* s1: ra */
+ up->u_pcb.pcb_context[2] = (u_long) p2; /* s2: a0 */
+ up->u_pcb.pcb_context[7] =
+ (u_int64_t)switch_trampoline; /* ra: assembly magic */
+#endif
+ }
+}
+
+/*
+ * Intercept the return address from a freshly forked process that has NOT
+ * been scheduled yet.
+ *
+ * This is needed to make kernel threads stay in kernel mode.
+ */
+void
+cpu_set_fork_handler(p, func, arg)
+ struct proc *p;
+ void (*func) __P((void *));
+ void *arg;
+{
+#if 0
+ /*
+ * Note that the trap frame follows the args, so the function
+ * is really called like this: func(arg, frame);
+ */
+ p->p_addr->u_pcb.pcb_context[0] = (u_long) func;
+ p->p_addr->u_pcb.pcb_context[2] = (u_long) arg;
+#endif
+}
+
+/*
+ * cpu_exit is called as the last action during exit.
+ * We drop the fp state (if we have it) and switch to a live one.
+ * When the proc is reaped, cpu_wait() will gc the VM state.
+ */
+void
+cpu_exit(p)
+ register struct proc *p;
+{
+ ia64_fpstate_drop(p);
+
+ (void) splhigh();
+ cnt.v_swtch++;
+ cpu_switch();
+ panic("cpu_exit");
+}
+
+void
+cpu_wait(p)
+ struct proc *p;
+{
+ /* drop per-process resources */
+ pmap_dispose_proc(p);
+
+ /* and clean-out the vmspace */
+ vmspace_free(p->p_vmspace);
+}
+
+/*
+ * Dump the machine specific header information at the start of a core dump.
+ */
+int
+cpu_coredump(p, vp, cred)
+ struct proc *p;
+ struct vnode *vp;
+ struct ucred *cred;
+{
+
+ return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
+ (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
+ p));
+}
+
+#ifdef notyet
+static void
+setredzone(pte, vaddr)
+ u_short *pte;
+ caddr_t vaddr;
+{
+/* eventually do this by setting up an expand-down stack segment
+ for ss0: selector, allowing stack access down to top of u.
+ this means though that protection violations need to be handled
+ thru a double fault exception that must do an integral task
+ switch to a known good context, within which a dump can be
+ taken. a sensible scheme might be to save the initial context
+ used by sched (that has physical memory mapped 1:1 at bottom)
+ and take the dump while still in mapped mode */
+}
+#endif
+
+/*
+ * Map an IO request into kernel virtual address space.
+ *
+ * All requests are (re)mapped into kernel VA space.
+ * Notice that we use b_bufsize for the size of the buffer
+ * to be mapped. b_bcount might be modified by the driver.
+ */
+void
+vmapbuf(bp)
+ register struct buf *bp;
+{
+ register caddr_t addr, v, kva;
+ vm_offset_t pa;
+
+ if ((bp->b_flags & B_PHYS) == 0)
+ panic("vmapbuf");
+
+ for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
+ addr < bp->b_data + bp->b_bufsize;
+ addr += PAGE_SIZE, v += PAGE_SIZE) {
+ /*
+ * Do the vm_fault if needed; do the copy-on-write thing
+ * when reading stuff off device into memory.
+ */
+ vm_fault_quick(addr,
+ (bp->b_iocmd == BIO_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
+ pa = trunc_page(pmap_kextract((vm_offset_t) addr));
+ if (pa == 0)
+ panic("vmapbuf: page not present");
+ vm_page_hold(PHYS_TO_VM_PAGE(pa));
+ pmap_kenter((vm_offset_t) v, pa);
+ }
+
+ kva = bp->b_saveaddr;
+ bp->b_saveaddr = bp->b_data;
+ bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
+}
+
+/*
+ * Free the io map PTEs associated with this IO operation.
+ * We also invalidate the TLB entries and restore the original b_addr.
+ */
+void
+vunmapbuf(bp)
+ register struct buf *bp;
+{
+ register caddr_t addr;
+ vm_offset_t pa;
+
+ if ((bp->b_flags & B_PHYS) == 0)
+ panic("vunmapbuf");
+
+ for (addr = (caddr_t)trunc_page(bp->b_data);
+ addr < bp->b_data + bp->b_bufsize;
+ addr += PAGE_SIZE) {
+ pa = trunc_page(pmap_kextract((vm_offset_t) addr));
+ pmap_kremove((vm_offset_t) addr);
+ vm_page_unhold(PHYS_TO_VM_PAGE(pa));
+ }
+
+ bp->b_data = bp->b_saveaddr;
+}
+
+/*
+ * Force reset the processor by invalidating the entire address space!
+ */
+void
+cpu_reset()
+{
+ /* prom_halt(0); */
+}
+
+int
+grow_stack(p, sp)
+ struct proc *p;
+ size_t sp;
+{
+ int rv;
+
+ rv = vm_map_growstack (p, sp);
+ if (rv != KERN_SUCCESS)
+ return (0);
+
+ return (1);
+}
+
+
+static int cnt_prezero;
+
+SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
+
+/*
+ * Implement the pre-zeroed page mechanism.
+ * This routine is called from the idle loop.
+ */
+
+#define ZIDLE_LO(v) ((v) * 2 / 3)
+#define ZIDLE_HI(v) ((v) * 4 / 5)
+
+int
+vm_page_zero_idle()
+{
+ static int free_rover;
+ static int zero_state;
+ vm_page_t m;
+ int s;
+
+ /*
+ * Attempt to maintain approximately 1/2 of our free pages in a
+ * PG_ZERO'd state. Add some hysteresis to (attempt to) avoid
+ * generally zeroing a page when the system is near steady-state.
+ * Otherwise we might get 'flutter' during disk I/O / IPC or
+ * fast sleeps. We also do not want to be continuously zeroing
+ * pages because doing so may flush our L1 and L2 caches too much.
+ */
+
+ if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
+ return(0);
+ if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+ return(0);
+
+#ifdef SMP
+ if (try_mplock()) {
+#endif
+ s = splvm();
+ m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
+ zero_state = 0;
+ if (m != NULL && (m->flags & PG_ZERO) == 0) {
+ vm_page_queues[m->queue].lcnt--;
+ TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
+ m->queue = PQ_NONE;
+ splx(s);
+#if 0
+ rel_mplock();
+#endif
+ pmap_zero_page(VM_PAGE_TO_PHYS(m));
+#if 0
+ get_mplock();
+#endif
+ (void)splvm();
+ vm_page_flag_set(m, PG_ZERO);
+ m->queue = PQ_FREE + m->pc;
+ vm_page_queues[m->queue].lcnt++;
+ TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
+ pageq);
+ ++vm_page_zero_count;
+ ++cnt_prezero;
+ if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+ zero_state = 1;
+ }
+ free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
+ splx(s);
+#ifdef SMP
+ rel_mplock();
+#endif
+ return (1);
+#ifdef SMP
+ }
+#endif
+ return (0);
+}
+
+/*
+ * Software interrupt handler for queued VM system processing.
+ */
+void
+swi_vm()
+{
+#if 0
+ if (busdma_swi_pending != 0)
+ busdma_swi();
+#endif
+}
+
+/*
+ * Tell whether this address is in some physical memory region.
+ * Currently used by the kernel coredump code in order to avoid
+ * dumping the ``ISA memory hole'' which could cause indefinite hangs,
+ * or other unpredictable behaviour.
+ */
+
+
+int
+is_physical_memory(addr)
+ vm_offset_t addr;
+{
+ /*
+ * stuff other tests for known memory-mapped devices (PCI?)
+ * here
+ */
+
+ return 1;
+}
diff --git a/sys/ia64/include/_limits.h b/sys/ia64/include/_limits.h
new file mode 100644
index 0000000..99ca2d8
--- /dev/null
+++ b/sys/ia64/include/_limits.h
@@ -0,0 +1,96 @@
+/* $FreeBSD$ */
+/* From: NetBSD: limits.h,v 1.3 1997/04/06 08:47:31 cgd Exp */
+
+/*
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
+ */
+
+#define CHAR_BIT 8 /* number of bits in a char */
+#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN and LONG_MIN is so the value is not unsigned; 2147483648 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ * These numbers work for pcc as well. The UINT_MAX and ULONG_MAX values
+ * are written as hex so that GCC will be quiet about large integer constants.
+ */
+#define SCHAR_MAX 0x7f /* max value for a signed char */
+#define SCHAR_MIN (-0x7f-1) /* min value for a signed char */
+
+#define UCHAR_MAX 0xffU /* max value for an unsigned char */
+#define CHAR_MAX 0x7f /* max value for a char */
+#define CHAR_MIN (-0x7f-1) /* min value for a char */
+
+#define USHRT_MAX 0xffffU /* max value for an unsigned short */
+#define SHRT_MAX 0x7fff /* max value for a short */
+#define SHRT_MIN (-0x7fff-1) /* min value for a short */
+
+#define UINT_MAX 0xffffffffU /* max value for an unsigned int */
+#define INT_MAX 0x7fffffff /* max value for an int */
+#define INT_MIN (-0x7fffffff-1) /* min value for an int */
+
+#define ULONG_MAX 0xffffffffffffffffUL /* max for an unsigned long */
+#define LONG_MAX 0x7fffffffffffffffL /* max for a long */
+#define LONG_MIN (-0x7fffffffffffffffL-1) /* min for a long */
+
+#if !defined(_ANSI_SOURCE)
+#define SSIZE_MAX LONG_MAX /* max value for a ssize_t */
+
+#if !defined(_POSIX_SOURCE) && !defined(_XOPEN_SOURCE)
+#define SIZE_T_MAX ULONG_MAX /* max value for a size_t */
+
+/* Quads and longs are the same on the ia64 */
+#define UQUAD_MAX (ULONG_MAX) /* max value for a uquad_t */
+#define QUAD_MAX (LONG_MAX) /* max value for a quad_t */
+#define QUAD_MIN (LONG_MIN) /* min value for a quad_t */
+
+#endif /* !_POSIX_SOURCE && !_XOPEN_SOURCE */
+#endif /* !_ANSI_SOURCE */
+
+#if (!defined(_ANSI_SOURCE)&&!defined(_POSIX_SOURCE)) || defined(_XOPEN_SOURCE)
+#define LONG_BIT 64
+#define WORD_BIT 32
+
+#define DBL_DIG 15
+#define DBL_MAX 1.7976931348623157E+308
+#define DBL_MIN 2.2250738585072014E-308
+
+#define FLT_DIG 6
+#define FLT_MAX 3.40282347E+38F
+#define FLT_MIN 1.17549435E-38F
+#endif
diff --git a/sys/ia64/include/ansi.h b/sys/ia64/include/ansi.h
new file mode 100644
index 0000000..c5ef23d
--- /dev/null
+++ b/sys/ia64/include/ansi.h
@@ -0,0 +1,119 @@
+/* $FreeBSD$ */
+/* From: NetBSD: ansi.h,v 1.9 1997/11/23 20:20:53 kleink Exp */
+
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ansi.h 8.2 (Berkeley) 1/4/94
+ */
+
+#ifndef _ANSI_H_
+#define _ANSI_H_
+
+/*
+ * Types which are fundamental to the implementation and may appear in
+ * more than one standard header are defined here. Standard headers
+ * then use:
+ * #ifdef _BSD_SIZE_T_
+ * typedef _BSD_SIZE_T_ size_t;
+ * #undef _BSD_SIZE_T_
+ * #endif
+ */
+#define _BSD_CLOCK_T_ int /* clock() */
+#define _BSD_PTRDIFF_T_ long /* ptr1 - ptr2 */
+#define _BSD_SIZE_T_ unsigned long /* sizeof() */
+#define _BSD_SSIZE_T_ long /* byte count or error */
+#define _BSD_TIME_T_ int /* time() */
+#define _BSD_VA_LIST_ __builtin_va_list /* va_list */
+#define _BSD_CLOCKID_T_ int /* clockid_t */
+#define _BSD_TIMER_T_ int /* timer_t */
+#define _BSD_SUSECONDS_T_ int /* suseconds_t */
+#define _BSD_USECONDS_T_ unsigned int /* useconds_t */
+
+/*
+ * Types which are fundamental to the implementation and must be used
+ * in more than one standard header although they are only declared in
+ * one (perhaps nonstandard) header are defined here. Standard headers
+ * use _BSD_XXX_T_ without undef'ing it.
+ */
+#define _BSD_CT_RUNE_T_ int /* arg type for ctype funcs */
+#define _BSD_OFF_T_ long /* file offset */
+#define _BSD_PID_T_ int /* process [group] */
+
+/*
+ * Runes (wchar_t) is declared to be an ``int'' instead of the more natural
+ * ``unsigned long'' or ``long''. Two things are happening here. It is not
+ * unsigned so that EOF (-1) can be naturally assigned to it and used. Also,
+ * it looks like 10646 will be a 31 bit standard. This means that if your
+ * ints cannot hold 32 bits, you will be in trouble. The reason an int was
+ * chosen over a long is that the is*() and to*() routines take ints (says
+ * ANSI C), but they use _RUNE_T_ instead of int. By changing it here, you
+ * lose a bit of ANSI conformance, but your programs will still work.
+ *
+ * Note that _WCHAR_T_ and _RUNE_T_ must be of the same type. When wchar_t
+ * and rune_t are typedef'd, _WCHAR_T_ will be undef'd, but _RUNE_T_ remains
+ * defined for ctype.h.
+ */
+#define _BSD_WCHAR_T_ int /* wchar_t */
+#define _BSD_WINT_T_ int /* wint_t */
+#define _BSD_RUNE_T_ int /* rune_t */
+
+/*
+ * Frequencies of the clock ticks reported by clock() and times(). They
+ * are the same as stathz for bogus historical reasons. They should be
+ * 1e6 because clock() and times() are implemented using getrusage() and
+ * there is no good reason why they should be less accurate. There is
+ * the bad reason that (broken) programs might not like clock_t or
+ * CLOCKS_PER_SEC being ``double'' (``unsigned long'' is not large enough
+ * to hold the required 24 hours worth of ticks if the frequency is
+ * 1000000ul, and ``unsigned long long'' would be nonstandard).
+ */
+#define _BSD_CLK_TCK_ 100
+#define _BSD_CLOCKS_PER_SEC_ 100
+
+/*
+ * Internal names for basic integral types. Omit the typedef if
+ * not possible for a machine/compiler combination.
+ */
+typedef __signed char __int8_t;
+typedef unsigned char __uint8_t;
+typedef short __int16_t;
+typedef unsigned short __uint16_t;
+typedef int __int32_t;
+typedef unsigned int __uint32_t;
+typedef long __int64_t;
+typedef unsigned long __uint64_t;
+
+typedef long __intptr_t;
+typedef unsigned long __uintptr_t;
+
+#endif /* _ANSI_H_ */
diff --git a/sys/ia64/include/asm.h b/sys/ia64/include/asm.h
new file mode 100644
index 0000000..4d20be3
--- /dev/null
+++ b/sys/ia64/include/asm.h
@@ -0,0 +1,257 @@
+/* $FreeBSD$ */
+/* From: NetBSD: asm.h,v 1.18 1997/11/03 04:22:06 ross Exp */
+
+/*
+ * Copyright (c) 1991,1990,1989,1994,1995,1996 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Assembly coding style
+ *
+ * This file contains macros and register defines to
+ * aid in writing more readable assembly code.
+ * Some rules to make assembly code understandable by
+ * a debugger are also noted.
+ *
+ * The document
+ *
+ * "ALPHA Calling Standard", DEC 27-Apr-90
+ *
+ * defines (a superset of) the rules and conventions
+ * we use. While we make no promise of adhering to
+ * such standard and its evolution (esp where we
+ * can get faster code paths) it is certainly intended
+ * that we be interoperable with such standard.
+ *
+ * In this sense, this file is a proper part of the
+ * definition of the (software) Alpha architecture.
+ */
+
+/*
+ * Macro to make a local label name.
+ */
+#define LLABEL(name,num) L ## name ## num
+
+/*
+ * MCOUNT
+ */
+
+#if !defined(GPROF) && !defined(PROF)
+#define MCOUNT /* nothing */
+#else
+#define MCOUNT \
+ .set noat; \
+ jsr at_reg,_mcount; \
+ .set at
+#endif
+
+/*
+ * LEAF
+ * Declare a global leaf function.
+ * A leaf function does not call other functions.
+ */
+#define LEAF(_name_, _n_args_) \
+ .global _name_; \
+ .proc _name_; \
+_name_:; \
+ .regstk _n_args_, 0, 0, 0 \
+ MCOUNT
+
+#define LEAF_NOPROFILE(_name_, _n_args_) \
+ .global _name_; \
+ .proc _name_; \
+_name_:; \
+ .regstk _n_args_, 0, 0, 0
+
+/*
+ * STATIC_LEAF
+ * Declare a local leaf function.
+ */
+#define STATIC_LEAF(_name_, _n_args_) \
+ .proc _name_; \
+_name_:; \
+ .regstk _n_args_, 0, 0, 0 \
+ MCOUNT
+/*
+ * XLEAF
+ * Global alias for a leaf function, or alternate entry point
+ */
+#define XLEAF(_name_) \
+ .globl _name_; \
+_name_:
+
+/*
+ * STATIC_XLEAF
+ * Local alias for a leaf function, or alternate entry point
+ */
+#define STATIC_XLEAF(_name_) \
+_name_:
+
+/*
+ * NESTED
+ * Declare a (global) nested function
+ * A nested function calls other functions and needs
+ * to use alloc to save registers.
+ */
+#define NESTED(_name_,_n_args_,_n_locals_,_n_outputs_, \
+ _pfs_reg_,_rp_reg_) \
+ .globl _name_; \
+ .proc _name_; \
+_name_:; \
+ alloc _pfs_reg_=ar.pfs,_n_args_,_n_locals_,_n_outputs_,0;; \
+ mov _rp_reg_=rp \
+ MCOUNT
+
+#define NESTED_NOPROFILE(_name_,_n_args_,_n_locals_,_n_outputs_, \
+ _pfs_reg_,_rp_reg_) \
+ .globl _name_; \
+ .proc _name_; \
+_name_:; \
+ alloc _pfs_reg_=ar.pfs,_n_args_,_n_locals_,_n_outputs_,0;; \
+ mov _rp_reg_=rp
+
+/*
+ * STATIC_NESTED
+ * Declare a local nested function.
+ */
+#define STATIC_NESTED(_name_,_n_args_,_n_locals_,_n_outputs_, \
+ _pfs_reg_,_rp_reg_) \
+ .proc _name_; \
+_name_:; \
+ alloc _pfs_reg_=ar.pfs,_n_args_,_n_locals_,_n_outputs_,0;; \
+ mov _rp_reg_=rp;; \
+ MCOUNT
+
+/*
+ * XNESTED
+ * Same as XLEAF, for a nested function.
+ */
+#define XNESTED(_name_) \
+ .globl _name_; \
+_name_:
+
+
+/*
+ * STATIC_XNESTED
+ * Same as STATIC_XLEAF, for a nested function.
+ */
+#define STATIC_XNESTED(_name_) \
+_name_:
+
+
+/*
+ * END
+ * Function delimiter
+ */
+#define END(_name_) \
+ .endp _name_
+
+
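+/*
+ * Illustrative sketch (not part of the original header): a trivial
+ * leaf routine written with the macros above might look like
+ *
+ *	LEAF(example_nop, 0)
+ *		nop 0
+ *		br.ret.sptk.few rp
+ *	END(example_nop)
+ *
+ * where the routine name and body are hypothetical.
+ */
+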
+/*
+ * EXPORT
+ * Export a symbol
+ */
+#define EXPORT(_name_) \
+ .global _name_; \
+_name_:
+
+
+/*
+ * IMPORT
+ * Make an external name visible, typecheck the size
+ */
+#define IMPORT(_name_, _size_) \
+ /* .extern _name_,_size_ */
+
+
+/*
+ * ABS
+ * Define an absolute symbol
+ */
+#define ABS(_name_, _value_) \
+ .globl _name_; \
+_name_ = _value_
+
+
+/*
+ * BSS
+ * Allocate un-initialized space for a global symbol
+ */
+#define BSS(_name_,_numbytes_) \
+ .comm _name_,_numbytes_
+
+
+/*
+ * MSG
+ * Allocate space for a message (a read-only ascii string)
+ */
+#define ASCIZ .asciz
+#define MSG(msg,reg,label) \
+ lda reg, label; \
+ .data; \
+label: ASCIZ msg; \
+ .text;
+
+/*
+ * System call glue.
+ */
+#define SYSCALLNUM(name) \
+ SYS_ ## name
+
+#define CALLSYS_NOERROR(name) \
+ mov r15=SYSCALLNUM(name); \
+ break 0x100000 ;;
+
+/*
+ * WEAK_ALIAS: create a weak alias (ELF only).
+ */
+#ifdef __ELF__
+#define WEAK_ALIAS(alias,sym) \
+ .weak alias; \
+ alias = sym
+#endif
+
+/*
+ * Kernel RCS ID tag and copyright macros
+ */
+
+#ifdef _KERNEL
+
+#ifdef __ELF__
+#define __KERNEL_SECTIONSTRING(_sec, _str) \
+ .section _sec ; .asciz _str ; .text
+#else /* __ELF__ */
+#define __KERNEL_SECTIONSTRING(_sec, _str) \
+ .data ; .asciz _str ; .align 3 ; .text
+#endif /* __ELF__ */
+
+#define __KERNEL_RCSID(_n, _s) __KERNEL_SECTIONSTRING(.ident, _s)
+#define __KERNEL_COPYRIGHT(_n, _s) __KERNEL_SECTIONSTRING(.copyright, _s)
+
+#ifdef NO_KERNEL_RCSIDS
+#undef __KERNEL_RCSID
+#define __KERNEL_RCSID(_n, _s) /* nothing */
+#endif
+
+#endif /* _KERNEL */
diff --git a/sys/ia64/include/atomic.h b/sys/ia64/include/atomic.h
new file mode 100644
index 0000000..a7b3e21
--- /dev/null
+++ b/sys/ia64/include/atomic.h
@@ -0,0 +1,190 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ATOMIC_H_
+#define _MACHINE_ATOMIC_H_
+
+/*
+ * Various simple arithmetic on memory which is atomic in the presence
+ * of interrupts and SMP safe.
+ */
+
+/*
+ * Everything is built out of cmpxchg.
+ */
+#define IA64_CMPXCHG(sz, sem, type, p, cmpval, newval) \
+({ \
+ type _cmpval = cmpval; \
+ type _newval = newval; \
+ volatile type *_p = (volatile type *) p; \
+ type _ret; \
+ \
+ __asm __volatile ( \
+ "mov ar.ccv=%2;;\n\t" \
+ "cmpxchg" #sz "." #sem " %0=%4,%3,ar.ccv\n\t" \
+ : "=r" (_ret), "=m" (*_p) \
+ : "r" (_cmpval), "r" (_newval), "m" (*_p) \
+ : "memory"); \
+ _ret; \
+})
+
+/*
+ * Some common forms of cmpxch.
+ */
+static __inline u_int32_t
+ia64_cmpxchg_acq_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+{
+ return IA64_CMPXCHG(4, acq, u_int32_t, p, cmpval, newval);
+}
+
+static __inline u_int32_t
+ia64_cmpxchg_rel_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+{
+ return IA64_CMPXCHG(4, rel, u_int32_t, p, cmpval, newval);
+}
+
+static __inline u_int64_t
+ia64_cmpxchg_acq_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+{
+ return IA64_CMPXCHG(8, acq, u_int64_t, p, cmpval, newval);
+}
+
+static __inline u_int64_t
+ia64_cmpxchg_rel_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+{
+ return IA64_CMPXCHG(8, rel, u_int64_t, p, cmpval, newval);
+}
+
+/*
+ * Store with release semantics is used to release locks.
+ */
+static __inline void
+ia64_st_rel_32(volatile u_int32_t* p, u_int32_t v)
+{
+ __asm __volatile ("st4.rel %0=%1"
+ : "=m" (*p)
+ : "r" (v)
+ : "memory");
+}
+
+static __inline void
+ia64_st_rel_64(volatile u_int64_t* p, u_int64_t v)
+{
+ __asm __volatile ("st8.rel %0=%1"
+ : "=m" (*p)
+ : "r" (v)
+ : "memory");
+}
+
+#define IA64_ATOMIC(sz, type, name, op) \
+ \
+static __inline void \
+atomic_##name(volatile type *p, type v) \
+{ \
+ type old; \
+ do { \
+ old = *p; \
+ } while (IA64_CMPXCHG(sz, acq, type, p, old, old op v) != old); \
+}
+
+IA64_ATOMIC(1, u_int8_t, set_8, |)
+IA64_ATOMIC(2, u_int16_t, set_16, |)
+IA64_ATOMIC(4, u_int32_t, set_32, |)
+IA64_ATOMIC(8, u_int64_t, set_64, |)
+
+IA64_ATOMIC(1, u_int8_t, clear_8, &~)
+IA64_ATOMIC(2, u_int16_t, clear_16, &~)
+IA64_ATOMIC(4, u_int32_t, clear_32, &~)
+IA64_ATOMIC(8, u_int64_t, clear_64, &~)
+
+IA64_ATOMIC(1, u_int8_t, add_8, +)
+IA64_ATOMIC(2, u_int16_t, add_16, +)
+IA64_ATOMIC(4, u_int32_t, add_32, +)
+IA64_ATOMIC(8, u_int64_t, add_64, +)
+
+IA64_ATOMIC(1, u_int8_t, subtract_8, -)
+IA64_ATOMIC(2, u_int16_t, subtract_16, -)
+IA64_ATOMIC(4, u_int32_t, subtract_32, -)
+IA64_ATOMIC(8, u_int64_t, subtract_64, -)
+
+#undef IA64_ATOMIC
+#undef IA64_CMPXCHG
+
+#define atomic_set_char atomic_set_8
+#define atomic_clear_char atomic_clear_8
+#define atomic_add_char atomic_add_8
+#define atomic_subtract_char atomic_subtract_8
+
+#define atomic_set_short atomic_set_16
+#define atomic_clear_short atomic_clear_16
+#define atomic_add_short atomic_add_16
+#define atomic_subtract_short atomic_subtract_16
+
+#define atomic_set_int atomic_set_32
+#define atomic_clear_int atomic_clear_32
+#define atomic_add_int atomic_add_32
+#define atomic_subtract_int atomic_subtract_32
+
+#define atomic_set_long atomic_set_64
+#define atomic_clear_long atomic_clear_64
+#define atomic_add_long atomic_add_64
+#define atomic_subtract_long atomic_subtract_64
+
+/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+static __inline int
+atomic_cmpset_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+{
+ return ia64_cmpxchg_acq_32(p, cmpval, newval) == cmpval;
+}
+
+/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+static __inline int
+atomic_cmpset_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+{
+ return ia64_cmpxchg_acq_64(p, cmpval, newval) == cmpval;
+}
+
+#define atomic_cmpset_int atomic_cmpset_32
+#define atomic_cmpset_long atomic_cmpset_64
+
+static __inline int
+atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
+{
+ return atomic_cmpset_long((volatile u_long *)dst,
+ (u_long)exp, (u_long)src);
+}
+
+#endif /* ! _MACHINE_ATOMIC_H_ */
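
A minimal usage sketch (not part of this commit), assuming a lock word encoding
of 0 = free and 1 = held: atomic_cmpset_32 above supplies an acquiring
compare-and-set and ia64_st_rel_32 a releasing store, which is enough for a
trivial spinlock. The example_* names are illustrative only.

/* Illustrative only; the names and the lock encoding are assumptions. */
static __inline void
example_lock(volatile u_int32_t *lock)
{
	/* Spin until the compare-and-set from 0 (free) to 1 (held) succeeds. */
	while (atomic_cmpset_32(lock, 0, 1) == 0)
		continue;
}

static __inline void
example_unlock(volatile u_int32_t *lock)
{
	/* The release store makes earlier stores visible before the unlock. */
	ia64_st_rel_32(lock, 0);
}
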
diff --git a/sys/ia64/include/bootinfo.h b/sys/ia64/include/bootinfo.h
new file mode 100644
index 0000000..86b7e7a
--- /dev/null
+++ b/sys/ia64/include/bootinfo.h
@@ -0,0 +1,86 @@
+/* $FreeBSD$ */
+/*
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * The boot program passes a pointer (in the boot environment virtual
+ * address space; "BEVA") to a bootinfo structure to the kernel using
+ * the following convention:
+ *
+ * a0 contains first free page frame number
+ * a1 contains page number of current level 1 page table
+ * if a2 contains BOOTINFO_MAGIC and a4 is nonzero:
+ * a3 contains pointer (BEVA) to bootinfo
+ * a4 contains bootinfo version number
+ * if a2 contains BOOTINFO_MAGIC and a4 contains 0 (backward compat):
+ * a3 contains pointer (BEVA) to bootinfo version
+ * (u_long), then the bootinfo
+ */
+
+#define BOOTINFO_MAGIC 0xdeadbeeffeedface
+
+struct bootinfo_v1 {
+ u_long ssym; /* 0: start of kernel sym table */
+ u_long esym; /* 8: end of kernel sym table */
+ char boot_flags[64]; /* 16: boot flags */
+ char booted_kernel[64]; /* 80: name of booted kernel */
+ void *hwrpb; /* 144: hwrpb pointer (BEVA) */
+ u_long hwrpbsize; /* 152: size of hwrpb data */
+ int (*cngetc) __P((void)); /* 160: console getc pointer */
+ void (*cnputc) __P((int)); /* 168: console putc pointer */
+ void (*cnpollc) __P((int)); /* 176: console pollc pointer */
+ u_long pad[6]; /* 184: rsvd for future use */
+ char *envp; /* 232: start of environment */
+ u_long kernend; /* 240: end of kernel */
+ u_long modptr; /* 248: FreeBSD module base */
+ /* 256: total size */
+};
+
+/*
+ * Kernel-internal structure used to hold important bits of boot
+ * information. NOT to be used by boot blocks.
+ *
+ * Note that not all of the fields from the bootinfo struct(s)
+ * passed by the boot blocks are here (because they're not currently
+ * used by the kernel!). Fields here which aren't supplied by the
+ * bootinfo structure passed by the boot blocks are supposed to be
+ * filled in at startup with sane contents.
+ */
+struct bootinfo_kernel {
+ u_long ssym; /* start of syms */
+ u_long esym; /* end of syms */
+ u_long modptr; /* FreeBSD module pointer */
+ u_long kernend; /* "end of kernel" from boot code */
+ char *envp; /* start of environment from boot code */
+ u_long hwrpb_phys; /* hwrpb physical address */
+ u_long hwrpb_size; /* size of hwrpb data */
+ char boot_flags[64]; /* boot flags */
+ char booted_kernel[64]; /* name of booted kernel */
+ char booted_dev[64]; /* name of booted device */
+};
+
+extern struct bootinfo_kernel bootinfo;
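
As a sketch of how the two structures relate (not part of this commit), early
startup code could copy the boot-block-supplied fields into the kernel-internal
structure roughly as below. The function name is hypothetical and the usual
kernel bcopy is assumed; error checking and the remaining fields are omitted.

/* Illustrative only. */
static void
example_bootinfo_init(const struct bootinfo_v1 *v1)
{
	bootinfo.ssym = v1->ssym;
	bootinfo.esym = v1->esym;
	bootinfo.modptr = v1->modptr;
	bootinfo.kernend = v1->kernend;
	bootinfo.envp = v1->envp;
	bootinfo.hwrpb_size = v1->hwrpbsize;
	bcopy(v1->boot_flags, bootinfo.boot_flags,
	    sizeof(bootinfo.boot_flags));
	bcopy(v1->booted_kernel, bootinfo.booted_kernel,
	    sizeof(bootinfo.booted_kernel));
}
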
diff --git a/sys/ia64/include/bus.h b/sys/ia64/include/bus.h
new file mode 100644
index 0000000..c054193
--- /dev/null
+++ b/sys/ia64/include/bus.h
@@ -0,0 +1,1173 @@
+/* $NetBSD: bus.h,v 1.12 1997/10/01 08:25:15 fvdl Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* $FreeBSD$ */
+
+#ifndef _MACHINE_BUS_H_
+#define _MACHINE_BUS_H_
+
+#include <machine/cpufunc.h>
+
+/*
+ * To remain compatible with NetBSD's interface, default to both memio and
+ * pio when neither of them is defined.
+ */
+#if !defined(_MACHINE_BUS_PIO_H_) && !defined(_MACHINE_BUS_MEMIO_H_)
+#define _MACHINE_BUS_PIO_H_
+#define _MACHINE_BUS_MEMIO_H_
+#endif
+
+/*
+ * Values for the ia64 bus space tag, not to be used directly by MI code.
+ */
+#define IA64_BUS_SPACE_IO 0 /* space is i/o space */
+#define IA64_BUS_SPACE_MEM 1 /* space is mem space */
+
+/*
+ * Bus address and size types
+ */
+typedef u_long bus_addr_t;
+typedef u_long bus_size_t;
+
+#define BUS_SPACE_MAXSIZE_24BIT 0xFFFFFF
+#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF
+#define BUS_SPACE_MAXSIZE (64 * 1024) /* Maximum supported size */
+#define BUS_SPACE_MAXADDR_24BIT 0xFFFFFF
+#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
+#define BUS_SPACE_MAXADDR 0xFFFFFFFF
+
+#define BUS_SPACE_UNRESTRICTED (~0)
+
+/*
+ * Access methods for bus resources and address space.
+ */
+typedef int bus_space_tag_t;
+typedef u_long bus_space_handle_t;
+
+/*
+ * Map a region of device bus space into CPU virtual address space.
+ */
+
+#define BUS_SPACE_MAP_CACHEABLE 0x01
+#define BUS_SPACE_MAP_LINEAR 0x02
+
+int bus_space_map(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
+ int flags, bus_space_handle_t *bshp);
+
+/*
+ * Unmap a region of device bus space.
+ */
+
+void bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh,
+ bus_size_t size);
+
+/*
+ * Get a new handle for a subregion of an already-mapped area of bus space.
+ */
+
+int bus_space_subregion(bus_space_tag_t t, bus_space_handle_t bsh,
+ bus_size_t offset, bus_size_t size,
+ bus_space_handle_t *nbshp);
+
+/*
+ * Allocate a region of memory that is accessible to devices in bus space.
+ */
+
+int bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart,
+ bus_addr_t rend, bus_size_t size, bus_size_t align,
+ bus_size_t boundary, int flags, bus_addr_t *addrp,
+ bus_space_handle_t *bshp);
+
+/*
+ * Free a region of bus space accessible memory.
+ */
+
+void bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh,
+ bus_size_t size);
+
+#if defined(_MACHINE_BUS_PIO_H_) || defined(_MACHINE_BUS_MEMIO_H_)
+
+/*
+ * Read a 1, 2, 4, or 8 byte quantity from bus space
+ * described by tag/handle/offset.
+ */
+static __inline u_int8_t bus_space_read_1(bus_space_tag_t tag,
+ bus_space_handle_t handle,
+ bus_size_t offset);
+
+static __inline u_int16_t bus_space_read_2(bus_space_tag_t tag,
+ bus_space_handle_t handle,
+ bus_size_t offset);
+
+static __inline u_int32_t bus_space_read_4(bus_space_tag_t tag,
+ bus_space_handle_t handle,
+ bus_size_t offset);
+
+static __inline u_int8_t
+bus_space_read_1(bus_space_tag_t tag, bus_space_handle_t handle,
+ bus_size_t offset)
+{
+#if defined (_MACHINE_BUS_PIO_H_)
+#if defined (_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ return (inb(handle + offset));
+#endif
+#if defined (_MACHINE_BUS_MEMIO_H_)
+ return (readb(handle + offset));
+#endif
+}
+
+static __inline u_int16_t
+bus_space_read_2(bus_space_tag_t tag, bus_space_handle_t handle,
+ bus_size_t offset)
+{
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ return (inw(handle + offset));
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ return (readw(handle + offset));
+#endif
+}
+
+static __inline u_int32_t
+bus_space_read_4(bus_space_tag_t tag, bus_space_handle_t handle,
+ bus_size_t offset)
+{
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ return (inl(handle + offset));
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ return (readl(handle + offset));
+#endif
+}
+
+#if 0 /* Cause a link error for bus_space_read_8 */
+#define bus_space_read_8(t, h, o) !!! bus_space_read_8 unimplemented !!!
+#endif
+
+/*
+ * Read `count' 1, 2, 4, or 8 byte quantities from bus space
+ * described by tag/handle/offset and copy into buffer provided.
+ */
+static __inline void bus_space_read_multi_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr,
+ size_t count);
+
+static __inline void bus_space_read_multi_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr,
+ size_t count);
+
+static __inline void bus_space_read_multi_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr,
+ size_t count);
+
+static __inline void
+bus_space_read_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr, size_t count)
+{
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--)
+ *addr++ = inb(bsh + offset);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--)
+ *addr++ = readb(bsh + offset);
+#endif
+}
+
+static __inline void
+bus_space_read_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--)
+ *addr++ = inw(baddr);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--)
+ *addr++ = readw(baddr);
+#endif
+}
+
+static __inline void
+bus_space_read_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--)
+ *addr++ = inl(baddr);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--)
+ *addr++ = readl(baddr);
+#endif
+}
+
+#if 0 /* Cause a link error for bus_space_read_multi_8 */
+#define bus_space_read_multi_8 !!! bus_space_read_multi_8 unimplemented !!!
+#endif
+
+/*
+ * Read `count' 1, 2, 4, or 8 byte quantities from bus space
+ * described by tag/handle and starting at `offset' and copy into
+ * buffer provided.
+ */
+static __inline void bus_space_read_region_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr,
+ size_t count);
+
+static __inline void bus_space_read_region_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr,
+ size_t count);
+
+static __inline void bus_space_read_region_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr,
+ size_t count);
+
+
+static __inline void
+bus_space_read_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--) {
+ *addr++ = inb(baddr);
+ baddr += 1;
+ }
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--) {
+ *addr++ = readb(baddr);
+ baddr += 1;
+ }
+#endif
+}
+
+static __inline void
+bus_space_read_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--) {
+ *addr++ = inw(baddr);
+ baddr += 2;
+ }
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--) {
+ *addr++ = readw(baddr);
+ baddr += 2;
+ }
+#endif
+}
+
+static __inline void
+bus_space_read_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--) {
+ *addr++ = inl(baddr);
+ baddr += 4;
+ }
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--) {
+ *addr++ = readl(baddr);
+ baddr += 4;
+ }
+#endif
+}
+
+#if 0 /* Cause a link error for bus_space_read_region_8 */
+#define bus_space_read_region_8 !!! bus_space_read_region_8 unimplemented !!!
+#endif
+
+/*
+ * Write the 1, 2, 4, or 8 byte value `value' to bus space
+ * described by tag/handle/offset.
+ */
+
+static __inline void bus_space_write_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value);
+
+static __inline void bus_space_write_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value);
+
+static __inline void bus_space_write_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value);
+
+static __inline void
+bus_space_write_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value)
+{
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ outb(bsh + offset, value);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ writeb(bsh + offset, value);
+#endif
+}
+
+static __inline void
+bus_space_write_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value)
+{
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ outw(bsh + offset, value);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ writew(bsh + offset, value);
+#endif
+}
+
+static __inline void
+bus_space_write_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value)
+{
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ outl(bsh + offset, value);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ writel(bsh + offset, value);
+#endif
+}
+
+#if 0 /* Cause a link error for bus_space_write_8 */
+#define bus_space_write_8 !!! bus_space_write_8 not implemented !!!
+#endif
+
+/*
+ * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
+ * provided to bus space described by tag/handle/offset.
+ */
+
+static __inline void bus_space_write_multi_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int8_t *addr,
+ size_t count);
+static __inline void bus_space_write_multi_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int16_t *addr,
+ size_t count);
+
+static __inline void bus_space_write_multi_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int32_t *addr,
+ size_t count);
+
+static __inline void
+bus_space_write_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int8_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--)
+ outb(baddr, *addr++);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--)
+ writeb(baddr, *addr++);
+#endif
+}
+
+static __inline void
+bus_space_write_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int16_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--)
+ outw(baddr, *addr++);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--)
+ writew(baddr, *addr++);
+#endif
+}
+
+static __inline void
+bus_space_write_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int32_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--)
+ outl(baddr, *addr++);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--)
+ writel(baddr, *addr++);
+#endif
+}
+
+#if 0 /* Cause a link error for bus_space_write_multi_8 */
+#define bus_space_write_multi_8(t, h, o, a, c) \
+ !!! bus_space_write_multi_8 unimplemented !!!
+#endif
+
+/*
+ * Write `count' 1, 2, 4, or 8 byte quantities from the buffer provided
+ * to bus space described by tag/handle starting at `offset'.
+ */
+
+static __inline void bus_space_write_region_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int8_t *addr,
+ size_t count);
+static __inline void bus_space_write_region_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int16_t *addr,
+ size_t count);
+static __inline void bus_space_write_region_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int32_t *addr,
+ size_t count);
+
+static __inline void
+bus_space_write_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int8_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--) {
+ outb(baddr, *addr++);
+ baddr += 1;
+ }
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--) {
+ writeb(baddr, *addr++);
+ baddr += 1;
+ }
+#endif
+}
+
+static __inline void
+bus_space_write_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int16_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--) {
+ outw(baddr, *addr++);
+ baddr += 2;
+ }
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--) {
+ writew(baddr, *addr++);
+ baddr += 2;
+ }
+#endif
+}
+
+static __inline void
+bus_space_write_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int32_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--) {
+ outl(baddr, *addr++);
+ baddr += 4;
+ }
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--) {
+ writel(baddr, *addr++);
+ baddr += 4;
+ }
+#endif
+}
+
+#if 0 /* Cause a link error for bus_space_write_region_8 */
+#define bus_space_write_region_8 \
+ !!! bus_space_write_region_8 unimplemented !!!
+#endif
+
+/*
+ * Write the 1, 2, 4, or 8 byte value `val' to bus space described
+ * by tag/handle/offset `count' times.
+ */
+
+static __inline void bus_space_set_multi_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ u_int8_t value, size_t count);
+static __inline void bus_space_set_multi_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ u_int16_t value, size_t count);
+static __inline void bus_space_set_multi_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ u_int32_t value, size_t count);
+
+static __inline void
+bus_space_set_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value, size_t count)
+{
+ bus_addr_t addr = bsh + offset;
+
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--)
+ outb(addr, value);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--)
+ writeb(addr, value);
+#endif
+}
+
+static __inline void
+bus_space_set_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value, size_t count)
+{
+ bus_addr_t addr = bsh + offset;
+
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--)
+ outw(addr, value);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--)
+ writew(addr, value);
+#endif
+}
+
+static __inline void
+bus_space_set_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value, size_t count)
+{
+ bus_addr_t addr = bsh + offset;
+
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ while (count--)
+ outl(addr, value);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ while (count--)
+ writel(addr, value);
+#endif
+}
+
+#if 0 /* Cause a link error for bus_space_set_multi_8 */
+#define bus_space_set_multi_8 !!! bus_space_set_multi_8 unimplemented !!!
+#endif
+
+/*
+ * Write `count' 1, 2, 4, or 8 byte value `val' to bus space described
+ * by tag/handle starting at `offset'.
+ */
+
+static __inline void bus_space_set_region_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value,
+ size_t count);
+static __inline void bus_space_set_region_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value,
+ size_t count);
+static __inline void bus_space_set_region_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value,
+ size_t count);
+
+static __inline void
+bus_space_set_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value, size_t count)
+{
+ bus_addr_t addr = bsh + offset;
+
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ for (; count != 0; count--, addr++)
+ outb(addr, value);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ for (; count != 0; count--, addr++)
+ writeb(addr, value);
+#endif
+}
+
+static __inline void
+bus_space_set_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value, size_t count)
+{
+ bus_addr_t addr = bsh + offset;
+
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ for (; count != 0; count--, addr += 2)
+ outw(addr, value);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ for (; count != 0; count--, addr += 2)
+ writew(addr, value);
+#endif
+}
+
+static __inline void
+bus_space_set_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value, size_t count)
+{
+ bus_addr_t addr = bsh + offset;
+
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ for (; count != 0; count--, addr += 4)
+ outl(addr, value);
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ for (; count != 0; count--, addr += 4)
+ writel(addr, value);
+#endif
+}
+
+#if 0 /* Cause a link error for bus_space_set_region_8 */
+#define bus_space_set_region_8 !!! bus_space_set_region_8 unimplemented !!!
+#endif
+
+/*
+ * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
+ * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
+ */
+
+static __inline void bus_space_copy_region_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh1,
+ bus_size_t off1,
+ bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count);
+
+static __inline void bus_space_copy_region_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh1,
+ bus_size_t off1,
+ bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count);
+
+static __inline void bus_space_copy_region_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh1,
+ bus_size_t off1,
+ bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count);
+
+static __inline void
+bus_space_copy_region_1(bus_space_tag_t tag, bus_space_handle_t bsh1,
+ bus_size_t off1, bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count)
+{
+ bus_addr_t addr1 = bsh1 + off1;
+ bus_addr_t addr2 = bsh2 + off2;
+
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ {
+ if (addr1 >= addr2) {
+ /* src after dest: copy forward */
+ for (; count != 0; count--, addr1++, addr2++)
+ outb(addr2, inb(addr1));
+ } else {
+ /* dest after src: copy backwards */
+ for (addr1 += (count - 1), addr2 += (count - 1);
+ count != 0; count--, addr1--, addr2--)
+ outb(addr2, inb(addr1));
+ }
+ }
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ {
+ if (addr1 >= addr2) {
+ /* src after dest: copy forward */
+ for (; count != 0; count--, addr1++, addr2++)
+ writeb(addr2, readb(addr1));
+ } else {
+ /* dest after src: copy backwards */
+ for (addr1 += (count - 1), addr2 += (count - 1);
+ count != 0; count--, addr1--, addr2--)
+ writeb(addr2, readb(addr1));
+ }
+ }
+#endif
+}
+
+static __inline void
+bus_space_copy_region_2(bus_space_tag_t tag, bus_space_handle_t bsh1,
+ bus_size_t off1, bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count)
+{
+ bus_addr_t addr1 = bsh1 + off1;
+ bus_addr_t addr2 = bsh2 + off2;
+
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ {
+ if (addr1 >= addr2) {
+ /* src after dest: copy forward */
+ for (; count != 0; count--, addr1 += 2, addr2 += 2)
+ outw(addr2, inw(addr1));
+ } else {
+ /* dest after src: copy backwards */
+ for (addr1 += 2 * (count - 1), addr2 += 2 * (count - 1);
+ count != 0; count--, addr1 -= 2, addr2 -= 2)
+ outw(addr2, inw(addr1));
+ }
+ }
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ {
+ if (addr1 >= addr2) {
+ /* src after dest: copy forward */
+ for (; count != 0; count--, addr1 += 2, addr2 += 2)
+ writew(addr2, readw(addr1));
+ } else {
+ /* dest after src: copy backwards */
+ for (addr1 += 2 * (count - 1), addr2 += 2 * (count - 1);
+ count != 0; count--, addr1 -= 2, addr2 -= 2)
+ writew(addr2, readw(addr1));
+ }
+ }
+#endif
+}
+
+static __inline void
+bus_space_copy_region_4(bus_space_tag_t tag, bus_space_handle_t bsh1,
+ bus_size_t off1, bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count)
+{
+ bus_addr_t addr1 = bsh1 + off1;
+ bus_addr_t addr2 = bsh2 + off2;
+
+#if defined(_MACHINE_BUS_PIO_H_)
+#if defined(_MACHINE_BUS_MEMIO_H_)
+ if (tag == IA64_BUS_SPACE_IO)
+#endif
+ {
+ if (addr1 >= addr2) {
+ /* src after dest: copy forward */
+ for (; count != 0; count--, addr1 += 4, addr2 += 4)
+ outl(addr2, inl(addr1));
+ } else {
+ /* dest after src: copy backwards */
+ for (addr1 += 4 * (count - 1), addr2 += 4 * (count - 1);
+ count != 0; count--, addr1 -= 4, addr2 -= 4)
+ outl(addr2, inl(addr1));
+ }
+ }
+#endif
+#if defined(_MACHINE_BUS_MEMIO_H_)
+#if defined(_MACHINE_BUS_PIO_H_)
+ else
+#endif
+ {
+ if (addr1 >= addr2) {
+ /* src after dest: copy forward */
+ for (; count != 0; count--, addr1 += 4, addr2 += 4)
+ writel(addr2, readl(addr1));
+ } else {
+ /* dest after src: copy backwards */
+ for (addr1 += 4 * (count - 1), addr2 += 4 * (count - 1);
+ count != 0; count--, addr1 -= 4, addr2 -= 4)
+ writel(addr2, readl(addr1));
+ }
+ }
+#endif
+}
+
+#endif /* defined(_MACHINE_BUS_PIO_H_) || defined(_MACHINE_BUS_MEMIO_H_) */
+
+#if 0 /* Cause a link error for bus_space_copy_8 */
+#define bus_space_copy_region_8 !!! bus_space_copy_region_8 unimplemented !!!
+#endif
+
+/*
+ * Bus read/write barrier methods.
+ *
+ * void bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t bsh,
+ * bus_size_t offset, bus_size_t len, int flags);
+ *
+ */
+#define BUS_SPACE_BARRIER_READ 0x01 /* force read barrier */
+#define BUS_SPACE_BARRIER_WRITE 0x02 /* force write barrier */
+
+static __inline void
+bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, bus_size_t len, int flags)
+{
+ ia64_mf();
+}
+
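
A brief sketch of the accessors above in use (not part of this commit): the
0x10 register offset and the example_* names are made up, and the tag/handle
pair would normally come from bus_space_map() or a bus resource.

/* Illustrative only. */
static __inline u_int32_t
example_read_status(bus_space_tag_t tag, bus_space_handle_t handle)
{
	/* Read a hypothetical 32-bit status register at offset 0x10. */
	return (bus_space_read_4(tag, handle, 0x10));
}

static __inline void
example_ack_status(bus_space_tag_t tag, bus_space_handle_t handle,
    u_int32_t bits)
{
	/* Write back the bits to acknowledge, then order the store. */
	bus_space_write_4(tag, handle, 0x10, bits);
	bus_space_barrier(tag, handle, 0x10, 4, BUS_SPACE_BARRIER_WRITE);
}
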
+
+/*
+ * Flags used in various bus DMA methods.
+ */
+#define BUS_DMA_WAITOK 0x00 /* safe to sleep (pseudo-flag) */
+#define BUS_DMA_NOWAIT 0x01 /* not safe to sleep */
+#define BUS_DMA_ALLOCNOW 0x02 /* perform resource allocation now */
+#define BUS_DMAMEM_NOSYNC 0x04 /* map memory to not require sync */
+#define BUS_DMA_ISA 0x10 /* map memory for ISA dma */
+#define BUS_DMA_BUS2 0x20 /* placeholders for bus functions... */
+#define BUS_DMA_BUS3 0x40
+#define BUS_DMA_BUS4 0x80
+
+/* Forwards needed by prototypes below. */
+struct mbuf;
+struct uio;
+
+/*
+ * bus_dmasync_op_t
+ *
+ * Operations performed by bus_dmamap_sync().
+ */
+typedef enum {
+ BUS_DMASYNC_PREREAD,
+ BUS_DMASYNC_POSTREAD,
+ BUS_DMASYNC_PREWRITE,
+ BUS_DMASYNC_POSTWRITE
+} bus_dmasync_op_t;
+
+/*
+ * bus_dma_tag_t
+ *
+ * A machine-dependent opaque type describing the characteristics
+ * of how to perform DMA mappings. This structure encapsulates
+ * information concerning address and alignment restrictions, number
+ * of S/G segments, amount of data per S/G segment, etc.
+ */
+typedef struct bus_dma_tag *bus_dma_tag_t;
+
+/*
+ * bus_dmamap_t
+ *
+ * DMA mapping instance information.
+ */
+typedef struct bus_dmamap *bus_dmamap_t;
+
+/*
+ * bus_dma_segment_t
+ *
+ * Describes a single contiguous DMA transaction. Values
+ * are suitable for programming into DMA registers.
+ */
+typedef struct bus_dma_segment {
+ bus_addr_t ds_addr; /* DMA address */
+ bus_size_t ds_len; /* length of transfer */
+} bus_dma_segment_t;
+
+/*
+ * A function that returns 1 if the address cannot be accessed by
+ * a device and 0 if it can be.
+ */
+typedef int bus_dma_filter_t(void *, bus_addr_t);
+
+/*
+ * Allocate a device specific dma_tag encapsulating the constraints of
+ * the parent tag in addition to other restrictions specified:
+ *
+ * alignment: alignment for segments.
+ * boundary: Boundary that segments cannot cross.
+ * lowaddr: Low restricted address that cannot appear in a mapping.
+ * highaddr: High restricted address that cannot appear in a mapping.
+ * filtfunc: An optional function to further test if an address
+ * within the range of lowaddr and highaddr cannot appear
+ * in a mapping.
+ * filtfuncarg: An argument that will be passed to filtfunc in addition
+ * to the address to test.
+ * maxsize: Maximum mapping size supported by this tag.
+ * nsegments: Number of discontinuities allowed in maps.
+ * maxsegsz: Maximum size of a segment in the map.
+ * flags: Bus DMA flags.
+ * dmat: A pointer to set to a valid dma tag should the return
+ * value of this function indicate success.
+ */
+/* XXX Should probably allow specification of alignment */
+int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_size_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_dma_filter_t *filtfunc,
+ void *filtfuncarg, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat);
+
+int bus_dma_tag_destroy(bus_dma_tag_t dmat);
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map);
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with bus_dmamap_load is also allocated.
+ */
+int bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp);
+
+/*
+ * Free a piece of memory and its associated dmamap that were allocated
+ * via bus_dmamem_alloc.
+ */
+void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map);
+
+/*
+ * A function that processes a successfully loaded dma map or an error
+ * from a delayed load map.
+ */
+typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
+
+/*
+ * Map the buffer buf into bus space using the dmamap map.
+ */
+int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, bus_dmamap_callback_t *callback,
+ void *callback_arg, int flags);
+
+/*
+ * Perform a synchronization operation on the given map.
+ */
+void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
+#define bus_dmamap_sync(dmat, dmamap, op) \
+ if ((dmamap) != NULL) \
+ _bus_dmamap_sync(dmat, dmamap, op)
+
+/*
+ * Release the mapping held by map.
+ */
+void _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map);
+#define bus_dmamap_unload(dmat, dmamap) \
+ if ((dmamap) != NULL) \
+ _bus_dmamap_unload(dmat, dmamap)
+
+#endif /* _MACHINE_BUS_H_ */
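
Not part of this commit, but a sketch of how a driver might use the DMA
interface above: the NULL parent tag, the 4 KB buffer, the 32-bit address
limit, and all of the example_* names are assumptions, and error unwinding is
omitted for brevity.

/* Illustrative only. */
static bus_dma_tag_t example_tag;
static bus_dmamap_t example_map;
static void *example_buf;

static void
example_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp = arg;

	/* Record the bus address of the single segment on success. */
	if (error == 0 && nseg == 1)
		*busaddrp = segs[0].ds_addr;
}

static int
example_dma_setup(bus_addr_t *busaddrp)
{
	int error;

	/* One contiguous 4 KB segment, kept below 4 GB. */
	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, 4096, 1, 4096, 0, &example_tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(example_tag, &example_buf, BUS_DMA_NOWAIT,
	    &example_map);
	if (error)
		return (error);
	return (bus_dmamap_load(example_tag, example_map, example_buf, 4096,
	    example_dma_callback, busaddrp, BUS_DMA_NOWAIT));
}
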
diff --git a/sys/ia64/include/bus_memio.h b/sys/ia64/include/bus_memio.h
new file mode 100644
index 0000000..a3b5e9e
--- /dev/null
+++ b/sys/ia64/include/bus_memio.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 1997 Justin Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_BUS_MEMIO_H_
+#define _MACHINE_BUS_MEMIO_H_
+#endif /* _MACHINE_BUS_MEMIO_H_ */
diff --git a/sys/ia64/include/bus_pio.h b/sys/ia64/include/bus_pio.h
new file mode 100644
index 0000000..00f6e3d
--- /dev/null
+++ b/sys/ia64/include/bus_pio.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 1997 Justin Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_BUS_PIO_H_
+#define _MACHINE_BUS_PIO_H_
+#endif /* _MACHINE_BUS_PIO_H_ */
diff --git a/sys/ia64/include/clock.h b/sys/ia64/include/clock.h
new file mode 100644
index 0000000..a5f1b62
--- /dev/null
+++ b/sys/ia64/include/clock.h
@@ -0,0 +1,25 @@
+/*
+ * Kernel interface to machine-dependent clock driver.
+ * Garrett Wollman, September 1994.
+ * This file is in the public domain.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CLOCK_H_
+#define _MACHINE_CLOCK_H_
+
+#ifdef _KERNEL
+
+extern int disable_rtc_set;
+extern int wall_cmos_clock;
+extern int adjkerntz;
+
+void DELAY __P((int usec));
+int sysbeep __P((int pitch, int period));
+int acquire_timer2 __P((int mode));
+int release_timer2 __P((void));
+
+#endif
+
+#endif /* !_MACHINE_CLOCK_H_ */
diff --git a/sys/ia64/include/clockvar.h b/sys/ia64/include/clockvar.h
new file mode 100644
index 0000000..f13d1df
--- /dev/null
+++ b/sys/ia64/include/clockvar.h
@@ -0,0 +1,53 @@
+/* $FreeBSD$ */
+/* $NetBSD: clockvar.h,v 1.4 1997/06/22 08:02:18 jonathan Exp $ */
+
+/*
+ * Copyright (c) 1994, 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * Definitions for cpu-independent clock handling for the ia64.
+ */
+
+/*
+ * clocktime structure:
+ *
+ * structure passed to TOY clocks when setting them. Broken out this
+ * way so that the time_t -> field conversion can be shared.
+ */
+struct clocktime {
+ int year; /* year - 1900 */
+ int mon; /* month (1 - 12) */
+ int day; /* day (1 - 31) */
+ int hour; /* hour (0 - 23) */
+ int min; /* minute (0 - 59) */
+ int sec; /* second (0 - 59) */
+ int dow; /* day of week (0 - 6; 0 = Sunday) */
+};
+
+#include "clock_if.h"
+
+void clockattach(device_t);
diff --git a/sys/ia64/include/console.h b/sys/ia64/include/console.h
new file mode 100644
index 0000000..fe7f10e
--- /dev/null
+++ b/sys/ia64/include/console.h
@@ -0,0 +1,10 @@
+/* $FreeBSD$ */
+
+#ifndef _MACHINE_CONSOLE_H_
+#define _MACHINE_CONSOLE_H_
+
+#include <sys/fbio.h>
+#include <sys/kbio.h>
+#include <sys/consio.h>
+
+#endif /* !_MACHINE_CONSOLE_H_ */
diff --git a/sys/ia64/include/cpu.h b/sys/ia64/include/cpu.h
new file mode 100644
index 0000000..3da3331
--- /dev/null
+++ b/sys/ia64/include/cpu.h
@@ -0,0 +1,174 @@
+/* $FreeBSD$ */
+/* From: NetBSD: cpu.h,v 1.18 1997/09/23 23:17:49 mjacob Exp */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: cpu.h 1.16 91/03/25$
+ *
+ * @(#)cpu.h 8.4 (Berkeley) 1/5/94
+ */
+
+#ifndef _MACHINE_CPU_H_
+#define _MACHINE_CPU_H_
+
+/*
+ * Exported definitions unique to ia64 cpu support.
+ */
+
+#include <machine/frame.h>
+
+/*
+ * Arguments to hardclock and gatherstats encapsulate the previous
+ * machine state in an opaque clockframe. On the ia64, we use
+ * what we push on an interrupt (a trapframe).
+ */
+struct clockframe {
+ struct trapframe cf_tf;
+};
+#define CLKF_USERMODE(framep) \
+ (((framep)->cf_tf.tf_cr_ipsr & IA64_PSR_CPL) == IA64_PSR_CPL_USER)
+#define CLKF_BASEPRI(framep) \
+ (((framep)->cf_tf.tf_cr_ipsr & IA64_PSR_I) == 0)
+#define CLKF_PC(framep) ((framep)->cf_tf.tf_cr_iip)
+#define CLKF_INTR(framep) (intr_nesting_level >= 2)
+
+/*
+ * Preempt the current process if in interrupt from user mode,
+ * or after the current trap/syscall if in system mode.
+ */
+#define need_resched() do { want_resched = 1; aston(); } while (0)
+
+#define resched_wanted() want_resched
+
+/*
+ * Give a profiling tick to the current process when the user profiling
+ * buffer pages are invalid. On the hp300, request an ast to send us
+ * through trap, marking the proc as needing a profiling tick.
+ */
+#define need_proftick(p) \
+ do { (p)->p_flag |= P_OWEUPC; aston(); } while (0)
+
+/*
+ * Notify the current process (p) that it has a signal pending,
+ * process as soon as possible.
+ */
+#define signotify(p) aston()
+
+#define aston() PCPU_SET(astpending, 1)
+
+#ifdef _KERNEL
+extern u_int32_t intr_nesting_level; /* bookkeeping only; counts sw intrs */
+extern u_int32_t want_resched; /* resched() was called */
+#endif
+
+
+/*
+ * CTL_MACHDEP definitions.
+ */
+#define CPU_CONSDEV 1 /* dev_t: console terminal device */
+#define CPU_ROOT_DEVICE 2 /* string: root device name */
+#define CPU_UNALIGNED_PRINT 3 /* int: print unaligned accesses */
+#define CPU_UNALIGNED_FIX 4 /* int: fix unaligned accesses */
+#define CPU_UNALIGNED_SIGBUS 5 /* int: SIGBUS unaligned accesses */
+#define CPU_BOOTED_KERNEL 6 /* string: booted kernel name */
+#define CPU_ADJKERNTZ 7 /* int: timezone offset (seconds) */
+#define CPU_DISRTCSET 8 /* int: disable resettodr() call */
+#define CPU_WALLCLOCK 9 /* int: indicates wall CMOS clock */
+#define CPU_MAXID 9 /* 9 valid machdep IDs */
+
+#define CTL_MACHDEP_NAMES { \
+ { 0, 0 }, \
+ { "console_device", CTLTYPE_STRUCT }, \
+ { "root_device", CTLTYPE_STRING }, \
+ { "unaligned_print", CTLTYPE_INT }, \
+ { "unaligned_fix", CTLTYPE_INT }, \
+ { "unaligned_sigbus", CTLTYPE_INT }, \
+ { "booted_kernel", CTLTYPE_STRING }, \
+ { "adjkerntz", CTLTYPE_INT }, \
+ { "disable_rtc_set", CTLTYPE_INT }, \
+ { "wall_cmos_clock", CTLTYPE_INT }, \
+}
+
+#ifdef _KERNEL
+
+struct pcb;
+struct proc;
+struct reg;
+struct rpb;
+struct trapframe;
+
+extern struct proc *fpcurproc;
+extern struct rpb *hwrpb;
+extern volatile int mc_expected, mc_received;
+
+void ast __P((struct trapframe *));
+int badaddr __P((void *, size_t));
+int badaddr_read __P((void *, size_t, void *));
+void child_return __P((struct proc *p));
+u_int64_t console_restart __P((u_int64_t, u_int64_t, u_int64_t));
+void do_sir __P((void));
+void dumpconf __P((void));
+void exception_return __P((void)); /* MAGIC */
+void frametoreg __P((struct trapframe *, struct reg *));
+long fswintrberr __P((void)); /* MAGIC */
+int ia64_pa_access __P((u_long));
+void ia64_init __P((void));
+void ia64_fpstate_check __P((struct proc *p));
+void ia64_fpstate_save __P((struct proc *p, int write));
+void ia64_fpstate_drop __P((struct proc *p));
+void ia64_fpstate_switch __P((struct proc *p));
+void init_prom_interface __P((struct rpb*));
+void interrupt
+ __P((unsigned long, unsigned long, unsigned long, struct trapframe *));
+void machine_check
+ __P((unsigned long, struct trapframe *, unsigned long, unsigned long));
+u_int64_t hwrpb_checksum __P((void));
+void hwrpb_restart_setup __P((void));
+void regdump __P((struct trapframe *));
+void regtoframe __P((struct reg *, struct trapframe *));
+int savectx __P((struct pcb *));
+void restorectx __P((struct pcb *));
+void set_iointr __P((void (*)(void *, unsigned long)));
+void switch_exit __P((struct proc *)); /* MAGIC */
+void switch_trampoline __P((void)); /* MAGIC */
+void syscall __P((u_int64_t, struct trapframe *));
+void trap __P((int vector, struct trapframe *framep));
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_CPU_H_ */
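
A sketch (not part of this commit) of how a clock interrupt handler might
consult the clockframe macros above inside a kernel compilation; the function
name is hypothetical and the accounting itself is elided.

/* Illustrative only. */
static void
example_clock_intr(struct clockframe *cf)
{
	if (CLKF_USERMODE(cf)) {
		/* Tick taken from user mode: charge user time. */
	} else if (CLKF_INTR(cf)) {
		/* Nested interrupt: charge interrupt time. */
	} else {
		/* Kernel mode: charge system time. */
	}
}
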
diff --git a/sys/ia64/include/cpufunc.h b/sys/ia64/include/cpufunc.h
new file mode 100644
index 0000000..3128f70
--- /dev/null
+++ b/sys/ia64/include/cpufunc.h
@@ -0,0 +1,197 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPUFUNC_H_
+#define _MACHINE_CPUFUNC_H_
+
+#ifdef _KERNEL
+
+#include <sys/types.h>
+
+#ifdef __GNUC__
+
+static __inline void
+breakpoint(void)
+{
+ __asm __volatile("break 0x80100"); /* XXX use linux value */
+}
+
+#endif
+
+static __inline u_int8_t
+inb(u_int port)
+{
+ return 0; /* TODO: implement this */
+}
+
+static __inline u_int16_t
+inw(u_int port)
+{
+ return 0; /* TODO: implement this */
+}
+
+static __inline u_int32_t
+inl(u_int port)
+{
+ return 0; /* TODO: implement this */
+}
+
+static __inline void
+insb(u_int port, void *addr, size_t count)
+{
+ u_int8_t *p = addr;
+ while (count--)
+ *p++ = inb(port);
+}
+
+static __inline void
+insw(u_int port, void *addr, size_t count)
+{
+ u_int16_t *p = addr;
+ while (count--)
+ *p++ = inw(port);
+}
+
+static __inline void
+insl(u_int port, void *addr, size_t count)
+{
+ u_int32_t *p = addr;
+ while (count--)
+ *p++ = inl(port);
+}
+
+static __inline void
+outb(u_int port, u_int8_t data)
+{
+ return; /* TODO: implement this */
+}
+
+static __inline void
+outw(u_int port, u_int16_t data)
+{
+ return; /* TODO: implement this */
+}
+
+static __inline void
+outl(u_int port, u_int32_t data)
+{
+ return; /* TODO: implement this */
+}
+
+static __inline void
+outsb(u_int port, const void *addr, size_t count)
+{
+ const u_int8_t *p = addr;
+ while (count--)
+ outb(port, *p++);
+}
+
+static __inline void
+outsw(u_int port, const void *addr, size_t count)
+{
+ const u_int16_t *p = addr;
+ while (count--)
+ outw(port, *p++);
+}
+
+static __inline void
+outsl(u_int port, const void *addr, size_t count)
+{
+ const u_int32_t *p = addr;
+ while (count--)
+ outl(port, *p++);
+}
+
+static __inline u_int8_t
+readb(u_int addr)
+{
+ return 0; /* TODO: implement this */
+}
+
+static __inline u_int16_t
+readw(u_int addr)
+{
+ return 0; /* TODO: implement this */
+}
+
+static __inline u_int32_t
+readl(u_int addr)
+{
+ return 0; /* TODO: implement this */
+}
+
+static __inline void
+writeb(u_int addr, u_int8_t data)
+{
+ return; /* TODO: implement this */
+}
+
+static __inline void
+writew(u_int addr, u_int16_t data)
+{
+ return; /* TODO: implement this */
+}
+
+static __inline void
+writel(u_int addr, u_int32_t data)
+{
+ return; /* TODO: implement this */
+}
+
+/*
+ * Bogus interrupt manipulation
+ */
+static __inline void
+disable_intr(void)
+{
+ __asm __volatile ("rsm psr.i;;");
+}
+
+static __inline void
+enable_intr(void)
+{
+ __asm __volatile (";; ssm psr.i;; srlz.d");
+}
+
+static __inline u_int
+save_intr(void)
+{
+ u_int psr;
+ __asm __volatile ("mov %0=psr;;" : "=r" (psr));
+ return psr;
+}
+
+static __inline void
+restore_intr(u_int psr)
+{
+ __asm __volatile ("mov psr.l=%0;; srlz.d" :: "r" (psr));
+}
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_CPUFUNC_H_ */
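
A sketch of the interrupt helpers above in use (not part of this commit):
save_intr captures the current psr, disable_intr clears psr.i around the
critical region, and restore_intr writes the saved value back. The counter and
the function name are assumptions.

/* Illustrative only. */
static __inline void
example_critical_increment(volatile u_int64_t *counter)
{
	u_int saved;

	saved = save_intr();	/* remember psr, including psr.i */
	disable_intr();		/* rsm psr.i */
	(*counter)++;		/* work that must not be interrupted */
	restore_intr(saved);	/* restore the saved psr */
}
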
diff --git a/sys/ia64/include/db_machdep.h b/sys/ia64/include/db_machdep.h
new file mode 100644
index 0000000..1517a54
--- /dev/null
+++ b/sys/ia64/include/db_machdep.h
@@ -0,0 +1,91 @@
+/* $FreeBSD$ */
+/* $NetBSD: db_machdep.h,v 1.6 1997/09/06 02:02:25 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#ifndef _ALPHA_DB_MACHDEP_H_
+#define _ALPHA_DB_MACHDEP_H_
+
+/*
+ * Machine-dependent defines for new kernel debugger.
+ */
+
+#include <sys/param.h>
+#include <vm/vm.h>
+#include <machine/frame.h>
+
+#define DB_NO_AOUT
+
+typedef vm_offset_t db_addr_t; /* address - unsigned */
+typedef long db_expr_t; /* expression - signed */
+
+typedef struct trapframe db_regs_t;
+db_regs_t ddb_regs; /* register state */
+#define DDB_REGS (&ddb_regs)
+
+#define PC_REGS(regs) ((db_addr_t)(regs)->tf_regs[FRAME_PC])
+
+#define BKPT_INST 0x00000080 /* breakpoint instruction */
+#define BKPT_SIZE (4) /* size of breakpoint inst */
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define FIXUP_PC_AFTER_BREAK \
+ (ddb_regs.tf_regs[FRAME_PC] -= BKPT_SIZE);
+
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == ALPHA_KENTRY_IF && \
+ (code) == ALPHA_IF_CODE_BPT)
+#define IS_WATCHPOINT_TRAP(type, code) 0
+
+/*
+ * Functions needed for software single-stepping.
+ */
+
+/* No delay slots on Alpha. */
+#define next_instr_address(v, b) ((db_addr_t) ((b) ? (v) : ((v) + 4)))
+
+u_long db_register_value __P((db_regs_t *, int));
+int kdb_trap __P((unsigned long, unsigned long, unsigned long,
+ unsigned long, struct trapframe *));
+
+/*
+ * Pretty arbitrary
+ */
+#define DB_SMALL_VALUE_MAX 0x7fffffff
+#define DB_SMALL_VALUE_MIN (-0x400001)
+
+/*
+ * We define some of our own commands.
+ */
+#define DB_MACHINE_COMMANDS
+
+/*
+ * We use Elf64 symbols in DDB.
+ */
+#define DB_ELFSIZE 64
+
+#endif /* _ALPHA_DB_MACHDEP_H_ */
diff --git a/sys/ia64/include/efi.h b/sys/ia64/include/efi.h
new file mode 100644
index 0000000..077fe89
--- /dev/null
+++ b/sys/ia64/include/efi.h
@@ -0,0 +1,71 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_EFI_H_
+#define _MACHINE_EFI_H_
+
+/*
+ * Memory types.
+ */
+#define EFI_RESERVED_MEMORY_TYPE 0
+#define EFI_LOADER_CODE 1
+#define EFI_LOADER_DATA 2
+#define EFI_BOOT_SERVICES_CODE 3
+#define EFI_BOOT_SERVICES_DATA 4
+#define EFI_RUNTIME_SERVICES_CODE 5
+#define EFI_RUNTIME_SERVICES_DATA 6
+#define EFI_CONVENTIONAL_MEMORY 7
+#define EFI_UNUSABLE_MEMORY 8
+#define EFI_ACPI_RECLAIM_MEMORY 9
+#define EFI_ACPI_MEMORY_NVS 10
+#define EFI_MEMORY_MAPPED_IO 11
+#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12
+#define EFI_PAL_CODE 13
+
+struct efi_memory_descriptor {
+ u_int32_t emd_type;
+ vm_offset_t emd_physical_start;
+ vm_offset_t emd_virtual_start;
+ u_int64_t emd_number_of_pages;
+ u_int64_t emd_attribute;
+};
+
+/*
+ * Values for emd_attribute.
+ */
+#define EFI_MEMORY_UC 0x0000000000000001
+#define EFI_MEMORY_WC 0x0000000000000002
+#define EFI_MEMORY_WT 0x0000000000000004
+#define EFI_MEMORY_WB 0x0000000000000008
+#define EFI_MEMORY_UCE 0x0000000000000010
+#define EFI_MEMORY_WP 0x0000000000001000
+#define EFI_MEMORY_RP 0x0000000000002000
+#define EFI_MEMORY_XP 0x0000000000004000
+#define EFI_MEMORY_RUNTIME 0x8000000000000000
+
+#endif /* _MACHINE_EFI_H_ */
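As an illustrative sketch (not part of the header), boot code might walk an EFI memory map made of these descriptors to total up usable memory. The map pointer, entry count, and descriptor stride are assumptions here; EFI allows the firmware's descriptor size to be larger than sizeof(struct efi_memory_descriptor), so the stride is carried separately rather than derived from the struct:

static u_int64_t
count_usable_pages(void *map, int nentries, int desc_size)
{
        struct efi_memory_descriptor *md;
        u_int64_t pages = 0;
        int i;

        for (i = 0; i < nentries; i++) {
                /* Step by the firmware-supplied stride, not sizeof(*md). */
                md = (struct efi_memory_descriptor *)
                    ((char *)map + i * desc_size);
                if (md->emd_type == EFI_CONVENTIONAL_MEMORY &&
                    (md->emd_attribute & EFI_MEMORY_WB))
                        pages += md->emd_number_of_pages;
        }
        return (pages);
}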
diff --git a/sys/ia64/include/elf.h b/sys/ia64/include/elf.h
new file mode 100644
index 0000000..3ed5220
--- /dev/null
+++ b/sys/ia64/include/elf.h
@@ -0,0 +1,203 @@
+/*-
+ * Copyright (c) 1996-1997 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ELF_H_
+#define _MACHINE_ELF_H_ 1
+
+/*
+ * ELF definitions for the IA-64 architecture.
+ */
+
+#include <sys/elf64.h> /* Definitions common to all 64 bit architectures. */
+
+#define __ELF_WORD_SIZE 64 /* Used by <sys/elf_generic.h> */
+#include <sys/elf_generic.h>
+
+#define ELF_ARCH EM_IA_64
+
+#define ELF_MACHINE_OK(x) ((x) == EM_IA_64)
+
+/*
+ * Auxiliary vector entries for passing information to the interpreter.
+ *
+ * The i386 supplement to the SVR4 ABI specification names this "auxv_t",
+ * but POSIX lays claim to all symbols ending with "_t".
+ */
+
+typedef struct { /* Auxiliary vector entry on initial stack */
+ int a_type; /* Entry type. */
+ union {
+ long a_val; /* Integer value. */
+ void *a_ptr; /* Address. */
+ void (*a_fcn)(void); /* Function pointer (not used). */
+ } a_un;
+} Elf64_Auxinfo;
+
+__ElfType(Auxinfo);
+
+/* Values for a_type. */
+#define AT_NULL 0 /* Terminates the vector. */
+#define AT_IGNORE 1 /* Ignored entry. */
+#define AT_EXECFD 2 /* File descriptor of program to load. */
+#define AT_PHDR 3 /* Program header of program already loaded. */
+#define AT_PHENT 4 /* Size of each program header entry. */
+#define AT_PHNUM 5 /* Number of program header entries. */
+#define AT_PAGESZ 6 /* Page size in bytes. */
+#define AT_BASE 7 /* Interpreter's base address. */
+#define AT_FLAGS 8 /* Flags (unused for i386). */
+#define AT_ENTRY 9 /* Where interpreter should transfer control. */
+
+/*
+ * The following non-standard values are used for passing information
+ * from John Polstra's testbed program to the dynamic linker. These
+ * are expected to go away soon.
+ *
+ * Unfortunately, these overlap the Linux non-standard values, so they
+ * must not be used in the same context.
+ */
+#define AT_BRK 10 /* Starting point for sbrk and brk. */
+#define AT_DEBUG 11 /* Debugging level. */
+
+/*
+ * The following non-standard values are used in Linux ELF binaries.
+ */
+#define AT_NOTELF 10 /* Program is not ELF ?? */
+#define AT_UID 11 /* Real uid. */
+#define AT_EUID 12 /* Effective uid. */
+#define AT_GID 13 /* Real gid. */
+#define AT_EGID 14 /* Effective gid. */
+
+#define AT_COUNT 15 /* Count of defined aux entry types. */
+
+/*
+ * Values for e_flags.
+ */
+#define EF_IA_64_MASKOS 0x00ff000f
+#define EF_IA_64_ABI64 0x00000010
+#define EF_IA_64_REDUCEDFP 0x00000020
+#define EF_IA_64_CONS_GP 0x00000040
+#define EF_IA_64_NOFUNCDESC_CONS_GP 0x00000080
+#define EF_IA_64_ABSOLUTE 0x00000100
+#define EF_IA_64_ARCH 0xff000000
+
+/*
+ * Section types.
+ */
+#define SHT_IA_64_EXT 0x70000000
+#define SHT_IA_64_UNWIND 0x70000001
+#define SHT_IA_64_LOPSREG 0x78000000
+#define SHT_IA_64_HIPSREG 0x7fffffff
+
+/*
+ * Section attribute flags.
+ */
+#define SHF_IA_64_SHORT 0x10000000
+#define SHF_IA_64_NORECOV 0x20000000
+
+/*
+ * Relocation types.
+ */
+
+/* Name Value Field Calculation */
+#define R_IA64_NONE 0 /* None */
+#define R_IA64_IMM14 0x21 /* immediate14 S + A */
+#define R_IA64_IMM22 0x22 /* immediate22 S + A */
+#define R_IA64_IMM64 0x23 /* immediate64 S + A */
+#define R_IA64_DIR32MSB 0x24 /* word32 MSB S + A */
+#define R_IA64_DIR32LSB 0x25 /* word32 LSB S + A */
+#define R_IA64_DIR64MSB 0x26 /* word64 MSB S + A */
+#define R_IA64_DIR64LSB 0x27 /* word64 LSB S + A */
+#define R_IA64_GPREL22 0x2a /* immediate22 @gprel(S + A) */
+#define R_IA64_GPREL64I 0x2b /* immediate64 @gprel(S + A) */
+#define R_IA64_GPREL64MSB 0x2e /* word64 MSB @gprel(S + A) */
+#define R_IA64_GPREL64LSB 0x2f /* word64 LSB @gprel(S + A) */
+#define R_IA64_LTOFF22 0x32 /* immediate22 @ltoff(S + A) */
+#define R_IA64_LTOFF64I 0x33 /* immediate64 @ltoff(S + A) */
+#define R_IA64_PLTOFF22 0x3a /* immediate22 @pltoff(S + A) */
+#define R_IA64_PLTOFF64I 0x3b /* immediate64 @pltoff(S + A) */
+#define R_IA64_PLTOFF64MSB 0x3e /* word64 MSB @pltoff(S + A) */
+#define R_IA64_PLTOFF64LSB 0x3f /* word64 LSB @pltoff(S + A) */
+#define R_IA64_FPTR64I 0x43 /* immediate64 @fptr(S + A) */
+#define R_IA64_FPTR32MSB 0x44 /* word32 MSB @fptr(S + A) */
+#define R_IA64_FPTR32LSB 0x45 /* word32 LSB @fptr(S + A) */
+#define R_IA64_FPTR64MSB 0x46 /* word64 MSB @fptr(S + A) */
+#define R_IA64_FPTR64LSB 0x47 /* word64 LSB @fptr(S + A) */
+#define R_IA64_PCREL21B 0x49 /* immediate21 form1 S + A - P */
+#define R_IA64_PCREL21M 0x4a /* immediate21 form2 S + A - P */
+#define R_IA64_PCREL21F 0x4b /* immediate21 form3 S + A - P */
+#define R_IA64_PCREL32MSB 0x4c /* word32 MSB S + A - P */
+#define R_IA64_PCREL32LSB 0x4d /* word32 LSB S + A - P */
+#define R_IA64_PCREL64MSB 0x4e /* word64 MSB S + A - P */
+#define R_IA64_PCREL64LSB 0x4f /* word64 LSB S + A - P */
+#define R_IA64_LTOFF_FPTR22 0x52 /* immediate22 @ltoff(@fptr(S + A)) */
+#define R_IA64_LTOFF_FPTR64I 0x53 /* immediate64 @ltoff(@fptr(S + A)) */
+#define R_IA64_LTOFF_FPTR32MSB 0x54 /* word32 MSB @ltoff(@fptr(S + A)) */
+#define R_IA64_LTOFF_FPTR32LSB 0x55 /* word32 LSB @ltoff(@fptr(S + A)) */
+#define R_IA64_LTOFF_FPTR64MSB 0x56 /* word64 MSB @ltoff(@fptr(S + A)) */
+#define R_IA64_LTOFF_FPTR64LSB 0x57 /* word64 LSB @ltoff(@fptr(S + A)) */
+#define R_IA64_SEGREL32MSB 0x5c /* word32 MSB @segrel(S + A) */
+#define R_IA64_SEGREL32LSB 0x5d /* word32 LSB @segrel(S + A) */
+#define R_IA64_SEGREL64MSB 0x5e /* word64 MSB @segrel(S + A) */
+#define R_IA64_SEGREL64LSB 0x5f /* word64 LSB @segrel(S + A) */
+#define R_IA64_SECREL32MSB 0x64 /* word32 MSB @secrel(S + A) */
+#define R_IA64_SECREL32LSB 0x65 /* word32 LSB @secrel(S + A) */
+#define R_IA64_SECREL64MSB 0x66 /* word64 MSB @secrel(S + A) */
+#define R_IA64_SECREL64LSB 0x67 /* word64 LSB @secrel(S + A) */
+#define R_IA64_REL32MSB 0x6c /* word32 MSB BD + A */
+#define R_IA64_REL32LSB 0x6d /* word32 LSB BD + A */
+#define R_IA64_REL64MSB 0x6e /* word64 MSB BD + A */
+#define R_IA64_REL64LSB 0x6f /* word64 LSB BD + A */
+#define R_IA64_LTV32MSB 0x74 /* word32 MSB S + A */
+#define R_IA64_LTV32LSB 0x75 /* word32 LSB S + A */
+#define R_IA64_LTV64MSB 0x76 /* word64 MSB S + A */
+#define R_IA64_LTV64LSB 0x77 /* word64 LSB S + A */
+#define R_IA64_IPLTMSB 0x80 /* function descriptor MSB special */
+#define R_IA64_IPLTLSB 0x81 /* function descriptor LSB special */
+#define R_IA64_SUB 0x85 /* immediate64 A - S */
+#define R_IA64_LTOFF22X 0x86 /* immediate22 special */
+#define R_IA64_LDXMOV 0x87 /* immediate22 special */
+
+/* Define "machine" characteristics */
+#define ELF_TARG_CLASS ELFCLASS64
+#define ELF_TARG_DATA ELFDATA2LSB
+#define ELF_TARG_MACH EM_IA_64
+#define ELF_TARG_VER 1
+
+#ifdef _KERNEL
+
+/*
+ * On the ia64 we load the dynamic linker where a userland call
+ * to mmap(0, ...) would put it. The rationale behind this
+ * calculation is that it leaves room for the heap to grow to
+ * its maximum allowed size.
+ */
+#define ELF_RTLD_ADDR(vmspace) \
+ (round_page((vm_offset_t)(vmspace)->vm_daddr + MAXDSIZ))
+
+#endif /* _KERNEL */
+#endif /* !_MACHINE_ELF_H_ */
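As a sketch of how startup code or a dynamic linker might consume the Elf64_Auxinfo entries defined above: the kernel places them on the initial stack after the environment strings, terminated by an AT_NULL entry. The function and variable names below are illustrative only:

static void
scan_auxv(Elf64_Auxinfo *aux)
{
        void *phdr = NULL;
        long phnum = 0, pagesz = 0;

        for (; aux->a_type != AT_NULL; aux++) {
                switch (aux->a_type) {
                case AT_PHDR:
                        phdr = aux->a_un.a_ptr;         /* program headers */
                        break;
                case AT_PHNUM:
                        phnum = aux->a_un.a_val;        /* header count */
                        break;
                case AT_PAGESZ:
                        pagesz = aux->a_un.a_val;       /* page size */
                        break;
                }
        }
        /* phdr, phnum and pagesz now describe the loaded executable. */
}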
diff --git a/sys/ia64/include/endian.h b/sys/ia64/include/endian.h
new file mode 100644
index 0000000..67e4540
--- /dev/null
+++ b/sys/ia64/include/endian.h
@@ -0,0 +1,97 @@
+/* $FreeBSD$ */
+/* From: NetBSD: endian.h,v 1.5 1997/10/09 15:42:19 bouyer Exp */
+
+/*
+ * Copyright (c) 1987, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)endian.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _ENDIAN_H_
+#define _ENDIAN_H_
+
+/*
+ * Define the order of 32-bit words in 64-bit words.
+ */
+#define _QUAD_HIGHWORD 1
+#define _QUAD_LOWWORD 0
+
+#ifndef _POSIX_SOURCE
+/*
+ * Definitions for byte order, according to byte significance from low
+ * address to high.
+ */
+#define LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
+#define BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */
+#define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */
+
+#define BYTE_ORDER LITTLE_ENDIAN
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+typedef u_int32_t in_addr_t;
+typedef u_int16_t in_port_t;
+
+__BEGIN_DECLS
+in_addr_t htonl __P((in_addr_t));
+in_port_t htons __P((in_port_t));
+in_addr_t ntohl __P((in_addr_t));
+in_port_t ntohs __P((in_port_t));
+u_int16_t bswap16 __P((u_int16_t));
+u_int32_t bswap32 __P((u_int32_t));
+u_int64_t bswap64 __P((u_int64_t));
+__END_DECLS
+
+/*
+ * Macros for network/external number representation conversion.
+ */
+#if BYTE_ORDER == BIG_ENDIAN && !defined(lint)
+#define ntohl(x) (x)
+#define ntohs(x) (x)
+#define htonl(x) (x)
+#define htons(x) (x)
+
+#define NTOHL(x) (x)
+#define NTOHS(x) (x)
+#define HTONL(x) (x)
+#define HTONS(x) (x)
+
+#else
+
+#define NTOHL(x) (x) = ntohl((in_addr_t)x)
+#define NTOHS(x) (x) = ntohs((in_port_t)x)
+#define HTONL(x) (x) = htonl((in_addr_t)x)
+#define HTONS(x) (x) = htons((in_port_t)x)
+#endif
+#endif /* !_POSIX_SOURCE */
+#endif /* !_ENDIAN_H_ */
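Since BYTE_ORDER is LITTLE_ENDIAN here, htons()/htonl() really do swap bytes, while the upper-case macros rewrite their argument in place. A small illustration of the difference (the variable names and values are arbitrary, not taken from the header):

static void
example_byteorder(void)
{
        u_int16_t port = 8080;
        u_int32_t addr = 0x7f000001;    /* 127.0.0.1 in host order */
        u_int16_t nport;

        nport = htons(port);    /* function form: returns the swapped value */
        HTONL(addr);            /* macro form: rewrites addr in network order */
}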
diff --git a/sys/ia64/include/exec.h b/sys/ia64/include/exec.h
new file mode 100644
index 0000000..52baf9e
--- /dev/null
+++ b/sys/ia64/include/exec.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#define __LDPGSZ 4096
diff --git a/sys/ia64/include/float.h b/sys/ia64/include/float.h
new file mode 100644
index 0000000..0420ffb
--- /dev/null
+++ b/sys/ia64/include/float.h
@@ -0,0 +1,79 @@
+/* $FreeBSD$ */
+/* From: NetBSD: float.h,v 1.6 1997/07/17 21:36:03 thorpej Exp */
+
+/*
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_FLOAT_H_
+#define _MACHINE_FLOAT_H_
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+int __flt_rounds __P((void));
+__END_DECLS
+
+#define FLT_RADIX 2 /* b */
+#define FLT_ROUNDS __flt_rounds()
+
+#define FLT_MANT_DIG 24 /* p */
+#define FLT_EPSILON 1.19209290E-07F /* b**(1-p) */
+#define FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */
+#define FLT_MIN_EXP -125 /* emin */
+#define FLT_MIN 1.17549435E-38F /* b**(emin-1) */
+#define FLT_MIN_10_EXP -37 /* ceil(log10(b**(emin-1))) */
+#define FLT_MAX_EXP 128 /* emax */
+#define FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */
+#define FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */
+
+#define DBL_MANT_DIG 53
+#define DBL_EPSILON 2.2204460492503131E-16
+#define DBL_DIG 15
+#define DBL_MIN_EXP -1021
+#define DBL_MIN 2.2250738585072014E-308
+#define DBL_MIN_10_EXP -307
+#define DBL_MAX_EXP 1024
+#define DBL_MAX 1.7976931348623157E+308
+#define DBL_MAX_10_EXP 308
+
+#define LDBL_MANT_DIG DBL_MANT_DIG
+#define LDBL_EPSILON DBL_EPSILON
+#define LDBL_DIG DBL_DIG
+#define LDBL_MIN_EXP DBL_MIN_EXP
+#define LDBL_MIN DBL_MIN
+#define LDBL_MIN_10_EXP DBL_MIN_10_EXP
+#define LDBL_MAX_EXP DBL_MAX_EXP
+#define LDBL_MAX DBL_MAX
+#define LDBL_MAX_10_EXP DBL_MAX_10_EXP
+
+#endif /* _MACHINE_FLOAT_H_ */
diff --git a/sys/ia64/include/floatingpoint.h b/sys/ia64/include/floatingpoint.h
new file mode 100644
index 0000000..e2faa5e
--- /dev/null
+++ b/sys/ia64/include/floatingpoint.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/ieeefp.h>
diff --git a/sys/ia64/include/fpu.h b/sys/ia64/include/fpu.h
new file mode 100644
index 0000000..f9d426c
--- /dev/null
+++ b/sys/ia64/include/fpu.h
@@ -0,0 +1,81 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_FPU_H_
+#define _MACHINE_FPU_H_
+
+/*
+ * Floating point status register bits.
+ */
+
+#define IA64_FPSR_TRAP_VD 0x0000000000000001L
+#define IA64_FPSR_TRAP_DD 0x0000000000000002L
+#define IA64_FPSR_TRAP_ZD 0x0000000000000004L
+#define IA64_FPSR_TRAP_OD 0x0000000000000008L
+#define IA64_FPSR_TRAP_UD 0x0000000000000010L
+#define IA64_FPSR_TRAP_ID 0x0000000000000020L
+#define IA64_FPSR_SF(i,v) ((v) << ((i)*13+6))
+
+#define IA64_SF_FTZ 0x0001L
+#define IA64_SF_WRE 0x0002L
+#define IA64_SF_PC 0x000cL
+#define IA64_SF_PC_0 0x0000L
+#define IA64_SF_PC_1 0x0004L
+#define IA64_SF_PC_2 0x0008L
+#define IA64_SF_PC_3 0x000cL
+#define IA64_SF_RC 0x0030L
+#define IA64_SF_RC_NEAREST 0x0000L
+#define IA64_SF_RC_NEGINF 0x0010L
+#define IA64_SF_RC_POSINF 0x0020L
+#define IA64_SF_RC_TRUNC 0x0030L
+#define IA64_SF_TD 0x0040L
+#define IA64_SF_V 0x0080L
+#define IA64_SF_D 0x0100L
+#define IA64_SF_Z 0x0200L
+#define IA64_SF_O 0x0400L
+#define IA64_SF_U 0x0800L
+#define IA64_SF_I 0x1000L
+
+#define IA64_SF_DEFAULT (IA64_SF_PC_3 | IA64_SF_RC_NEAREST)
+
+#define IA64_FPSR_DEFAULT (IA64_FPSR_TRAP_VD \
+ | IA64_FPSR_TRAP_DD \
+ | IA64_FPSR_TRAP_ZD \
+ | IA64_FPSR_TRAP_OD \
+ | IA64_FPSR_TRAP_UD \
+ | IA64_FPSR_TRAP_ID \
+ | IA64_FPSR_SF(0, IA64_SF_DEFAULT) \
+ | IA64_FPSR_SF(1, (IA64_SF_DEFAULT \
+ | IA64_SF_TD \
+ | IA64_SF_WRE)) \
+ | IA64_FPSR_SF(2, (IA64_SF_DEFAULT \
+ | IA64_SF_TD)) \
+ | IA64_FPSR_SF(3, (IA64_SF_DEFAULT \
+ | IA64_SF_TD)))
+
+#endif /* ! _MACHINE_FPU_H_ */
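The IA64_FPSR_SF() macro shifts a 13-bit status-field value into place: sf0 starts at bit 6, sf1 at bit 19, sf2 at bit 32 and sf3 at bit 45. A worked sketch (illustrative only) that rebuilds the same value as IA64_FPSR_DEFAULT by hand; the six TRAP_*D bits disable delivery of the individual floating point faults, and each status field then gets its rounding and precision control ORed into its slot:

static u_int64_t
example_fpsr(void)
{
        u_int64_t fpsr;

        /* Disable delivery of all six floating point fault types. */
        fpsr = IA64_FPSR_TRAP_VD | IA64_FPSR_TRAP_DD | IA64_FPSR_TRAP_ZD |
            IA64_FPSR_TRAP_OD | IA64_FPSR_TRAP_UD | IA64_FPSR_TRAP_ID;

        /* sf0 occupies bits 6..18: round to nearest, full precision. */
        fpsr |= IA64_FPSR_SF(0, IA64_SF_DEFAULT);

        /* sf1..sf3 start at bits 19, 32 and 45; mark them traps-disabled. */
        fpsr |= IA64_FPSR_SF(1, IA64_SF_DEFAULT | IA64_SF_TD | IA64_SF_WRE);
        fpsr |= IA64_FPSR_SF(2, IA64_SF_DEFAULT | IA64_SF_TD);
        fpsr |= IA64_FPSR_SF(3, IA64_SF_DEFAULT | IA64_SF_TD);

        return (fpsr);  /* same value as IA64_FPSR_DEFAULT above */
}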
diff --git a/sys/ia64/include/frame.h b/sys/ia64/include/frame.h
new file mode 100644
index 0000000..2304770
--- /dev/null
+++ b/sys/ia64/include/frame.h
@@ -0,0 +1,114 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_FRAME_H_
+#define _MACHINE_FRAME_H_
+
+#include <machine/reg.h>
+
+/*
+ * Software trap, exception, and syscall frame.
+ *
+ * This is loosely based on the Linux pt_regs structure. When I
+ * understand things better, I might change it.
+ */
+struct trapframe {
+ u_int64_t tf_cr_iip;
+ u_int64_t tf_cr_ipsr;
+ u_int64_t tf_cr_isr;
+ u_int64_t tf_cr_ifa;
+ u_int64_t tf_pr;
+ u_int64_t tf_ar_rsc;
+ u_int64_t tf_ar_pfs;
+ u_int64_t tf_cr_ifs;
+ u_int64_t tf_ar_bspstore;
+ u_int64_t tf_ar_rnat;
+ u_int64_t tf_ar_bsp;
+ u_int64_t tf_ar_unat;
+ u_int64_t tf_ar_ccv;
+ u_int64_t tf_ar_fpsr;
+
+ u_int64_t tf_b[8];
+
+ u_int64_t tf_r[31]; /* don't need to save r0 */
+#define FRAME_R1 0
+#define FRAME_GP 0
+#define FRAME_R2 1
+#define FRAME_R3 2
+#define FRAME_R4 3
+#define FRAME_R5 4
+#define FRAME_R6 5
+#define FRAME_R7 6
+#define FRAME_R8 7
+#define FRAME_R9 8
+#define FRAME_R10 9
+#define FRAME_R11 10
+#define FRAME_R12 11
+#define FRAME_SP 11
+#define FRAME_R13 12
+#define FRAME_TP 12
+#define FRAME_R14 13
+#define FRAME_R15 14
+#define FRAME_R16 15
+#define FRAME_R17 16
+#define FRAME_R18 17
+#define FRAME_R19 18
+#define FRAME_R20 19
+#define FRAME_R21 20
+#define FRAME_R22 21
+#define FRAME_R23 22
+#define FRAME_R24 23
+#define FRAME_R25 24
+#define FRAME_R26 25
+#define FRAME_R27 26
+#define FRAME_R28 27
+#define FRAME_R29 28
+#define FRAME_R30 29
+#define FRAME_R31 30
+
+ u_int64_t tf_pad1;
+
+ /*
+ * We rely on the compiler to save/restore f2-f5 and
+ * f16-f31. We also tell the compiler to avoid f32-f127
+ * completely so we don't worry about them at all.
+ */
+ struct ia64_fpreg tf_f[10];
+#define FRAME_F6 0
+#define FRAME_F7 1
+#define FRAME_F8 2
+#define FRAME_F9 3
+#define FRAME_F10 4
+#define FRAME_F11 5
+#define FRAME_F12 6
+#define FRAME_F13 7
+#define FRAME_F14 8
+#define FRAME_F15 9
+};
+
+#endif /* _MACHINE_FRAME_H_ */
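The FRAME_* indices name slots in tf_r[], so code can pull specific registers out of a trapframe without remembering the ia64 numbering. A minimal sketch (the function name is illustrative, and the usual kernel includes are assumed for printf):

static void
example_frame(struct trapframe *tf)
{
        u_int64_t sp = tf->tf_r[FRAME_SP];      /* r12, the memory stack */
        u_int64_t gp = tf->tf_r[FRAME_GP];      /* r1, the global pointer */

        printf("trap at %lx, sp %lx, gp %lx\n", tf->tf_cr_iip, sp, gp);
}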
diff --git a/sys/ia64/include/globaldata.h b/sys/ia64/include/globaldata.h
new file mode 100644
index 0000000..87c9fe5
--- /dev/null
+++ b/sys/ia64/include/globaldata.h
@@ -0,0 +1,78 @@
+/*-
+ * Copyright (c) 1999 Luoqi Chen <luoqi@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_GLOBALDATA_H_
+#define _MACHINE_GLOBALDATA_H_
+
+#ifdef _KERNEL
+
+#include <sys/queue.h>
+
+/*
+ * This structure maps out the global data that needs to be kept on a
+ * per-cpu basis. genassym uses this to generate offsets for the assembler
+ * code, which also provides external symbols so that C can get at them as
+ * though they were really globals. This structure is pointed to by
+ * the per-cpu system value (see alpha_pal_rdval() and alpha_pal_wrval()).
+ * Inside the kernel, the globally reserved register t7 is used to
+ * point at the globaldata structure.
+ */
+struct globaldata {
+ struct proc *gd_curproc; /* current process */
+ struct proc *gd_idleproc; /* idle process */
+ struct proc *gd_fpcurproc; /* fp state owner */
+ struct pcb *gd_curpcb; /* current pcb */
+ struct timeval gd_switchtime;
+ int gd_switchticks;
+ u_int gd_cpuno; /* this cpu number */
+ u_int gd_other_cpus; /* all other cpus */
+ int gd_inside_intr;
+ u_int64_t gd_idlepcbphys; /* pa of gd_idlepcb */
+ u_int64_t gd_pending_ipis; /* pending IPI events */
+ u_int32_t gd_next_asn; /* next ASN to allocate */
+ u_int32_t gd_current_asngen; /* ASN rollover check */
+ u_int32_t gd_intr_nesting_level; /* interrupt recursion */
+
+ u_int gd_astpending;
+ SLIST_ENTRY(globaldata) gd_allcpu;
+#ifdef KTR_PERCPU
+ volatile int gd_ktr_idx; /* Index into trace table */
+ char *gd_ktr_buf;
+ char gd_ktr_buf_data[0];
+#endif
+};
+
+SLIST_HEAD(cpuhead, globaldata);
+extern struct cpuhead cpuhead;
+
+void globaldata_init(struct globaldata *pcpu, int cpuno, size_t sz);
+struct globaldata *globaldata_find(int cpuno);
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_GLOBALDATA_H_ */
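For illustration, machine-independent code can walk the registered per-cpu structures through the SLIST declared above (the function name is a hypothetical example, not part of the header):

static int
count_cpus(void)
{
        struct globaldata *gd;
        int n = 0;

        SLIST_FOREACH(gd, &cpuhead, gd_allcpu)
                n++;
        return (n);
}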
diff --git a/sys/ia64/include/globals.h b/sys/ia64/include/globals.h
new file mode 100644
index 0000000..4046bff
--- /dev/null
+++ b/sys/ia64/include/globals.h
@@ -0,0 +1,63 @@
+/*-
+ * Copyright (c) 1999 Luoqi Chen <luoqi@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_GLOBALS_H_
+#define _MACHINE_GLOBALS_H_
+
+#ifdef _KERNEL
+
+register struct globaldata *globalp __asm__("r13");
+
+#if 1
+#define GLOBALP globalp
+#else
+#define GLOBALP ((struct globaldata *) alpha_pal_rdval())
+#endif
+
+#define PCPU_GET(name) (GLOBALP->gd_##name)
+#define PCPU_SET(name,value) (GLOBALP->gd_##name = (value))
+
+/*
+ * The following set of macros works for UP kernel as well, but for maximum
+ * performance we allow the global variables to be accessed directly. On the
+ * other hand, kernel modules should always use these macros to maintain
+ * portability between UP and SMP kernels.
+ */
+#define CURPROC PCPU_GET(curproc)
+#define curproc PCPU_GET(curproc)
+#define idleproc PCPU_GET(idleproc)
+#define curpcb PCPU_GET(curpcb)
+#define fpcurproc PCPU_GET(fpcurproc)
+#define switchtime PCPU_GET(switchtime)
+#define switchticks PCPU_GET(switchticks)
+#define cpuid PCPU_GET(cpuno)
+#define prevproc PCPU_GET(curproc) /* XXX - until ithreads */
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_GLOBALS_H_ */
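With r13 reserved to carry the globaldata pointer, the accessors above make per-cpu fields look like ordinary globals. A sketch of typical kernel use (the function is illustrative only):

static void
example_percpu(void)
{
        struct proc *p;

        p = curproc;                    /* expands to PCPU_GET(curproc) */
        if (PCPU_GET(astpending))       /* read another per-cpu field */
                PCPU_SET(astpending, 0);/* and write it back */
        (void)p;
}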
diff --git a/sys/ia64/include/ia64_cpu.h b/sys/ia64/include/ia64_cpu.h
new file mode 100644
index 0000000..ca205c2
--- /dev/null
+++ b/sys/ia64/include/ia64_cpu.h
@@ -0,0 +1,203 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IA64_CPU_H_
+#define _MACHINE_IA64_CPU_H_
+
+/*
+ * Definition of PSR and IPSR bits.
+ */
+#define IA64_PSR_BE 0x0000000000000002
+#define IA64_PSR_UP 0x0000000000000004
+#define IA64_PSR_AC 0x0000000000000008
+#define IA64_PSR_MFL 0x0000000000000010
+#define IA64_PSR_MFH 0x0000000000000020
+#define IA64_PSR_IC 0x0000000000002000
+#define IA64_PSR_I 0x0000000000004000
+#define IA64_PSR_PK 0x0000000000008000
+#define IA64_PSR_DT 0x0000000000020000
+#define IA64_PSR_DFL 0x0000000000040000
+#define IA64_PSR_DFH 0x0000000000080000
+#define IA64_PSR_SP 0x0000000000100000
+#define IA64_PSR_PP 0x0000000000200000
+#define IA64_PSR_DI 0x0000000000400000
+#define IA64_PSR_SI 0x0000000000800000
+#define IA64_PSR_DB 0x0000000001000000
+#define IA64_PSR_LP 0x0000000002000000
+#define IA64_PSR_TB 0x0000000004000000
+#define IA64_PSR_RT 0x0000000008000000
+#define IA64_PSR_CPL 0x0000000300000000
+#define IA64_PSR_CPL_KERN 0x0000000000000000
+#define IA64_PSR_CPL_1 0x0000000100000000
+#define IA64_PSR_CPL_2 0x0000000200000000
+#define IA64_PSR_CPL_USER 0x0000000300000000
+#define IA64_PSR_IS 0x0000000400000000
+#define IA64_PSR_MC 0x0000000800000000
+#define IA64_PSR_IT 0x0000001000000000
+#define IA64_PSR_ID 0x0000002000000000
+#define IA64_PSR_DA 0x0000004000000000
+#define IA64_PSR_DD 0x0000008000000000
+#define IA64_PSR_SS 0x0000010000000000
+#define IA64_PSR_RI 0x0000060000000000
+#define IA64_PSR_RI_0 0x0000000000000000
+#define IA64_PSR_RI_1 0x0000020000000000
+#define IA64_PSR_RI_2 0x0000040000000000
+#define IA64_PSR_ED 0x0000080000000000
+#define IA64_PSR_BN 0x0000100000000000
+#define IA64_PSR_IA 0x0000200000000000
+
+/*
+ * Definition of ISR bits.
+ */
+#define IA64_ISR_CODE 0x000000000000ffff
+#define IA64_ISR_VECTOR 0x0000000000ff0000
+#define IA64_ISR_X 0x0000000100000000
+#define IA64_ISR_W 0x0000000200000000
+#define IA64_ISR_R 0x0000000400000000
+#define IA64_ISR_NA 0x0000000800000000
+#define IA64_ISR_SP 0x0000001000000000
+#define IA64_ISR_RS 0x0000002000000000
+#define IA64_ISR_IR 0x0000004000000000
+#define IA64_ISR_NI 0x0000008000000000
+#define IA64_ISR_SO 0x0000010000000000
+#define IA64_ISR_EI 0x0000060000000000
+#define IA64_ISR_EI_0 0x0000000000000000
+#define IA64_ISR_EI_1 0x0000020000000000
+#define IA64_ISR_EI_2 0x0000040000000000
+#define IA64_ISR_ED 0x0000080000000000
+
+/*
+ * Vector numbers for various ia64 interrupts.
+ */
+#define IA64_VEC_VHPT 0
+#define IA64_VEC_ITLB 1
+#define IA64_VEC_DTLB 2
+#define IA64_VEC_ALT_ITLB 3
+#define IA64_VEC_ALT_DTLB 4
+#define IA64_VEC_NESTED_DTLB 5
+#define IA64_VEC_IKEY_MISS 6
+#define IA64_VEC_DKEY_MISS 7
+#define IA64_VEC_DIRTY_BIT 8
+#define IA64_VEC_INST_ACCESS 9
+#define IA64_VEC_DATA_ACCESS 10
+#define IA64_VEC_BREAK 11
+#define IA64_VEC_EXT_INTR 12
+#define IA64_VEC_PAGE_NOT_PRESENT 20
+#define IA64_VEC_KEY_PERMISSION 21
+#define IA64_VEC_INST_ACCESS_RIGHTS 22
+#define IA64_VEC_DATA_ACCESS_RIGHTS 23
+#define IA64_VEC_GENERAL_EXCEPTION 24
+#define IA64_VEC_DISABLED_FP 25
+#define IA64_VEC_NAT_CONSUMPTION 26
+#define IA64_VEC_SPECULATION 27
+#define IA64_VEC_DEBUG 29
+#define IA64_VEC_UNALIGNED_REFERENCE 30
+#define IA64_VEC_UNSUPP_DATA_REFERENCE 31
+#define IA64_VEC_FLOATING_POINT_FAULT 32
+#define IA64_VEC_FLOATING_POINT_TRAP 33
+#define IA64_VEC_LOWER_PRIVILEGE_TRANSFER 34
+#define IA64_VEC_TAKEN_BRANCH_TRAP 35
+#define IA64_VEC_SINGLE_STEP_TRAP 36
+#define IA64_VEC_IA32_EXCEPTION 45
+#define IA64_VEC_IA32_INTERCEPT 46
+#define IA64_VEC_IA32_INTERRUPT 47
+
+/*
+ * Manipulating region bits of an address.
+ */
+#define IA64_RR_BASE(n) (((u_int64_t) (n)) << 61)
+#define IA64_RR_MASK(x) ((x) & ((1L << 61) - 1))
+
+#define IA64_PHYS_TO_RR6(x) ((x) | IA64_RR_BASE(6))
+#define IA64_PHYS_TO_RR7(x) ((x) | IA64_RR_BASE(7))
+
+/*
+ * Various special ia64 instructions.
+ */
+
+/*
+ * Memory Fence.
+ */
+static __inline void
+ia64_mf(void)
+{
+ __asm __volatile("mf");
+}
+
+/*
+ * Calculate address in VHPT for va.
+ */
+static __inline u_int64_t
+ia64_thash(u_int64_t va)
+{
+ u_int64_t result;
+ __asm __volatile("thash %0=%1" : "=r" (result) : "r" (va));
+ return result;
+}
+
+/*
+ * Calculate VHPT tag for va.
+ */
+static __inline u_int64_t
+ia64_ttag(u_int64_t va)
+{
+ u_int64_t result;
+ __asm __volatile("ttag %0=%1" : "=r" (result) : "r" (va));
+ return result;
+}
+
+/*
+ * Convert virtual address to physical.
+ */
+static __inline u_int64_t
+ia64_tpa(u_int64_t va)
+{
+ u_int64_t result;
+ __asm __volatile("tpa %0=%1" : "=r" (result) : "r" (va));
+ return result;
+}
+
+/*
+ * Read the value of ar.itc.
+ */
+static __inline u_int64_t
+ia64_read_itc(void)
+{
+ u_int64_t result;
+ __asm __volatile("mov %0=ar.itc" : "=r" (result));
+ return result;
+}
+
+static __inline void
+ia64_set_rr(u_int64_t rrbase, u_int64_t v)
+{
+ __asm __volatile("mov rr[%0]=%1" :: "r"(rrbase), "r"(v) : "memory");
+}
+
+#endif /* _MACHINE_IA64_CPU_H_ */
+
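The region macros above carve the 64-bit virtual space into eight regions selected by the top three address bits; region 7 provides a direct-mapped alias of physical memory. A small sketch converting a physical address into its region-7 alias and back (the sample address is arbitrary):

static void
example_regions(void)
{
        u_int64_t pa = 0x0000000004000000;      /* arbitrary physical address */
        u_int64_t va, back;

        va = IA64_PHYS_TO_RR7(pa);      /* set address bits 63..61 to 7 */
        back = IA64_RR_MASK(va);        /* strip the region bits again */
        /* back == pa */
}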
diff --git a/sys/ia64/include/ieee.h b/sys/ia64/include/ieee.h
new file mode 100644
index 0000000..654d6ee
--- /dev/null
+++ b/sys/ia64/include/ieee.h
@@ -0,0 +1,124 @@
+/* $FreeBSD$ */
+/* From: NetBSD: ieee.h,v 1.2 1997/04/06 08:47:27 cgd Exp */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ieee.h 8.1 (Berkeley) 6/11/93
+ *
+ * from: Header: ieee.h,v 1.7 92/11/26 02:04:37 torek Exp
+ */
+
+/*
+ * ieee.h defines the machine-dependent layout of the machine's IEEE
+ * floating point. It does *not* define (yet?) any of the rounding
+ * mode bits, exceptions, and so forth.
+ */
+
+/*
+ * Define the number of bits in each fraction and exponent.
+ *
+ * k k+1
+ * Note that 1.0 x 2 == 0.1 x 2 and that denorms are represented
+ *
+ * (-exp_bias+1)
+ * as fractions that look like 0.fffff x 2 . This means that
+ *
+ * -126
+ * the number 0.10000 x 2 , for instance, is the same as the normalized
+ *
+ * -127 -128
+ * float 1.0 x 2 . Thus, to represent 2 , we need one leading zero
+ *
+ * -129
+ * in the fraction; to represent 2 , we need two, and so on. This
+ *
+ * (-exp_bias-fracbits+1)
+ * implies that the smallest denormalized number is 2
+ *
+ * for whichever format we are talking about: for single precision, for
+ *
+ * -126 -149
+ * instance, we get .00000000000000000000001 x 2 , or 1.0 x 2 , and
+ *
+ * -149 == -127 - 23 + 1.
+ */
+#define SNG_EXPBITS 8
+#define SNG_FRACBITS 23
+
+#define DBL_EXPBITS 11
+#define DBL_FRACBITS 52
+
+struct ieee_single {
+ u_int sng_frac:23;
+ u_int sng_exp:8;
+ u_int sng_sign:1;
+};
+
+struct ieee_double {
+ u_int dbl_fracl;
+ u_int dbl_frach:20;
+ u_int dbl_exp:11;
+ u_int dbl_sign:1;
+};
+
+/*
+ * Floats whose exponent is in [1..INFNAN) (of whatever type) are
+ * `normal'. Floats whose exponent is INFNAN are either Inf or NaN.
+ * Floats whose exponent is zero are either zero (iff all fraction
+ * bits are zero) or subnormal values.
+ *
+ * A NaN is a `signalling NaN' if its QUIETNAN bit is clear in its
+ * high fraction; if the bit is set, it is a `quiet NaN'.
+ */
+#define SNG_EXP_INFNAN 255
+#define DBL_EXP_INFNAN 2047
+
+#if 0
+#define SNG_QUIETNAN (1 << 22)
+#define DBL_QUIETNAN (1 << 19)
+#endif
+
+/*
+ * Exponent biases.
+ */
+#define SNG_EXP_BIAS 127
+#define DBL_EXP_BIAS 1023
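These bit-field layouts describe the little-endian in-memory format, so a double can be pulled apart by overlaying the struct. A minimal sketch classifying a NaN (the union and function are local to the example, not part of the header):

static int
example_isnan(double d)
{
        union {
                double d;
                struct ieee_double bits;
        } u;

        u.d = d;
        /* NaN: all-ones exponent with a non-zero fraction. */
        return (u.bits.dbl_exp == DBL_EXP_INFNAN &&
            (u.bits.dbl_frach != 0 || u.bits.dbl_fracl != 0));
}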
diff --git a/sys/ia64/include/ieeefp.h b/sys/ia64/include/ieeefp.h
new file mode 100644
index 0000000..d673507
--- /dev/null
+++ b/sys/ia64/include/ieeefp.h
@@ -0,0 +1,29 @@
+/* $FreeBSD$ */
+/* From: NetBSD: ieeefp.h,v 1.2 1997/04/06 08:47:28 cgd Exp */
+
+/*
+ * Written by J.T. Conklin, Apr 28, 1995
+ * Public domain.
+ */
+
+#ifndef _ALPHA_IEEEFP_H_
+#define _ALPHA_IEEEFP_H_
+
+typedef int fp_except_t;
+#define FP_X_INV (1LL << 1) /* invalid operation exception */
+#define FP_X_DZ (1LL << 2) /* divide-by-zero exception */
+#define FP_X_OFL (1LL << 3) /* overflow exception */
+#define FP_X_UFL (1LL << 4) /* underflow exception */
+#define FP_X_IMP (1LL << 5) /* imprecise(inexact) exception */
+#if 0
+#define FP_X_IOV (1LL << 6) /* integer overflow XXX? */
+#endif
+
+typedef enum {
+ FP_RZ=0, /* round to zero (truncate) */
+ FP_RM=1, /* round toward negative infinity */
+ FP_RN=2, /* round to nearest representable number */
+ FP_RP=3 /* round toward positive infinity */
+} fp_rnd_t;
+
+#endif /* _ALPHA_IEEEFP_H_ */
diff --git a/sys/ia64/include/in_cksum.h b/sys/ia64/include/in_cksum.h
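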
new file mode 100644
index 0000000..7a98720
--- /dev/null
+++ b/sys/ia64/include/in_cksum.h
@@ -0,0 +1,80 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
+ * from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define _MACHINE_IN_CKSUM_H_ 1
+
+#include <sys/cdefs.h>
+
+#define in_cksum(m, len) in_cksum_skip(m, len, 0)
+
+/*
+ * It is useful to have an Internet checksum routine which is inlineable
+ * and optimized specifically for the task of computing IP header checksums
+ * in the normal case (where there are no options and the header length is
+ * therefore always exactly five 32-bit words).
+ */
+#ifdef __GNUC__
+
+static __inline void
+in_cksum_update(struct ip *ip)
+{
+ int __tmpsum;
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256;
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16));
+}
+
+#else
+
+#define in_cksum_update(ip) \
+ do { \
+ int __tmpsum; \
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256; \
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16)); \
+ } while(0)
+
+#endif
+
+#ifdef _KERNEL
+u_int in_cksum_hdr(const struct ip *ip);
+u_short in_addword(u_short sum, u_short b);
+u_short in_pseudo(u_int sum, u_int b, u_int c);
+u_short in_cksum_skip(struct mbuf *m, int len, int skip);
+#endif
+
+#endif /* _MACHINE_IN_CKSUM_H_ */
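The in_cksum_update() above is the standard incremental fix-up applied when a forwarded packet's TTL is decremented: ip_ttl is the high byte of a 16-bit header word, so decrementing it changes the one's-complement sum by 0x0100, and adding 256 to the stored checksum and folding the carry keeps the header consistent without recomputing the whole sum (the technique described in RFC 1141). A sketch of the forwarding-path usage, assuming struct ip comes from <netinet/ip.h>:

static void
example_forward(struct ip *ip)
{
        ip->ip_ttl--;           /* one hop consumed */
        in_cksum_update(ip);    /* patch ip_sum incrementally */
}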
diff --git a/sys/ia64/include/intr.h b/sys/ia64/include/intr.h
new file mode 100644
index 0000000..4f00388
--- /dev/null
+++ b/sys/ia64/include/intr.h
@@ -0,0 +1,37 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_INTR_H_
+#define _MACHINE_INTR_H_
+
+int alpha_setup_intr(int vector, driver_intr_t *intr, void *arg,
+ void **cookiep, volatile long *cntp);
+int alpha_teardown_intr(void *cookie);
+void alpha_dispatch_intr(void *frame, unsigned long vector);
+
+#endif /* !_MACHINE_INTR_H_ */
diff --git a/sys/ia64/include/intrcnt.h b/sys/ia64/include/intrcnt.h
new file mode 100644
index 0000000..440f819
--- /dev/null
+++ b/sys/ia64/include/intrcnt.h
@@ -0,0 +1,79 @@
+/* $FreeBSD$ */
+/* $NetBSD: intrcnt.h,v 1.17 1998/11/19 01:48:04 ross Exp $ */
+
+/*
+ * Copyright (c) 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#define INTRCNT_CLOCK 0
+#define INTRCNT_ISA_IRQ (INTRCNT_CLOCK + 1)
+#define INTRCNT_ISA_IRQ_LEN 16
+#define INTRCNT_OTHER_BASE (INTRCNT_ISA_IRQ + INTRCNT_ISA_IRQ_LEN)
+#define INTRCNT_OTHER_LEN 48
+#define INTRCNT_COUNT (INTRCNT_OTHER_BASE + INTRCNT_OTHER_LEN)
+
+#define INTRCNT_A12_IRQ INTRCNT_OTHER_BASE
+#define INTRCNT_DEC_1000A_IRQ INTRCNT_OTHER_BASE
+#define INTRCNT_DEC_1000_IRQ INTRCNT_OTHER_BASE
+#define INTRCNT_DEC_2100_A500_IRQ INTRCNT_OTHER_BASE
+#define INTRCNT_DEC_550_IRQ INTRCNT_OTHER_BASE
+#define INTRCNT_EB164_IRQ INTRCNT_OTHER_BASE
+#define INTRCNT_EB64PLUS_IRQ INTRCNT_OTHER_BASE
+#define INTRCNT_EB66_IRQ INTRCNT_OTHER_BASE
+#define INTRCNT_IOASIC INTRCNT_OTHER_BASE
+#define INTRCNT_KN15 INTRCNT_OTHER_BASE
+#define INTRCNT_KN16 INTRCNT_OTHER_BASE
+#define INTRCNT_KN20AA_IRQ INTRCNT_OTHER_BASE
+#define INTRCNT_KN300_IRQ INTRCNT_OTHER_BASE
+#define INTRCNT_KN8AE_IRQ INTRCNT_OTHER_BASE
+#define INTRCNT_TCDS INTRCNT_OTHER_BASE
+
+#define INTRCNT_A12_IRQ_LEN 10
+#define INTRCNT_DEC_1000A_IRQ_LEN 32
+#define INTRCNT_DEC_1000_IRQ_LEN 16
+#define INTRCNT_DEC_2100_A500_IRQ_LEN 16
+#define INTRCNT_DEC_550_IRQ_LEN 48
+#define INTRCNT_EB164_IRQ_LEN 24
+#define INTRCNT_EB64PLUS_IRQ_LEN 32
+#define INTRCNT_EB66_IRQ_LEN 32
+#define INTRCNT_IOASIC_LEN 4
+#define INTRCNT_ISA_IRQ_LEN 16
+#define INTRCNT_KN15_LEN 9
+#define INTRCNT_KN16_LEN 5
+#define INTRCNT_KN20AA_IRQ_LEN 32
+#define INTRCNT_KN300_LEN 19
+#define INTRCNT_KN8AE_IRQ_LEN 2
+#define INTRCNT_TCDS_LEN 2
+
+# define INTRCNT_KN300_NCR810 INTRCNT_KN300_IRQ + 16
+# define INTRCNT_KN300_I2C_CTRL INTRCNT_KN300_IRQ + 17
+# define INTRCNT_KN300_I2C_BUS INTRCNT_KN300_IRQ + 18
+
+#ifdef _KERNEL
+#ifndef _LOCORE
+extern volatile long intrcnt[];
+#endif
+#endif
diff --git a/sys/ia64/include/ioctl_bt848.h b/sys/ia64/include/ioctl_bt848.h
new file mode 100644
index 0000000..4b6a8fb
--- /dev/null
+++ b/sys/ia64/include/ioctl_bt848.h
@@ -0,0 +1,288 @@
+/*
+ * extensions to ioctl_meteor.h for the bt848 cards
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IOCTL_BT848_H_
+#define _MACHINE_IOCTL_BT848_H_
+
+/*
+ * frequency sets
+ */
+#define CHNLSET_NABCST 1
+#define CHNLSET_CABLEIRC 2
+#define CHNLSET_CABLEHRC 3
+#define CHNLSET_WEUROPE 4
+#define CHNLSET_JPNBCST 5
+#define CHNLSET_JPNCABLE 6
+#define CHNLSET_XUSSR 7
+#define CHNLSET_AUSTRALIA 8
+#define CHNLSET_FRANCE 9
+#define CHNLSET_MIN CHNLSET_NABCST
+#define CHNLSET_MAX CHNLSET_FRANCE
+
+
+/*
+ * constants for various tuner registers
+ */
+#define BT848_HUEMIN (-90)
+#define BT848_HUEMAX 90
+#define BT848_HUECENTER 0
+#define BT848_HUERANGE 179.3
+#define BT848_HUEREGMIN (-128)
+#define BT848_HUEREGMAX 127
+#define BT848_HUESTEPS 256
+
+#define BT848_BRIGHTMIN (-50)
+#define BT848_BRIGHTMAX 50
+#define BT848_BRIGHTCENTER 0
+#define BT848_BRIGHTRANGE 99.6
+#define BT848_BRIGHTREGMIN (-128)
+#define BT848_BRIGHTREGMAX 127
+#define BT848_BRIGHTSTEPS 256
+
+#define BT848_CONTRASTMIN 0
+#define BT848_CONTRASTMAX 237
+#define BT848_CONTRASTCENTER 100
+#define BT848_CONTRASTRANGE 236.57
+#define BT848_CONTRASTREGMIN 0
+#define BT848_CONTRASTREGMAX 511
+#define BT848_CONTRASTSTEPS 512
+
+#define BT848_CHROMAMIN 0
+#define BT848_CHROMAMAX 284
+#define BT848_CHROMACENTER 100
+#define BT848_CHROMARANGE 283.89
+#define BT848_CHROMAREGMIN 0
+#define BT848_CHROMAREGMAX 511
+#define BT848_CHROMASTEPS 512
+
+#define BT848_SATUMIN 0
+#define BT848_SATUMAX 202
+#define BT848_SATUCENTER 100
+#define BT848_SATURANGE 201.18
+#define BT848_SATUREGMIN 0
+#define BT848_SATUREGMAX 511
+#define BT848_SATUSTEPS 512
+
+#define BT848_SATVMIN 0
+#define BT848_SATVMAX 284
+#define BT848_SATVCENTER 100
+#define BT848_SATVRANGE 283.89
+#define BT848_SATVREGMIN 0
+#define BT848_SATVREGMAX 511
+#define BT848_SATVSTEPS 512
+
+
+/*
+ * audio stuff
+ */
+#define AUDIO_TUNER 0x00 /* command for the audio routine */
+#define AUDIO_EXTERN 0x01 /* don't confuse them with bit */
+#define AUDIO_INTERN 0x02 /* settings */
+#define AUDIO_MUTE 0x80
+#define AUDIO_UNMUTE 0x81
+
+
+/*
+ * EEProm stuff
+ */
+struct eeProm {
+ short offset;
+ short count;
+ u_char bytes[ 256 ];
+};
+
+
+/*
+ * XXX: this is a hack; these should be in ioctl_meteor.h but are kept
+ * here to avoid touching that file for now...
+ */
+#define TVTUNER_SETCHNL _IOW('x', 32, unsigned int) /* set channel */
+#define TVTUNER_GETCHNL _IOR('x', 32, unsigned int) /* get channel */
+#define TVTUNER_SETTYPE _IOW('x', 33, unsigned int) /* set tuner type */
+#define TVTUNER_GETTYPE _IOR('x', 33, unsigned int) /* get tuner type */
+#define TVTUNER_GETSTATUS _IOR('x', 34, unsigned int) /* get tuner status */
+#define TVTUNER_SETFREQ _IOW('x', 35, unsigned int) /* set frequency */
+#define TVTUNER_GETFREQ _IOR('x', 36, unsigned int) /* get frequency */
+
+
+#define BT848_SHUE _IOW('x', 37, int) /* set hue */
+#define BT848_GHUE _IOR('x', 37, int) /* get hue */
+#define BT848_SBRIG _IOW('x', 38, int) /* set brightness */
+#define BT848_GBRIG _IOR('x', 38, int) /* get brightness */
+#define BT848_SCSAT _IOW('x', 39, int) /* set chroma sat */
+#define BT848_GCSAT _IOR('x', 39, int) /* get UV saturation */
+#define BT848_SCONT _IOW('x', 40, int) /* set contrast */
+#define BT848_GCONT _IOR('x', 40, int) /* get contrast */
+#define BT848_SVSAT _IOW('x', 41, int) /* set chroma V sat */
+#define BT848_GVSAT _IOR('x', 41, int) /* get V saturation */
+#define BT848_SUSAT _IOW('x', 42, int) /* set chroma U sat */
+#define BT848_GUSAT _IOR('x', 42, int) /* get U saturation */
+
+#define BT848_SCBARS _IOR('x', 43, int) /* set colorbar */
+#define BT848_CCBARS _IOR('x', 44, int) /* clear colorbar */
+
+
+#define BT848_SAUDIO _IOW('x', 46, int) /* set audio channel */
+#define BT848_GAUDIO _IOR('x', 47, int) /* get audio channel */
+#define BT848_SBTSC _IOW('x', 48, int) /* set audio channel */
+
+#define BT848_GSTATUS _IOR('x', 49, unsigned int) /* reap status */
+
+#define BT848_WEEPROM _IOWR('x', 50, struct eeProm) /* write to EEProm */
+#define BT848_REEPROM _IOWR('x', 51, struct eeProm) /* read from EEProm */
+
+#define BT848_SIGNATURE _IOWR('x', 52, struct eeProm) /* read card sig */
+
+#define TVTUNER_SETAFC _IOW('x', 53, int) /* turn AFC on/off */
+#define TVTUNER_GETAFC _IOR('x', 54, int) /* query AFC on/off */
+#define BT848_SLNOTCH _IOW('x', 55, int) /* set luma notch */
+#define BT848_GLNOTCH _IOR('x', 56, int) /* get luma notch */
+
+/* Read/Write the BT848's I2C bus directly
+ * b7-b0: data (read/write)
+ * b15-b8: internal peripheral register (write)
+ * b23-b16: i2c addr (write)
+ * b31-b24: 1 = write, 0 = read
+ */
+#define BT848_I2CWR _IOWR('x', 57, u_long) /* i2c read-write */
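The comment above describes how the four byte-wide fields share one u_long. A hedged sketch of packing such a word for a register write; bt848_i2c_word() is a made-up helper and <sys/types.h> is assumed for the u_char/u_long types:

static u_long
bt848_i2c_word(int is_write, u_char addr, u_char reg, u_char data)
{
    /* b31-b24: direction, b23-b16: i2c addr, b15-b8: register, b7-b0: data */
    return ((u_long)(is_write ? 1 : 0) << 24) | ((u_long)addr << 16) |
        ((u_long)reg << 8) | data;
}

The resulting word would then be passed by address to ioctl() with BT848_I2CWR.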
+
+/* Support for radio tuner */
+#define RADIO_SETMODE _IOW('x', 58, unsigned int) /* set radio modes */
+#define RADIO_GETMODE _IOR('x', 58, unsigned char) /* get radio modes */
+#define RADIO_AFC 0x01 /* These modes will probably not */
+#define RADIO_MONO 0x02 /* work on the FRxxxx. It does */
+#define RADIO_MUTE 0x08 /* work on the FMxxxx. */
+#define RADIO_SETFREQ _IOW('x', 59, unsigned int) /* set frequency */
+#define RADIO_GETFREQ     _IOR('x', 59, unsigned int) /* get frequency */
+ /* Argument is frequency*100MHz */
+
+/*
+ * XXX: more bad magic;
+ * we need to fix METEORGINPUT to return something public.
+ * Duplicate them here for now...
+ */
+#define METEOR_DEV0 0x00001000
+#define METEOR_DEV1 0x00002000
+#define METEOR_DEV2 0x00004000
+#define METEOR_DEV3 0x00008000
+#define METEOR_DEV_SVIDEO 0x00006000
+/*
+ * right now I don't know where to put these, but as they are supposed to be
+ * part of a common video capture interface, these should be relocated to
+ * another place. Probably most of the METEOR_xxx defines need to be
+ * renamed and moved to a common header
+ */
+
+typedef enum { METEOR_PIXTYPE_RGB, METEOR_PIXTYPE_YUV,
+ METEOR_PIXTYPE_YUV_PACKED,
+ METEOR_PIXTYPE_YUV_12 } METEOR_PIXTYPE;
+
+
+struct meteor_pixfmt {
+ u_int index; /* Index in supported pixfmt list */
+ METEOR_PIXTYPE type; /* What's the board gonna feed us */
+ u_int Bpp; /* Bytes per pixel */
+ u_long masks[3]; /* R,G,B or Y,U,V masks, respectively */
+ unsigned swap_bytes :1; /* Bytes swapped within shorts */
+ unsigned swap_shorts:1; /* Shorts swapped within longs */
+};
+
+
+struct bktr_clip {
+ int x_min;
+ int x_max;
+ int y_min;
+ int y_max;
+};
+
+#define BT848_MAX_CLIP_NODE 100
+struct _bktr_clip {
+ struct bktr_clip x[BT848_MAX_CLIP_NODE];
+};
+
+/*
+ * I'm using METEOR_xxx just because that will be common to other interfaces
+ * and less of a surprise
+ */
+#define METEORSACTPIXFMT _IOW('x', 64, int )
+#define METEORGACTPIXFMT _IOR('x', 64, int )
+#define METEORGSUPPIXFMT _IOWR('x', 65, struct meteor_pixfmt)
+
+/* set clip list */
+#define BT848SCLIP _IOW('x', 66, struct _bktr_clip )
+#define BT848GCLIP _IOR('x', 66, struct _bktr_clip )
+
+
+/* set input format */
+#define BT848SFMT _IOW('x', 67, unsigned long )
+#define BT848GFMT _IOR('x', 67, unsigned long )
+
+/* set clear-buffer-on-start */
+#define BT848SCBUF _IOW('x', 68, int)
+#define BT848GCBUF _IOR('x', 68, int)
+
+/* set capture area */
+/* The capture area is the area of the video image which is grabbed */
+/* Usually the capture area is 640x480 (768x576 PAL) pixels */
+/* This area is then scaled to the dimensions the user requires */
+/* using the METEORGEO ioctl */
+/* However, the capture area could be 400x300 pixels from the top right */
+/* corner of the video image */
+struct bktr_capture_area {
+ int x_offset;
+ int y_offset;
+ int x_size;
+ int y_size;
+};
+#define BT848_SCAPAREA _IOW('x', 69, struct bktr_capture_area)
+#define BT848_GCAPAREA _IOR('x', 69, struct bktr_capture_area)
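As the comment block above explains, the grabbed area can be an offset sub-rectangle of the full image. A sketch of programming a 400x300 window; set_capture_area() is an illustrative name and `fd' is assumed to be an already-open bktr descriptor:

#include <sys/ioctl.h>
#include <err.h>

static void
set_capture_area(int fd)
{
    struct bktr_capture_area cap;

    cap.x_offset = 100;    /* start 100 pixels in and 50 down */
    cap.y_offset = 50;
    cap.x_size = 400;      /* grab a 400x300 window; scaling happens later */
    cap.y_size = 300;
    if (ioctl(fd, BT848_SCAPAREA, &cap) == -1)
        warn("BT848_SCAPAREA");
}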
+
+
+/* Get channel Set */
+#define BT848_MAX_CHNLSET_NAME_LEN 16
+struct bktr_chnlset {
+ short index;
+ short max_channel;
+ char name[BT848_MAX_CHNLSET_NAME_LEN];
+};
+#define TVTUNER_GETCHNLSET _IOWR('x', 70, struct bktr_chnlset)
+
+
+
+/* Infra Red Remote Control */
+struct bktr_remote {
+ unsigned char data[3];
+};
+#define REMOTE_GETKEY _IOR('x', 71, struct bktr_remote)/*read the remote */
+ /*control receiver*/
+ /*returns raw data*/
+
+
+/*
+ * Direct access to GPIO pins. You must add BKTR_GPIO_ACCESS to your kernel
+ * configuration file to use these
+ */
+#define BT848_GPIO_SET_EN _IOW('x', 72, int) /* set gpio_out_en */
+#define BT848_GPIO_GET_EN _IOR('x', 73, int) /* get gpio_out_en */
+#define BT848_GPIO_SET_DATA _IOW('x', 74, int) /* set gpio_data */
+#define BT848_GPIO_GET_DATA _IOR('x', 75, int) /* get gpio_data */
+
+
+
+/* XXX - Copied from /sys/pci/brktree_reg.h */
+#define BT848_IFORM_FORMAT (0x7<<0)
+# define BT848_IFORM_F_RSVD (0x7)
+# define BT848_IFORM_F_SECAM (0x6)
+# define BT848_IFORM_F_PALN (0x5)
+# define BT848_IFORM_F_PALM (0x4)
+# define BT848_IFORM_F_PALBDGHI (0x3)
+# define BT848_IFORM_F_NTSCJ (0x2)
+# define BT848_IFORM_F_NTSCM (0x1)
+# define BT848_IFORM_F_AUTO (0x0)
+
+
+#endif /* _MACHINE_IOCTL_BT848_H_ */
+
diff --git a/sys/ia64/include/ioctl_fd.h b/sys/ia64/include/ioctl_fd.h
new file mode 100644
index 0000000..dd6e9d0
--- /dev/null
+++ b/sys/ia64/include/ioctl_fd.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 1992-1994 by Joerg Wunsch, Dresden
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * $FreeBSD$
+ * from: ioctl_fd.h,v 1.11
+ */
+
+#ifndef _MACHINE_IOCTL_FD_H_
+#define _MACHINE_IOCTL_FD_H_
+
+#ifndef _KERNEL
+#include <sys/types.h>
+#endif
+#include <sys/ioccom.h>
+
+#define FD_FORMAT_VERSION 110 /* used to validate before formatting */
+#define FD_MAX_NSEC 36 /* highest known number of spt - allow for */
+ /* 2.88 MB drives */
+
+struct fd_formb {
+ int format_version; /* == FD_FORMAT_VERSION */
+ int cyl, head;
+ int transfer_rate; /* fdreg.h: FDC_???KBPS */
+
+ union {
+ struct fd_form_data {
+ /*
+			 * DO NOT CHANGE THE LAYOUT OF THIS STRUCT
+ * it is hardware-dependent since it exactly
+ * matches the byte sequence to write to FDC
+ * during its `format track' operation
+ */
+ u_char secshift; /* 0 -> 128, ...; usually 2 -> 512 */
+ u_char nsecs; /* must be <= FD_MAX_NSEC */
+ u_char gaplen; /* GAP 3 length; usually 84 */
+ u_char fillbyte; /* usually 0xf6 */
+ struct fd_idfield_data {
+ /*
+ * data to write into id fields;
+ * for obscure formats, they mustn't match
+ * the real values (but mostly do)
+ */
+ u_char cylno; /* 0 thru 79 (or 39) */
+ u_char headno; /* 0, or 1 */
+ u_char secno; /* starting at 1! */
+ u_char secsize; /* usually 2 */
+ } idfields[FD_MAX_NSEC]; /* 0 <= idx < nsecs used */
+ } structured;
+ u_char raw[1]; /* to have continuous indexed access */
+ } format_info;
+};
+
+/* make life easier */
+# define fd_formb_secshift format_info.structured.secshift
+# define fd_formb_nsecs format_info.structured.nsecs
+# define fd_formb_gaplen format_info.structured.gaplen
+# define fd_formb_fillbyte format_info.structured.fillbyte
+/* these data must be filled in for(i = 0; i < fd_formb_nsecs; i++) */
+# define fd_formb_cylno(i) format_info.structured.idfields[i].cylno
+# define fd_formb_headno(i) format_info.structured.idfields[i].headno
+# define fd_formb_secno(i) format_info.structured.idfields[i].secno
+# define fd_formb_secsize(i) format_info.structured.idfields[i].secsize
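The helper macros above are applied to a struct fd_formb before issuing FD_FORM. A sketch that fills one 18-sector, 512-byte/sector track (roughly a 1.44 MB layout); fill_track() is an illustrative name and FDC_500KBPS comes from the definitions at the end of this header:

static void
fill_track(struct fd_formb *f, int cyl, int head)
{
    int i;

    f->format_version = FD_FORMAT_VERSION;
    f->cyl = cyl;
    f->head = head;
    f->transfer_rate = FDC_500KBPS;
    f->fd_formb_secshift = 2;          /* 2 -> 512-byte sectors */
    f->fd_formb_nsecs = 18;
    f->fd_formb_gaplen = 84;
    f->fd_formb_fillbyte = 0xf6;
    for (i = 0; i < f->fd_formb_nsecs; i++) {
        f->fd_formb_cylno(i) = cyl;
        f->fd_formb_headno(i) = head;
        f->fd_formb_secno(i) = i + 1;  /* sector numbers start at 1 */
        f->fd_formb_secsize(i) = 2;
    }
}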
+
+struct fd_type {
+ int sectrac; /* sectors per track */
+ int secsize; /* size code for sectors */
+ int datalen; /* data len when secsize = 0 */
+ int gap; /* gap len between sectors */
+ int tracks; /* total num of tracks */
+ int size; /* size of disk in sectors */
+ int steptrac; /* steps per cylinder */
+ int trans; /* transfer speed code */
+ int heads; /* number of heads */
+ int f_gap; /* format gap len */
+ int f_inter; /* format interleave factor */
+};
+
+#define FD_FORM _IOW('F', 61, struct fd_formb) /* format a track */
+#define FD_GTYPE _IOR('F', 62, struct fd_type) /* get drive type */
+#define FD_STYPE _IOW('F', 63, struct fd_type) /* set drive type */
+
+#define FD_GOPTS _IOR('F', 64, int) /* drive options, see below */
+#define FD_SOPTS _IOW('F', 65, int)
+
+#define FD_DEBUG _IOW('F', 66, int)
+
+#define FDOPT_NORETRY 0x0001 /* no retries on failure (cleared on close) */
+
+/*
+ * The following definitions duplicate those in sys/i386/isa/fdreg.h
+ * They are here since their values are to be used in the above
+ * structure when formatting a floppy. For very obvious reasons, both
+ * definitions must match ;-)
+ */
+#ifndef FDC_500KBPS
+#define FDC_500KBPS 0x00 /* 500KBPS MFM drive transfer rate */
+#define FDC_300KBPS 0x01 /* 300KBPS MFM drive transfer rate */
+#define FDC_250KBPS 0x02 /* 250KBPS MFM drive transfer rate */
+#define FDC_125KBPS 0x03 /* 125KBPS FM drive transfer rate */
+				/* for some controllers 1MBPS instead */
+#endif /* FDC_500KBPS */
+
+#endif /* !_MACHINE_IOCTL_FD_H_ */
diff --git a/sys/ia64/include/ioctl_meteor.h b/sys/ia64/include/ioctl_meteor.h
new file mode 100644
index 0000000..f62d392
--- /dev/null
+++ b/sys/ia64/include/ioctl_meteor.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 1995 Mark Tinguely and Jim Lowe
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Tinguely and Jim Lowe
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * ioctl constants for Matrox Meteor Capture card.
+ */
+
+#ifndef _MACHINE_IOCTL_METEOR_H_
+#define _MACHINE_IOCTL_METEOR_H_
+
+#ifndef _KERNEL
+#include <sys/types.h>
+#endif
+#include <sys/ioccom.h>
+
+struct meteor_capframe {
+ short command; /* see below for valid METEORCAPFRM commands */
+ short lowat; /* start transfer if < this number */
+ short hiwat; /* stop transfer if > this number */
+} ;
+
+/* structure for METEOR[GS]ETGEO - get/set geometry */
+struct meteor_geomet {
+ u_short rows;
+ u_short columns;
+ u_short frames;
+ u_long oformat;
+} ;
+
+/* structure for METEORGCOUNT-get count of frames, fifo errors and dma errors */
+struct meteor_counts {
+ u_long fifo_errors; /* count of fifo errors since open */
+ u_long dma_errors; /* count of dma errors since open */
+ u_long frames_captured; /* count of frames captured since open */
+ u_long even_fields_captured; /* count of even fields captured */
+ u_long odd_fields_captured; /* count of odd fields captured */
+} ;
+
+/* structure for getting and setting direct transfers to vram */
+struct meteor_video {
+ u_long addr; /* Address of location to dma to */
+ u_long width; /* Width of memory area */
+ u_long banksize; /* Size of Vram bank */
+ u_long ramsize; /* Size of Vram */
+};
+
+#define METEORCAPTUR _IOW('x', 1, int) /* capture a frame */
+#define METEORCAPFRM _IOW('x', 2, struct meteor_capframe) /* sync capture */
+#define METEORSETGEO _IOW('x', 3, struct meteor_geomet) /* set geometry */
+#define METEORGETGEO _IOR('x', 4, struct meteor_geomet) /* get geometry */
+#define METEORSTATUS _IOR('x', 5, unsigned short) /* get status */
+#define METEORSHUE _IOW('x', 6, signed char) /* set hue */
+#define METEORGHUE _IOR('x', 6, signed char) /* get hue */
+#define METEORSFMT _IOW('x', 7, unsigned long) /* set format */
+#define METEORGFMT _IOR('x', 7, unsigned long) /* get format */
+#define METEORSINPUT _IOW('x', 8, unsigned long) /* set input dev */
+#define METEORGINPUT _IOR('x', 8, unsigned long) /* get input dev */
+#define METEORSCHCV _IOW('x', 9, unsigned char) /* set uv gain */
+#define METEORGCHCV _IOR('x', 9, unsigned char) /* get uv gain */
+#define METEORSCOUNT _IOW('x',10, struct meteor_counts)
+#define METEORGCOUNT _IOR('x',10, struct meteor_counts)
+#define METEORSFPS _IOW('x',11, unsigned short) /* set fps */
+#define METEORGFPS _IOR('x',11, unsigned short) /* get fps */
+#define METEORSSIGNAL _IOW('x', 12, unsigned int) /* set signal */
+#define METEORGSIGNAL _IOR('x', 12, unsigned int) /* get signal */
+#define METEORSVIDEO _IOW('x', 13, struct meteor_video) /* set video */
+#define METEORGVIDEO _IOR('x', 13, struct meteor_video) /* get video */
+#define METEORSBRIG _IOW('x', 14, unsigned char) /* set brightness */
+#define METEORGBRIG _IOR('x', 14, unsigned char) /* get brightness */
+#define METEORSCSAT _IOW('x', 15, unsigned char) /* set chroma sat */
+#define METEORGCSAT _IOR('x', 15, unsigned char) /* get uv saturation */
+#define METEORSCONT _IOW('x', 16, unsigned char) /* set contrast */
+#define METEORGCONT _IOR('x', 16, unsigned char) /* get contrast */
+#define METEORSBT254 _IOW('x', 17, unsigned short) /* set Bt254 reg */
+#define METEORGBT254 _IOR('x', 17, unsigned short) /* get Bt254 reg */
+#define METEORSHWS _IOW('x', 18, unsigned char) /* set hor start reg */
+#define METEORGHWS _IOR('x', 18, unsigned char) /* get hor start reg */
+#define METEORSVWS _IOW('x', 19, unsigned char) /* set vert start reg */
+#define METEORGVWS _IOR('x', 19, unsigned char) /* get vert start reg */
+#define METEORSTS _IOW('x', 20, unsigned char) /* set time stamp */
+#define METEORGTS _IOR('x', 20, unsigned char) /* get time stamp */
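The usual calling sequence with the structures above is to program a geometry and then trigger a capture. A sketch for a single 640x480 RGB16 frame; grab_one_frame() is illustrative, `fd' is assumed to be an open meteor descriptor, and METEOR_GEO_RGB16/METEOR_CAP_SINGLE are defined further down in this header:

#include <sys/ioctl.h>
#include <err.h>

static void
grab_one_frame(int fd)
{
    struct meteor_geomet geo;
    int cmd;

    geo.rows = 480;
    geo.columns = 640;
    geo.frames = 1;
    geo.oformat = METEOR_GEO_RGB16;
    if (ioctl(fd, METEORSETGEO, &geo) == -1)
        err(1, "METEORSETGEO");
    cmd = METEOR_CAP_SINGLE;
    if (ioctl(fd, METEORCAPTUR, &cmd) == -1)
        err(1, "METEORCAPTUR");
}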
+
+#define METEOR_STATUS_ID_MASK 0xf000 /* ID of 7196 */
+#define METEOR_STATUS_DIR 0x0800 /* Direction of Expansion port YUV */
+#define METEOR_STATUS_OEF 0x0200 /* Field detected: Even/Odd */
+#define METEOR_STATUS_SVP 0x0100 /* State of VRAM Port:inactive/active */
+#define METEOR_STATUS_STTC 0x0080 /* Time Constant: TV/VCR */
+#define METEOR_STATUS_HCLK 0x0040 /* Horiz PLL: locked/unlocked */
+#define METEOR_STATUS_FIDT 0x0020 /* Field detect: 50/60hz */
+#define METEOR_STATUS_ALTD 0x0002 /* Line alt: no line alt/line alt */
+#define METEOR_STATUS_CODE 0x0001 /* Colour info: no colour/colour */
+
+ /* METEORCAPTUR capture options */
+#define METEOR_CAP_SINGLE 0x0001 /* capture one frame */
+#define METEOR_CAP_CONTINOUS 0x0002 /* continuously capture */
+#define METEOR_CAP_STOP_CONT 0x0004 /* stop the continuous capture */
+
+ /* METEORCAPFRM capture commands */
+#define METEOR_CAP_N_FRAMES 0x0001 /* capture N frames */
+#define METEOR_CAP_STOP_FRAMES 0x0002 /* stop capture N frames */
+#define METEOR_HALT_N_FRAMES	0x0003	/* halt capture of N frames */
+#define METEOR_CONT_N_FRAMES 0x0004 /* continue after above halt */
+
+ /* valid video input formats: */
+#define METEOR_FMT_NTSC 0x00100 /* NTSC -- initialized default */
+#define METEOR_FMT_PAL 0x00200 /* PAL */
+#define METEOR_FMT_SECAM 0x00400 /* SECAM */
+#define METEOR_FMT_AUTOMODE 0x00800 /* auto-mode */
+#define METEOR_INPUT_DEV0 0x01000 /* camera input 0 -- default */
+#define METEOR_INPUT_DEV_RCA METEOR_INPUT_DEV0
+#define METEOR_INPUT_DEV1 0x02000 /* camera input 1 */
+#define METEOR_INPUT_DEV2 0x04000 /* camera input 2 */
+#define METEOR_INPUT_DEV3 0x08000 /* camera input 3 */
+#define METEOR_INPUT_DEV_RGB 0x0a000 /* for rgb version of meteor */
+#define METEOR_INPUT_DEV_SVIDEO 0x06000 /* S-video input port */
+
+ /* valid video output formats: */
+#define METEOR_GEO_RGB16 0x0010000 /* packed -- initialized default */
+#define METEOR_GEO_RGB24 0x0020000 /* RBG 24 bits packed */
+ /* internally stored in 32 bits */
+#define METEOR_GEO_YUV_PACKED 0x0040000 /* 4-2-2 YUV 16 bits packed */
+#define METEOR_GEO_YUV_PLANAR	0x0080000 /* 4-2-2 YUV 16 bits planar */
+#define METEOR_GEO_YUV_PLANER METEOR_GEO_YUV_PLANAR
+#define METEOR_GEO_UNSIGNED 0x0400000 /* unsigned uv outputs */
+#define METEOR_GEO_EVEN_ONLY 0x1000000 /* set for even only field capture */
+#define METEOR_GEO_ODD_ONLY 0x2000000 /* set for odd only field capture */
+#define METEOR_GEO_FIELD_MASK 0x3000000
+#define METEOR_GEO_YUV_422 0x4000000 /* 4-2-2 YUV in Y-U-V combined */
+#define METEOR_GEO_OUTPUT_MASK 0x40f0000
+#define METEOR_GEO_YUV_12 0x10000000 /* YUV 12 format */
+#define METEOR_GEO_YUV_9 0x40000000 /* YUV 9 format */
+
+#define METEOR_FIELD_MODE 0x80000000 /* Field cap or Frame cap */
+
+#define METEOR_SIG_MODE_MASK 0xffff0000
+#define METEOR_SIG_FRAME 0x00000000 /* signal every frame */
+#define METEOR_SIG_FIELD 0x00010000 /* signal every field */
+
+	/* following structure is used to coordinate the synchronous capture */
+
+struct meteor_mem {
+ /* kernel write only */
+ int frame_size; /* row*columns*depth */
+ unsigned num_bufs; /* number of frames in buffer (1-32) */
+ /* user and kernel change these */
+ int lowat; /* kernel starts capture if < this number */
+ int hiwat; /* kernel stops capture if > this number.
+ hiwat <= numbufs */
+ unsigned active; /* bit mask of active frame buffers
+ kernel sets, user clears */
+ int num_active_bufs; /* count of active frame buffer
+ kernel increments, user decrements */
+
+ /* reference to mmapped data */
+ caddr_t buf; /* The real space (virtual addr) */
+} ;
+
+#endif /* !_MACHINE_IOCTL_METEOR_H_ */
diff --git a/sys/ia64/include/ipl.h b/sys/ia64/include/ipl.h
new file mode 100644
index 0000000..2a021eb
--- /dev/null
+++ b/sys/ia64/include/ipl.h
@@ -0,0 +1,132 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IPL_H_
+#define _MACHINE_IPL_H_
+
+
+#include <machine/cpu.h> /* for pal inlines */
+
+/*
+ * Software interrupt bit numbers
+ */
+#define SWI_TTY 0
+#define SWI_NET 1
+#define SWI_CAMNET 2
+#define SWI_CAMBIO 3
+#define SWI_VM 4
+#define SWI_CLOCK 5
+#define SWI_TQ 6
+#define NSWI 32
+#define NHWI 0
+
+extern u_int32_t ipending;
+
+#define getcpl() (alpha_pal_rdps() & ALPHA_PSL_IPL_MASK)
+
+#define SPLDOWN(name, pri) \
+ \
+static __inline int name(void) \
+{ \
+ return 0; \
+}
+
+SPLDOWN(splsoftclock, SOFT)
+SPLDOWN(splsoft, SOFT)
+
+#define SPLUP(name, pri) \
+ \
+static __inline int name(void) \
+{ \
+ return 0; \
+}
+
+SPLUP(splsoftcam, SOFT)
+SPLUP(splsoftnet, SOFT)
+SPLUP(splsoftvm, SOFT)
+SPLUP(splsofttq, SOFT)
+SPLUP(splnet, IO)
+SPLUP(splbio, IO)
+SPLUP(splcam, IO)
+SPLUP(splimp, IO)
+SPLUP(spltty, IO)
+SPLUP(splvm, IO)
+SPLUP(splclock, CLOCK)
+SPLUP(splstatclock, CLOCK)
+SPLUP(splhigh, HIGH)
+
+static __inline void
+spl0(void)
+{
+ if (ipending)
+ do_sir(); /* lowers ipl to SOFT */
+}
+
+static __inline void
+splx(int s)
+{
+}
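In this snapshot the spl routines are stubs that return 0 and splx() does nothing, but callers are still expected to follow the conventional save/raise/restore pattern so the code keeps working once real interrupt masking arrives. A sketch of that pattern, with an illustrative function name:

static void
touch_shared_queue(void)
{
    int s;

    s = splnet();
    /* ... modify data shared with the network software interrupt ... */
    splx(s);
}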
+
+extern void setdelayed(void);
+extern void setsofttty(void);
+extern void setsoftnet(void);
+extern void setsoftcamnet(void);
+extern void setsoftcambio(void);
+extern void setsoftvm(void);
+extern void setsofttq(void);
+extern void setsoftclock(void);
+
+extern void schedsofttty(void);
+extern void schedsoftnet(void);
+extern void schedsoftcamnet(void);
+extern void schedsoftcambio(void);
+extern void schedsoftvm(void);
+extern void schedsofttq(void);
+extern void schedsoftclock(void);
+
+#if 0
+/* XXX bogus */
+extern unsigned cpl; /* current priority level mask */
+#endif
+
+/*
+ * Interprocessor interrupts for SMP.
+ */
+#define IPI_INVLTLB 0x0001
+#define IPI_RENDEZVOUS 0x0002
+#define IPI_AST 0x0004
+#define IPI_CHECKSTATE 0x0008
+#define IPI_STOP 0x0010
+
+void smp_ipi_selected(u_int32_t cpus, u_int64_t ipi);
+void smp_ipi_all(u_int64_t ipi);
+void smp_ipi_all_but_self(u_int64_t ipi);
+void smp_ipi_self(u_int64_t ipi);
+void smp_handle_ipi(struct trapframe *frame);
+
+#endif /* !_MACHINE_IPL_H_ */
diff --git a/sys/ia64/include/limits.h b/sys/ia64/include/limits.h
new file mode 100644
index 0000000..99ca2d8
--- /dev/null
+++ b/sys/ia64/include/limits.h
@@ -0,0 +1,96 @@
+/* $FreeBSD$ */
+/* From: NetBSD: limits.h,v 1.3 1997/04/06 08:47:31 cgd Exp */
+
+/*
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
+ */
+
+#define CHAR_BIT 8 /* number of bits in a char */
+#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN and LONG_MIN is so the value is not unsigned; 2147483648 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ * These numbers work for pcc as well. The UINT_MAX and ULONG_MAX values
+ * are written as hex so that GCC will be quiet about large integer constants.
+ */
+#define SCHAR_MAX 0x7f /* max value for a signed char */
+#define SCHAR_MIN (-0x7f-1) /* min value for a signed char */
+
+#define UCHAR_MAX 0xffU /* max value for an unsigned char */
+#define CHAR_MAX 0x7f /* max value for a char */
+#define CHAR_MIN (-0x7f-1) /* min value for a char */
+
+#define USHRT_MAX 0xffffU /* max value for an unsigned short */
+#define SHRT_MAX 0x7fff /* max value for a short */
+#define SHRT_MIN (-0x7fff-1) /* min value for a short */
+
+#define UINT_MAX 0xffffffffU /* max value for an unsigned int */
+#define INT_MAX 0x7fffffff /* max value for an int */
+#define INT_MIN (-0x7fffffff-1) /* min value for an int */
+
+#define ULONG_MAX 0xffffffffffffffffUL /* max for an unsigned long */
+#define LONG_MAX 0x7fffffffffffffffL /* max for a long */
+#define LONG_MIN (-0x7fffffffffffffffL-1) /* min for a long */
+
+#if !defined(_ANSI_SOURCE)
+#define SSIZE_MAX LONG_MAX /* max value for a ssize_t */
+
+#if !defined(_POSIX_SOURCE) && !defined(_XOPEN_SOURCE)
+#define SIZE_T_MAX ULONG_MAX /* max value for a size_t */
+
+/* Quads and longs are the same on ia64 */
+#define UQUAD_MAX (ULONG_MAX) /* max value for a uquad_t */
+#define QUAD_MAX (LONG_MAX) /* max value for a quad_t */
+#define QUAD_MIN (LONG_MIN) /* min value for a quad_t */
+
+#endif /* !_POSIX_SOURCE && !_XOPEN_SOURCE */
+#endif /* !_ANSI_SOURCE */
+
+#if (!defined(_ANSI_SOURCE)&&!defined(_POSIX_SOURCE)) || defined(_XOPEN_SOURCE)
+#define LONG_BIT 64
+#define WORD_BIT 32
+
+#define DBL_DIG 15
+#define DBL_MAX 1.7976931348623157E+308
+#define DBL_MIN 2.2250738585072014E-308
+
+#define FLT_DIG 6
+#define FLT_MAX 3.40282347E+38F
+#define FLT_MIN 1.17549435E-38F
+#endif
diff --git a/sys/ia64/include/lock.h b/sys/ia64/include/lock.h
new file mode 100644
index 0000000..189b966
--- /dev/null
+++ b/sys/ia64/include/lock.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 1997, by Steve Passe
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. The name of the developer may NOT be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+
+#ifndef _MACHINE_LOCK_H_
+#define _MACHINE_LOCK_H_
+
+
+/*
+ * Simple spin lock.
+ * It is an error to hold one of these locks while a process is sleeping.
+ */
+struct simplelock {
+ volatile int lock_data;
+};
+
+/* functions in mp_machdep.c */
+void s_lock_init __P((struct simplelock *));
+void s_lock __P((struct simplelock *));
+int s_lock_try __P((struct simplelock *));
+
+/* inline simplelock functions */
+static __inline void
+s_unlock(struct simplelock *lkp)
+{
+#if 0 /* XXX */
+ ia64_st_rel_32(&lkp->lock_data, 0);
+#endif
+}
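A sketch of the intended calling pattern, assuming the mp_machdep.c routines declared above; map_lock and map_update() are made-up names, and the lock would be set up once beforehand with s_lock_init(&map_lock):

static struct simplelock map_lock;

static void
map_update(void)
{
    s_lock(&map_lock);      /* spin until the lock is free */
    /* ... modify the shared structure ... */
    s_unlock(&map_lock);    /* currently a no-op, see above */
}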
+
+#define COM_LOCK()
+#define COM_UNLOCK()
+
+#endif /* !_MACHINE_LOCK_H_ */
diff --git a/sys/ia64/include/md_var.h b/sys/ia64/include/md_var.h
new file mode 100644
index 0000000..eeaff20
--- /dev/null
+++ b/sys/ia64/include/md_var.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MD_VAR_H_
+#define _MACHINE_MD_VAR_H_
+
+/*
+ * Miscellaneous machine-dependent declarations.
+ */
+
+extern char sigcode[];
+extern char esigcode[];
+extern int szsigcode;
+extern int Maxmem;
+extern void (*netisrs[32]) __P((void));
+
+struct fpreg;
+struct proc;
+struct reg;
+
+void busdma_swi __P((void));
+void cpu_halt __P((void));
+void cpu_reset __P((void));
+int is_physical_memory __P((vm_offset_t addr));
+void swi_vm __P((void));
+int vm_page_zero_idle __P((void));
+int fill_regs __P((struct proc *, struct reg *));
+int set_regs __P((struct proc *, struct reg *));
+int fill_fpregs __P((struct proc *, struct fpreg *));
+int set_fpregs __P((struct proc *, struct fpreg *));
+
+#endif /* !_MACHINE_MD_VAR_H_ */
diff --git a/sys/ia64/include/mouse.h b/sys/ia64/include/mouse.h
new file mode 100644
index 0000000..1f9d63b
--- /dev/null
+++ b/sys/ia64/include/mouse.h
@@ -0,0 +1,336 @@
+/*-
+ * Copyright (c) 1992, 1993 Erik Forsberg.
+ * Copyright (c) 1996, 1997 Kazutaka YOKOTA
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL I BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ * from: i386/include mouse.h,v 1.10
+ */
+
+#ifndef _MACHINE_MOUSE_H_
+#define _MACHINE_MOUSE_H_
+
+#include <sys/types.h>
+#include <sys/ioccom.h>
+
+/* ioctls */
+#define MOUSE_GETSTATUS _IOR('M', 0, mousestatus_t)
+#define MOUSE_GETHWINFO _IOR('M', 1, mousehw_t)
+#define MOUSE_GETMODE _IOR('M', 2, mousemode_t)
+#define MOUSE_SETMODE _IOW('M', 3, mousemode_t)
+#define MOUSE_GETLEVEL _IOR('M', 4, int)
+#define MOUSE_SETLEVEL _IOW('M', 5, int)
+#define MOUSE_GETVARS _IOR('M', 6, mousevar_t)
+#define MOUSE_SETVARS _IOW('M', 7, mousevar_t)
+#define MOUSE_READSTATE _IOWR('M', 8, mousedata_t)
+#define MOUSE_READDATA _IOWR('M', 9, mousedata_t)
+
+#if notyet
+#define MOUSE_SETRESOLUTION _IOW('M', 10, int)
+#define MOUSE_SETSCALING _IOW('M', 11, int)
+#define MOUSE_SETRATE _IOW('M', 12, int)
+#define MOUSE_GETHWID _IOR('M', 13, int)
+#endif
+
+/* mouse status block */
+typedef struct mousestatus {
+ int flags; /* state change flags */
+ int button; /* button status */
+ int obutton; /* previous button status */
+ int dx; /* x movement */
+ int dy; /* y movement */
+ int dz; /* z movement */
+} mousestatus_t;
+
+/* button */
+#define MOUSE_BUTTON1DOWN 0x0001 /* left */
+#define MOUSE_BUTTON2DOWN 0x0002 /* middle */
+#define MOUSE_BUTTON3DOWN 0x0004 /* right */
+#define MOUSE_BUTTON4DOWN 0x0008
+#define MOUSE_BUTTON5DOWN 0x0010
+#define MOUSE_BUTTON6DOWN 0x0020
+#define MOUSE_BUTTON7DOWN 0x0040
+#define MOUSE_BUTTON8DOWN 0x0080
+#define MOUSE_MAXBUTTON 31
+#define MOUSE_STDBUTTONS 0x0007 /* buttons 1-3 */
+#define MOUSE_EXTBUTTONS 0x7ffffff8 /* the others (28 of them!) */
+#define MOUSE_BUTTONS (MOUSE_STDBUTTONS | MOUSE_EXTBUTTONS)
+
+/* flags */
+#define MOUSE_STDBUTTONSCHANGED MOUSE_STDBUTTONS
+#define MOUSE_EXTBUTTONSCHANGED MOUSE_EXTBUTTONS
+#define MOUSE_BUTTONSCHANGED MOUSE_BUTTONS
+#define MOUSE_POSCHANGED 0x80000000
+
+typedef struct mousehw {
+ int buttons; /* -1 if unknown */
+ int iftype; /* MOUSE_IF_XXX */
+ int type; /* mouse/track ball/pad... */
+ int model; /* I/F dependent model ID: MOUSE_MODEL_XXX */
+ int hwid; /* I/F dependent hardware ID
+ * for the PS/2 mouse, it will be PSM_XXX_ID
+ */
+} mousehw_t;
+
+/* iftype */
+#define MOUSE_IF_UNKNOWN (-1)
+#define MOUSE_IF_SERIAL 0
+#define MOUSE_IF_BUS 1
+#define MOUSE_IF_INPORT 2
+#define MOUSE_IF_PS2 3
+#define MOUSE_IF_SYSMOUSE 4
+#define MOUSE_IF_USB 5
+
+/* type */
+#define MOUSE_UNKNOWN (-1) /* should be treated as a mouse */
+#define MOUSE_MOUSE 0
+#define MOUSE_TRACKBALL 1
+#define MOUSE_STICK 2
+#define MOUSE_PAD 3
+
+/* model */
+#define MOUSE_MODEL_UNKNOWN (-1)
+#define MOUSE_MODEL_GENERIC 0
+#define MOUSE_MODEL_GLIDEPOINT 1
+#define MOUSE_MODEL_NETSCROLL 2
+#define MOUSE_MODEL_NET 3
+#define MOUSE_MODEL_INTELLI 4
+#define MOUSE_MODEL_THINK 5
+#define MOUSE_MODEL_EASYSCROLL 6
+#define MOUSE_MODEL_MOUSEMANPLUS 7
+#define MOUSE_MODEL_KIDSPAD 8
+#define MOUSE_MODEL_VERSAPAD 9
+#define MOUSE_MODEL_EXPLORER 10
+#define MOUSE_MODEL_4D 11
+#define MOUSE_MODEL_4DPLUS 12
+
+typedef struct mousemode {
+ int protocol; /* MOUSE_PROTO_XXX */
+ int rate; /* report rate (per sec), -1 if unknown */
+ int resolution; /* MOUSE_RES_XXX, -1 if unknown */
+	int accelfactor;	/* acceleration factor (must be 1 or greater) */
+ int level; /* driver operation level */
+ int packetsize; /* the length of the data packet */
+ unsigned char syncmask[2]; /* sync. data bits in the header byte */
+} mousemode_t;
+
+/* protocol */
+/*
+ * Serial protocols:
+ * Microsoft, MouseSystems, Logitech, MM series, MouseMan, Hitachi Tablet,
+ * GlidePoint, IntelliMouse, Thinking Mouse, MouseRemote, Kidspad,
+ * VersaPad
+ * Bus mouse protocols:
+ * bus, InPort
+ * PS/2 mouse protocol:
+ * PS/2
+ */
+#define MOUSE_PROTO_UNKNOWN (-1)
+#define MOUSE_PROTO_MS 0 /* Microsoft Serial, 3 bytes */
+#define MOUSE_PROTO_MSC 1 /* Mouse Systems, 5 bytes */
+#define MOUSE_PROTO_LOGI 2 /* Logitech, 3 bytes */
+#define MOUSE_PROTO_MM 3 /* MM series, 3 bytes */
+#define MOUSE_PROTO_LOGIMOUSEMAN 4 /* Logitech MouseMan 3/4 bytes */
+#define MOUSE_PROTO_BUS 5 /* MS/Logitech bus mouse */
+#define MOUSE_PROTO_INPORT 6 /* MS/ATI InPort mouse */
+#define MOUSE_PROTO_PS2 7 /* PS/2 mouse, 3 bytes */
+#define MOUSE_PROTO_HITTAB 8 /* Hitachi Tablet 3 bytes */
+#define MOUSE_PROTO_GLIDEPOINT 9 /* ALPS GlidePoint, 3/4 bytes */
+#define MOUSE_PROTO_INTELLI 10 /* MS IntelliMouse, 4 bytes */
+#define MOUSE_PROTO_THINK	11	/* Kensington Thinking Mouse, 3/4 bytes */
+#define MOUSE_PROTO_SYSMOUSE 12 /* /dev/sysmouse */
+#define MOUSE_PROTO_X10MOUSEREM 13 /* X10 MouseRemote, 3 bytes */
+#define MOUSE_PROTO_KIDSPAD 14 /* Genius Kidspad */
+#define MOUSE_PROTO_VERSAPAD 15 /* Interlink VersaPad, 6 bytes */
+
+#define MOUSE_RES_UNKNOWN (-1)
+#define MOUSE_RES_DEFAULT 0
+#define MOUSE_RES_LOW (-2)
+#define MOUSE_RES_MEDIUMLOW (-3)
+#define MOUSE_RES_MEDIUMHIGH (-4)
+#define MOUSE_RES_HIGH (-5)
+
+typedef struct mousedata {
+ int len; /* # of data in the buffer */
+ int buf[16]; /* data buffer */
+} mousedata_t;
+
+#if (defined(MOUSE_GETVARS))
+
+typedef struct mousevar {
+ int var[16];
+} mousevar_t;
+
+/* magic numbers in var[0] */
+#define MOUSE_VARS_PS2_SIG 0x00325350 /* 'PS2' */
+#define MOUSE_VARS_BUS_SIG 0x00535542 /* 'BUS' */
+#define MOUSE_VARS_INPORT_SIG 0x00504e49 /* 'INP' */
+
+#endif /* MOUSE_GETVARS */
+
+/* Microsoft Serial mouse data packet */
+#define MOUSE_MSS_PACKETSIZE 3
+#define MOUSE_MSS_SYNCMASK 0x40
+#define MOUSE_MSS_SYNC 0x40
+#define MOUSE_MSS_BUTTONS 0x30
+#define MOUSE_MSS_BUTTON1DOWN 0x20 /* left */
+#define MOUSE_MSS_BUTTON2DOWN 0x00 /* no middle button */
+#define MOUSE_MSS_BUTTON3DOWN 0x10 /* right */
+
+/* Logitech MouseMan data packet (M+ protocol) */
+#define MOUSE_LMAN_BUTTON2DOWN 0x20 /* middle button, the 4th byte */
+
+/* ALPS GlidePoint extension (variant of M+ protocol) */
+#define MOUSE_ALPS_BUTTON2DOWN 0x20 /* middle button, the 4th byte */
+#define MOUSE_ALPS_TAP 0x10 /* `tapping' action, the 4th byte */
+
+/* Kensington Thinking Mouse extension (variant of M+ protocol) */
+#define MOUSE_THINK_BUTTON2DOWN 0x20 /* lower-left button, the 4th byte */
+#define MOUSE_THINK_BUTTON4DOWN 0x10 /* lower-right button, the 4th byte */
+
+/* MS IntelliMouse (variant of MS Serial) */
+#define MOUSE_INTELLI_PACKETSIZE 4
+#define MOUSE_INTELLI_BUTTON2DOWN 0x10 /* middle button in the 4th byte */
+
+/* Mouse Systems Corp. mouse data packet */
+#define MOUSE_MSC_PACKETSIZE 5
+#define MOUSE_MSC_SYNCMASK 0xf8
+#define MOUSE_MSC_SYNC 0x80
+#define MOUSE_MSC_BUTTONS 0x07
+#define MOUSE_MSC_BUTTON1UP 0x04 /* left */
+#define MOUSE_MSC_BUTTON2UP 0x02 /* middle */
+#define MOUSE_MSC_BUTTON3UP 0x01 /* right */
+#define MOUSE_MSC_MAXBUTTON 3
+
+/* MM series mouse data packet */
+#define MOUSE_MM_PACKETSIZE 3
+#define MOUSE_MM_SYNCMASK 0xe0
+#define MOUSE_MM_SYNC 0x80
+#define MOUSE_MM_BUTTONS 0x07
+#define MOUSE_MM_BUTTON1DOWN 0x04 /* left */
+#define MOUSE_MM_BUTTON2DOWN 0x02 /* middle */
+#define MOUSE_MM_BUTTON3DOWN 0x01 /* right */
+#define MOUSE_MM_XPOSITIVE 0x10
+#define MOUSE_MM_YPOSITIVE 0x08
+
+/* PS/2 mouse data packet */
+#define MOUSE_PS2_PACKETSIZE 3
+#define MOUSE_PS2_SYNCMASK 0xc8
+#define MOUSE_PS2_SYNC 0x08
+#define MOUSE_PS2_BUTTONS 0x07 /* 0x03 for 2 button mouse */
+#define MOUSE_PS2_BUTTON1DOWN 0x01 /* left */
+#define MOUSE_PS2_BUTTON2DOWN 0x04 /* middle */
+#define MOUSE_PS2_BUTTON3DOWN 0x02 /* right */
+#define MOUSE_PS2_TAP MOUSE_PS2_SYNC /* GlidePoint (PS/2) `tapping'
+ * Yes! this is the same bit
+ * as SYNC!
+ */
+
+#define MOUSE_PS2_XNEG 0x10
+#define MOUSE_PS2_YNEG 0x20
+#define MOUSE_PS2_XOVERFLOW 0x40
+#define MOUSE_PS2_YOVERFLOW 0x80
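A sketch of turning one raw packet into signed deltas and a button mask with the definitions above; PS/2 deltas are 9-bit two's complement, with the sign bits in the first byte and the magnitudes in bytes 1 and 2. ps2_decode() is an illustrative name:

static void
ps2_decode(const unsigned char pkt[MOUSE_PS2_PACKETSIZE],
    int *dx, int *dy, int *buttons)
{
    *buttons = pkt[0] & MOUSE_PS2_BUTTONS;
    *dx = (pkt[0] & MOUSE_PS2_XNEG) ? pkt[1] - 256 : pkt[1];
    *dy = (pkt[0] & MOUSE_PS2_YNEG) ? pkt[2] - 256 : pkt[2];
}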
+
+/* Logitech MouseMan+ (PS/2) data packet (PS/2++ protocol) */
+#define MOUSE_PS2PLUS_SYNCMASK 0x48
+#define MOUSE_PS2PLUS_SYNC 0x48
+#define MOUSE_PS2PLUS_ZNEG 0x08 /* sign bit */
+#define MOUSE_PS2PLUS_BUTTON4DOWN 0x10 /* 4th button on MouseMan+ */
+#define MOUSE_PS2PLUS_BUTTON5DOWN 0x20
+
+/* IBM ScrollPoint (PS/2) also uses PS/2++ protocol */
+#define MOUSE_SPOINT_ZNEG 0x80 /* sign bits */
+#define MOUSE_SPOINT_WNEG 0x08
+
+/* MS IntelliMouse (PS/2) data packet */
+#define MOUSE_PS2INTELLI_PACKETSIZE 4
+/* some compatible mice have additional buttons */
+#define MOUSE_PS2INTELLI_BUTTON4DOWN 0x40
+#define MOUSE_PS2INTELLI_BUTTON5DOWN 0x80
+
+/* MS IntelliMouse Explorer (PS/2) data packet (variation of IntelliMouse) */
+#define MOUSE_EXPLORER_ZNEG 0x08 /* sign bit */
+/* IntelliMouse Explorer has additional button data in the fourth byte */
+#define MOUSE_EXPLORER_BUTTON4DOWN 0x10
+#define MOUSE_EXPLORER_BUTTON5DOWN 0x20
+
+/* Interlink VersaPad (serial I/F) data packet */
+#define MOUSE_VERSA_PACKETSIZE 6
+#define MOUSE_VERSA_IN_USE 0x04
+#define MOUSE_VERSA_SYNCMASK 0xc3
+#define MOUSE_VERSA_SYNC 0xc0
+#define MOUSE_VERSA_BUTTONS 0x30
+#define MOUSE_VERSA_BUTTON1DOWN 0x20 /* left */
+#define MOUSE_VERSA_BUTTON2DOWN 0x00 /* middle */
+#define MOUSE_VERSA_BUTTON3DOWN 0x10 /* right */
+#define MOUSE_VERSA_TAP 0x08
+
+/* Interlink VersaPad (PS/2 I/F) data packet */
+#define MOUSE_PS2VERSA_PACKETSIZE 6
+#define MOUSE_PS2VERSA_IN_USE 0x10
+#define MOUSE_PS2VERSA_SYNCMASK 0xe8
+#define MOUSE_PS2VERSA_SYNC 0xc8
+#define MOUSE_PS2VERSA_BUTTONS 0x05
+#define MOUSE_PS2VERSA_BUTTON1DOWN 0x04 /* left */
+#define MOUSE_PS2VERSA_BUTTON2DOWN 0x00 /* middle */
+#define MOUSE_PS2VERSA_BUTTON3DOWN 0x01 /* right */
+#define MOUSE_PS2VERSA_TAP 0x02
+
+/* A4 Tech 4D Mouse (PS/2) data packet */
+#define MOUSE_4D_PACKETSIZE 3
+#define MOUSE_4D_WHEELBITS 0xf0
+
+/* A4 Tech 4D+ Mouse (PS/2) data packet */
+#define MOUSE_4DPLUS_PACKETSIZE 3
+#define MOUSE_4DPLUS_ZNEG 0x04 /* sign bit */
+#define MOUSE_4DPLUS_BUTTON4DOWN 0x08
+
+/* sysmouse extended data packet */
+/*
+ * /dev/sysmouse sends data in two formats, depending on the protocol
+ * level. At level 0 the format is exactly the same as the MouseSystems
+ * five-byte packet. At level 1 the first five bytes are the same as at
+ * level 0, followed by three additional bytes which carry `dz' and the
+ * states of additional buttons. `dz' is expressed as the sum of bytes
+ * 5 and 6, each of which contains a signed seven-bit value. The states
+ * of buttons 4 through 10 are in bits 0 through 6 of byte 7,
+ * respectively: 1 indicates the button is up.
+ */
+#define MOUSE_SYS_PACKETSIZE 8
+#define MOUSE_SYS_SYNCMASK 0xf8
+#define MOUSE_SYS_SYNC 0x80
+#define MOUSE_SYS_BUTTON1UP 0x04 /* left, 1st byte */
+#define MOUSE_SYS_BUTTON2UP 0x02 /* middle, 1st byte */
+#define MOUSE_SYS_BUTTON3UP 0x01 /* right, 1st byte */
+#define MOUSE_SYS_BUTTON4UP 0x0001 /* 7th byte */
+#define MOUSE_SYS_BUTTON5UP 0x0002
+#define MOUSE_SYS_BUTTON6UP 0x0004
+#define MOUSE_SYS_BUTTON7UP 0x0008
+#define MOUSE_SYS_BUTTON8UP 0x0010
+#define MOUSE_SYS_BUTTON9UP 0x0020
+#define MOUSE_SYS_BUTTON10UP 0x0040
+#define MOUSE_SYS_MAXBUTTON 10
+#define MOUSE_SYS_STDBUTTONS 0x07
+#define MOUSE_SYS_EXTBUTTONS 0x7f /* the others */
+
+/* Mouse remote socket */
+#define _PATH_MOUSEREMOTE "/var/run/MouseRemote"
+
+#endif /* _MACHINE_MOUSE_H_ */
diff --git a/sys/ia64/include/mutex.h b/sys/ia64/include/mutex.h
new file mode 100644
index 0000000..b858079
--- /dev/null
+++ b/sys/ia64/include/mutex.h
@@ -0,0 +1,563 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MUTEX_H_
+#define _MACHINE_MUTEX_H_
+
+#ifndef LOCORE
+
+#include <sys/ktr.h>
+#include <sys/queue.h>
+#include <machine/atomic.h>
+#include <machine/cpufunc.h>
+#include <machine/globaldata.h>
+#include <machine/globals.h>
+
+/*
+ * If kern_mutex.c is being built, compile non-inlined versions of various
+ * functions so that kernel modules can use them.
+ */
+#ifndef _KERN_MUTEX_C_
+#define _MTX_INLINE static __inline
+#else
+#define _MTX_INLINE
+#endif
+
+/*
+ * Mutex flags
+ *
+ * Types
+ */
+#define MTX_DEF 0x1 /* Default (spin/sleep) */
+#define MTX_SPIN 0x2 /* Spin only lock */
+
+/* Options */
+#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
+#define MTX_NORECURSE 0x8 /* No recursion possible */
+#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
+#define MTX_NOSWITCH 0x20 /* Do not switch on release */
+#define MTX_FIRST 0x40 /* First spin lock holder */
+#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
+
+/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
+#define MTX_HARDOPTS (MTX_DEF | MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
+
+/* Flags/value used in mtx_lock */
+#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
+#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
+#define MTX_FLAGMASK ~(MTX_RECURSE | MTX_CONTESTED)
+#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
+
+struct proc; /* XXX */
+
+/*
+ * Sleep/spin mutex
+ */
+struct mtx {
+ volatile u_int64_t mtx_lock; /* lock owner/gate/flags */
+ volatile u_int32_t mtx_recurse; /* number of recursive holds */
+ u_int32_t mtx_savepsr; /* saved psr (for spin locks) */
+ char *mtx_description;
+ TAILQ_HEAD(, proc) mtx_blocked;
+ LIST_ENTRY(mtx) mtx_contested;
+ struct mtx *mtx_next; /* all locks in system */
+ struct mtx *mtx_prev;
+#ifdef SMP_DEBUG
+ /* If you add anything here, adjust the mtxf_t definition below */
+ struct witness *mtx_witness;
+ LIST_ENTRY(mtx) mtx_held;
+ const char *mtx_file;
+ int mtx_line;
+#endif /* SMP_DEBUG */
+};
+
+/*
+ * Filler for structs which need to remain the same size
+ * whether or not SMP_DEBUG is turned on.
+ */
+typedef struct mtxf {
+#ifdef SMP_DEBUG
+ char mtxf_data[0];
+#else
+ char mtxf_data[4*sizeof(void *) + sizeof(int)];
+#endif
+} mtxf_t;
+
+#define mp_fixme(string)
+
+#ifdef _KERNEL
+/* Misc */
+#define CURTHD ((u_int64_t)CURPROC) /* Current thread ID */
+
+/* Prototypes */
+void mtx_init(struct mtx *m, char *description, int flag);
+void mtx_enter_hard(struct mtx *, int type, int psr);
+void mtx_exit_hard(struct mtx *, int type);
+void mtx_destroy(struct mtx *m);
+
+/*
+ * Wrap the following functions with cpp macros so that filenames and line
+ * numbers are embedded in the code correctly.
+ */
+#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
+void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
+int _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
+void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
+#endif
+
+#define mtx_enter(mtxp, type) \
+ _mtx_enter((mtxp), (type), __FILE__, __LINE__)
+
+#define mtx_try_enter(mtxp, type) \
+ _mtx_try_enter((mtxp), (type), __FILE__, __LINE__)
+
+#define mtx_exit(mtxp, type) \
+ _mtx_exit((mtxp), (type), __FILE__, __LINE__)
+
+/* Global locks */
+extern struct mtx sched_lock;
+extern struct mtx Giant;
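A sketch of the basic sleep-mutex API declared above, using made-up names (foo_mtx, foo_init, foo_touch) purely for illustration:

static struct mtx foo_mtx;

static void
foo_init(void)
{
    mtx_init(&foo_mtx, "foo state", MTX_DEF);   /* once, at attach time */
}

static void
foo_touch(void)
{
    mtx_enter(&foo_mtx, MTX_DEF);
    /* ... modify the state protected by foo_mtx ... */
    mtx_exit(&foo_mtx, MTX_DEF);
}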
+
+/*
+ * Used to replace return with an exit Giant and return.
+ */
+
+#define EGAR(a) \
+do { \
+ mtx_exit(&Giant, MTX_DEF); \
+ return (a); \
+} while (0)
+
+#define VEGAR \
+do { \
+ mtx_exit(&Giant, MTX_DEF); \
+ return; \
+} while (0)
+
+#define DROP_GIANT() \
+do { \
+ int _giantcnt; \
+ WITNESS_SAVE_DECL(Giant); \
+ \
+ WITNESS_SAVE(&Giant, Giant); \
+ for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
+ mtx_exit(&Giant, MTX_DEF)
+
+#define PICKUP_GIANT() \
+ mtx_assert(&Giant, MA_NOTOWNED); \
+ while (_giantcnt--) \
+ mtx_enter(&Giant, MTX_DEF); \
+ WITNESS_RESTORE(&Giant, Giant); \
+} while (0)
+
+#define PARTIAL_PICKUP_GIANT() \
+ mtx_assert(&Giant, MA_NOTOWNED); \
+ while (_giantcnt--) \
+ mtx_enter(&Giant, MTX_DEF); \
+ WITNESS_RESTORE(&Giant, Giant)
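DROP_GIANT() opens a statement block that PICKUP_GIANT() closes (hence the deliberately unbalanced braces above), so the two must be paired lexically in the same function, typically around code that may sleep. A rough sketch with an illustrative function name, assuming the usual kernel headers for tsleep() and PZERO:

static void
wait_for_event(void *ident)
{
    DROP_GIANT();
    tsleep(ident, PZERO, "event", 0);   /* sleep without holding Giant */
    PICKUP_GIANT();
}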
+
+/*
+ * Debugging
+ */
+#ifndef SMP_DEBUG
+#define mtx_assert(m, what)
+#else /* SMP_DEBUG */
+
+#define MA_OWNED 1
+#define MA_NOTOWNED 2
+#define mtx_assert(m, what) { \
+ switch ((what)) { \
+ case MA_OWNED: \
+ ASS(mtx_owned((m))); \
+ break; \
+ case MA_NOTOWNED: \
+ ASS(!mtx_owned((m))); \
+ break; \
+ default: \
+ panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
+ } \
+}
+
+#ifdef INVARIANTS
+#define ASS(ex) MPASS(ex)
+#define MPASS(ex) if (!(ex)) panic("Assertion %s failed at %s:%d", \
+ #ex, __FILE__, __LINE__)
+#define MPASS2(ex, what) if (!(ex)) panic("Assertion %s failed at %s:%d", \
+ what, __FILE__, __LINE__)
+
+#ifdef MTX_STRS
+char STR_IEN[] = "psr.i";
+char STR_IDIS[] = "!psr.i";
+#else /* MTX_STRS */
+extern char STR_IEN[];
+extern char STR_IDIS[];
+#endif /* MTX_STRS */
+#define ASS_IEN MPASS2((save_intr() & (1 << 14)), STR_IEN)
+#define ASS_IDIS MPASS2(!(save_intr() & (1 << 14)), STR_IDIS)
+#endif /* INVARIANTS */
+
+#endif /* SMP_DEBUG */
+
+#if !defined(SMP_DEBUG) || !defined(INVARIANTS)
+#define ASS(ex)
+#define MPASS(ex)
+#define MPASS2(ex, where)
+#define ASS_IEN
+#define ASS_IDIS
+#endif /* !defined(SMP_DEBUG) || !defined(INVARIANTS) */
+
+#ifdef WITNESS
+#ifndef SMP_DEBUG
+#error WITNESS requires SMP_DEBUG
+#endif /* SMP_DEBUG */
+#define WITNESS_ENTER(m, t, f, l) \
+ if ((m)->mtx_witness != NULL) \
+ witness_enter((m), (t), (f), (l))
+#define WITNESS_EXIT(m, t, f, l) \
+ if ((m)->mtx_witness != NULL) \
+ witness_exit((m), (t), (f), (l))
+
+#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
+#define WITNESS_SAVE_DECL(n) \
+ const char * __CONCAT(n, __wf); \
+ int __CONCAT(n, __wl)
+
+#define WITNESS_SAVE(m, n) \
+do { \
+ if ((m)->mtx_witness != NULL) \
+ witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)); \
+} while (0)
+
+#define WITNESS_RESTORE(m, n) \
+do { \
+ if ((m)->mtx_witness != NULL) \
+ witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)); \
+} while (0)
+
+void witness_init(struct mtx *, int flag);
+void witness_destroy(struct mtx *);
+void witness_enter(struct mtx *, int, const char *, int);
+void witness_try_enter(struct mtx *, int, const char *, int);
+void witness_exit(struct mtx *, int, const char *, int);
+void witness_display(void(*)(const char *fmt, ...));
+void witness_list(struct proc *);
+int witness_sleep(int, struct mtx *, const char *, int);
+void witness_save(struct mtx *, const char **, int *);
+void witness_restore(struct mtx *, const char *, int);
+#else /* WITNESS */
+#define WITNESS_ENTER(m, t, f, l)
+#define WITNESS_EXIT(m, t, f, l)
+#define WITNESS_SLEEP(check, m)
+#define WITNESS_SAVE_DECL(n)
+#define WITNESS_SAVE(m, n)
+#define WITNESS_RESTORE(m, n)
+
+/*
+ * flag++ is a sleazy way of shutting up the unused parameter warning
+ * in mtx_init()
+ */
+#define witness_init(m, flag) flag++
+#define witness_destroy(m)
+#define witness_enter(m, t, f, l)
+#define witness_try_enter(m, t, f, l)
+#define witness_exit(m, t, f, l)
+#endif /* WITNESS */
+
+/*
+ * Assembly macros (for internal use only)
+ *--------------------------------------------------------------------------
+ */
+
+/*
+ * Get a sleep lock, deal with recursion inline
+ */
+
+#define _V(x) __STRING(x)
+
+#define _getlock_sleep(mp, tid, type) do { \
+ if (ia64_cmpxchg_acq_64(&(mp)->mtx_lock, \
+ MTX_UNOWNED, (tid)) != MTX_UNOWNED) { \
+ if (((mp)->mtx_lock & MTX_FLAGMASK) != (tid)) \
+ mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
+ else { \
+ atomic_set_64(&(mp)->mtx_lock, MTX_RECURSE); \
+ (mp)->mtx_recurse++; \
+ } \
+ } \
+} while (0)
+
+/*
+ * Get a spin lock, handle recursion inline (as the less common case)
+ */
+
+#define _getlock_spin_block(mp, tid, type) do { \
+ u_int _psr = save_intr(); \
+ disable_intr(); \
+ if (ia64_cmpxchg_acq_64(&(mp)->mtx_lock, \
+ MTX_UNOWNED, (tid)) != MTX_UNOWNED) \
+ mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _psr); \
+ else \
+ (mp)->mtx_savepsr = _psr; \
+} while (0)
+
+/*
+ * Get a lock without any recursion handling. Calls the hard enter
+ * function if we can't get it inline.
+ */
+
+#define _getlock_norecurse(mp, tid, type) do { \
+ if (ia64_cmpxchg_acq_64(&(mp)->mtx_lock, \
+ MTX_UNOWNED, (tid)) != MTX_UNOWNED) \
+ mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
+} while (0)
+
+/*
+ * Release a sleep lock assuming we haven't recursed on it; recursion is
+ * handled in the hard function.
+ */
+
+#define _exitlock_norecurse(mp, tid, type) do { \
+ if (ia64_cmpxchg_rel_64(&(mp)->mtx_lock, \
+ (tid), MTX_UNOWNED) != (tid)) \
+ mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
+} while (0)
+
+/*
+ * Release a sleep lock when it's likely we recursed (the code to
+ * deal with simple recursion is inline).
+ */
+
+#define _exitlock(mp, tid, type) do { \
+ if (ia64_cmpxchg_rel_64(&(mp)->mtx_lock, \
+ (tid), MTX_UNOWNED) != (tid)) { \
+ if (((mp)->mtx_lock & MTX_RECURSE) && \
+ (--(mp)->mtx_recurse == 0)) \
+ atomic_clear_64(&(mp)->mtx_lock, MTX_RECURSE); \
+ else \
+ mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
+ } \
+} while (0)
+
+/*
+ * Release a spin lock (with possible recursion)
+ */
+
+#define _exitlock_spin(mp) do { \
+ if ((mp)->mtx_recurse == 0) { \
+ int _psr = (mp)->mtx_savepsr; \
+ ia64_st_rel_64(&(mp)->mtx_lock, MTX_UNOWNED); \
+ restore_intr(_psr); \
+ } else { \
+ (mp)->mtx_recurse--; \
+ } \
+} while (0)
+
+/*
+ * Externally visible mutex functions
+ *------------------------------------------------------------------------
+ */
+
+/*
+ * Return non-zero if a mutex is already owned by the current thread
+ */
+#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == CURTHD)
+
+/* Common strings */
+#ifdef MTX_STRS
+char STR_mtx_enter_fmt[] = "GOT %s [%p] at %s:%d r=%d";
+char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
+char STR_mtx_exit_fmt[] = "REL %s [%p] at %s:%d r=%d";
+char STR_mtx_owned[] = "mtx_owned(mpp)";
+char STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
+char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] at %s:%d result=%d";
+#else /* MTX_STRS */
+extern char STR_mtx_enter_fmt[];
+extern char STR_mtx_bad_type[];
+extern char STR_mtx_exit_fmt[];
+extern char STR_mtx_owned[];
+extern char STR_mtx_recurse[];
+extern char STR_mtx_try_enter_fmt[];
+#endif /* MTX_STRS */
+
+#ifndef KLD_MODULE
+/*
+ * Get lock 'm'. The macro handles the easy (and most common) cases and
+ * leaves the slow stuff to the mtx_enter_hard() function.
+ *
+ * Note: since type is usually a constant, much of this code is optimized out.
+ */
+_MTX_INLINE void
+_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
+{
+ struct mtx *mpp = mtxp;
+
+ /* bits only valid on mtx_exit() */
+ MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
+ STR_mtx_bad_type);
+
+ if ((type) & MTX_SPIN) {
+ /*
+ * Easy cases of spin locks:
+ *
+ * 1) We already own the lock and will simply recurse on it (if
+ * RLIKELY)
+ *
+ * 2) The lock is free, we just get it
+ */
+ if ((type) & MTX_RLIKELY) {
+ /*
+ * Check for recursion, if we already have this lock we
+ * just bump the recursion count.
+ */
+ if (mpp->mtx_lock == CURTHD) {
+ mpp->mtx_recurse++;
+ goto done;
+ }
+ }
+
+ if (((type) & MTX_TOPHALF) == 0) {
+ /*
+ * If an interrupt thread uses this we must block
+ * interrupts here.
+ */
+ _getlock_spin_block(mpp, CURTHD, (type) & MTX_HARDOPTS);
+ } else
+ _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
+ } else {
+ /* Sleep locks */
+ if ((type) & MTX_RLIKELY)
+ _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
+ else
+ _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
+ }
+ done:
+ WITNESS_ENTER(mpp, type, file, line);
+ CTR5(KTR_LOCK, STR_mtx_enter_fmt,
+ mpp->mtx_description, mpp, file, line,
+ mpp->mtx_recurse);
+}
+
+/*
+ * Attempt to get MTX_DEF lock, return non-zero if lock acquired
+ *
+ * XXX DOES NOT HANDLE RECURSION
+ */
+_MTX_INLINE int
+_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
+{
+ struct mtx *const mpp = mtxp;
+ int rval;
+
+ rval = atomic_cmpset_64(&mpp->mtx_lock, MTX_UNOWNED, CURTHD);
+#ifdef SMP_DEBUG
+ if (rval && mpp->mtx_witness != NULL) {
+ ASS(mpp->mtx_recurse == 0);
+ witness_try_enter(mpp, type, file, line);
+ }
+#endif
+ CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
+ mpp->mtx_description, mpp, file, line, rval);
+
+ return rval;
+}
+
+/*
+ * Release lock m
+ */
+_MTX_INLINE void
+_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
+{
+ struct mtx *const mpp = mtxp;
+
+ MPASS2(mtx_owned(mpp), STR_mtx_owned);
+ WITNESS_EXIT(mpp, type, file, line);
+ CTR5(KTR_LOCK, STR_mtx_exit_fmt,
+ mpp->mtx_description, mpp, file, line,
+ mpp->mtx_recurse);
+ if ((type) & MTX_SPIN) {
+ if ((type) & MTX_NORECURSE) {
+ MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
+ ia64_st_rel_64(&mpp->mtx_lock, MTX_UNOWNED);
+ if (((type) & MTX_TOPHALF) == 0)
+ restore_intr(mpp->mtx_savepsr);
+ } else
+ if ((type) & MTX_TOPHALF) {
+ _exitlock_norecurse(mpp, CURTHD,
+ (type) & MTX_HARDOPTS);
+ } else
+ _exitlock_spin(mpp);
+ } else {
+ /* Handle sleep locks */
+ if ((type) & MTX_RLIKELY) {
+ _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
+ } else {
+ _exitlock_norecurse(mpp, CURTHD,
+ (type) & MTX_HARDOPTS);
+ }
+ }
+}
+
+#endif /* KLD_MODULE */
+#endif /* _KERNEL */
+
+#else /* !LOCORE */
+
+/*
+ * Simple assembly macros to get and release non-recursive spin locks
+ */
+#define MTX_ENTER(lck, rPSR, rOLD, rNEW, rLCK) \
+ mov rPSR=psr ; \
+ mov rNEW=globalp ; \
+ mov rLCK=lck+MTX_LOCK ;; \
+ rsm psr.i ; \
+ mov ar.ccv=MTX_UNOWNED ; \
+ add rNEW=PC_CURPROC,rNEW ;; \
+ ld8 rNEW=[rNEW] ;; \
+1: cmpxchg8.acq rOLD=[rLCK],rNEW,ar.ccv ;; \
+	cmp.ne p1,p0=MTX_UNOWNED,rOLD ;;	\
+(p1) br.cond.spnt.few 1b ;; \
+ mov rLCK=lck+MTX_SAVEPSR ;; \
+ st4 [rLCK]=rPSR
+
+#define MTX_EXIT(lck, rTMP, rLCK) \
+ mov rTMP=MTX_UNOWNED ; \
+ addl rLCK=@ltoff(lck),gp;; \
+ add rLCK=MTX_LOCK,rLCK;; \
+ st8.rel [rLCK]=rTMP ;; \
+ addl rLCK=@ltoff(lck),gp;; \
+ add rLCK=MTX_SAVEPSR,rLCK;; \
+ ld4 rTMP=[rLCK] ;; \
+ mov psr.l=rTMP ;; \
+ srlz.d
+
+#endif /* !LOCORE */
+
+#endif /* __MACHINE_MUTEX_H */
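A minimal usage sketch of the mutex interface above. Only struct mtx, MTX_DEF and the inline _mtx_* paths are visible in this hunk; the mtx_init()/mtx_enter()/mtx_exit() wrappers and the exact mtx_init() signature live in the earlier part of mutex.h and are assumed here.

/*
 * Hypothetical consumer of the mutex API (sketch only).  mtx_enter()
 * and mtx_exit() are assumed to expand to the _mtx_enter()/_mtx_exit()
 * inlines above, passing __FILE__/__LINE__ for the WITNESS checks.
 */
#include <sys/param.h>
#include <machine/mutex.h>

static struct mtx foo_mtx;		/* protects foo_count */
static int foo_count;

void
foo_init(void)
{
	mtx_init(&foo_mtx, "foo counter", MTX_DEF);	/* assumed signature */
}

void
foo_bump(void)
{
	mtx_enter(&foo_mtx, MTX_DEF);	/* fast path is the inline cmpxchg */
	foo_count++;
	mtx_exit(&foo_mtx, MTX_DEF);
}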
diff --git a/sys/ia64/include/pal.h b/sys/ia64/include/pal.h
new file mode 100644
index 0000000..39c12a4
--- /dev/null
+++ b/sys/ia64/include/pal.h
@@ -0,0 +1,95 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PAL_H_
+#define _MACHINE_PAL_H_
+
+/*
+ * Architected static calling convention procedures.
+ */
+#define PAL_CACHE_FLUSH 1
+#define PAL_CACHE_INFO 2
+#define PAL_CACHE_INIT 3
+#define PAL_CACHE_SUMMARY 4
+#define PAL_MEM_ATTRIB 5
+#define PAL_PTCE_INFO 6
+#define PAL_VM_INFO 7
+#define PAL_VM_SUMMARY 8
+#define PAL_BUS_GET_FEATURES 9
+#define PAL_BUS_SET_FEATURES 10
+#define PAL_DEBUG_INFO 11
+#define PAL_FIXED_ADDR 12
+#define PAL_FREQ_BASE 13
+#define PAL_FREQ_RATIOS 14
+#define PAL_PERF_MON_INFO 15
+#define PAL_PLATFORM_ADDR 16
+#define PAL_PROC_GET_FEATURE 17
+#define PAL_PROC_SET_FEATURE 18
+#define PAL_RSE_INFO 19
+#define PAL_VERSION 20
+#define PAL_MC_CLEAR_LOG 21
+#define PAL_MC_DRAIN 22
+#define PAL_MC_DYNAMIC_STATE 24
+#define PAL_MC_ERROR_INFO 25
+#define PAL_MC_EXPECTED 23
+#define PAL_MC_REGISTER_MEM 27
+#define PAL_MC_RESUME 26
+#define PAL_HALT 28
+#define PAL_HALT_LIGHT 29
+#define PAL_COPY_INFO 30
+#define PAL_CACHE_LINE_INIT 31
+#define PAL_PMI_ENTRYPOINT 32
+#define PAL_ENTER_IA_32_ENV 33
+#define PAL_VM_PAGE_SIZE 34
+#define PAL_MEM_FOR_TEST 37
+#define PAL_CACHE_PROT_INFO 38
+#define PAL_REGISTER_INFO 39
+#define PAL_SHUTDOWN 40
+#define PAL_PREFETCH_VISIBILITY 41
+
+/*
+ * Architected stacked calling convention procedures.
+ */
+#define PAL_COPY_PAL 256
+#define PAL_HALT_INFO 257
+#define PAL_TEST_PROC 258
+#define PAL_CACHE_READ 259
+#define PAL_CACHE_WRITE 260
+#define PAL_VM_TR_READ 261
+
+struct ia64_pal_result {
+ int64_t pal_status;
+ u_int64_t pal_result[3];
+};
+
+extern struct ia64_pal_result
+ ia64_call_pal_static(u_int64_t proc, u_int64_t arg1,
+ u_int64_t arg2, u_int64_t arg3);
+
+
+#endif /* _MACHINE_PAL_H_ */
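As an illustration of the interface above, a hedged sketch of a static PAL call; treating the PAL_FREQ_BASE result in pal_result[0] as the platform base frequency follows the usual PAL convention and is an assumption, not something this header spells out.

#include <sys/types.h>
#include <machine/pal.h>

/*
 * Query the platform base frequency via the static PAL calling
 * convention (sketch).  A non-zero pal_status indicates failure.
 */
u_int64_t
example_pal_freq_base(void)
{
	struct ia64_pal_result res;

	res = ia64_call_pal_static(PAL_FREQ_BASE, 0, 0, 0);
	if (res.pal_status != 0)
		return (0);
	return (res.pal_result[0]);
}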
diff --git a/sys/ia64/include/param.h b/sys/ia64/include/param.h
new file mode 100644
index 0000000..d67a0fc
--- /dev/null
+++ b/sys/ia64/include/param.h
@@ -0,0 +1,163 @@
+/* $FreeBSD$ */
+/* From: NetBSD: param.h,v 1.20 1997/09/19 13:52:53 leo Exp */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: machparam.h 1.11 89/08/14$
+ *
+ * @(#)param.h 8.1 (Berkeley) 6/10/93
+ */
+
+/*
+ * Machine dependent constants for the IA64.
+ */
+#ifndef _MACHINE
+#define _MACHINE ia64
+#endif
+#ifndef MACHINE
+#define MACHINE "ia64"
+#endif
+#ifndef _MACHINE_ARCH
+#define _MACHINE_ARCH ia64
+#endif
+#ifndef MACHINE_ARCH
+#define MACHINE_ARCH "ia64"
+#endif
+#define MID_MACHINE MID_IA64
+
+#include <machine/ia64_cpu.h>
+#include <machine/cpu.h>
+
+/*
+ * OBJFORMAT_NAMES is a comma-separated list of the object formats
+ * that are supported on the architecture.
+ */
+#define OBJFORMAT_NAMES "elf"
+#define OBJFORMAT_DEFAULT "elf"
+
+#define NCPUS 1
+
+/*
+ * Round p (pointer or byte index) up to a correctly-aligned value for all
+ * data types (int, long, ...). The result is u_long and must be cast to
+ * any desired pointer type.
+ *
+ * ALIGNED_POINTER is a boolean macro that checks whether an address
+ * is valid to fetch data elements of type t from on this architecture.
+ * This does not reflect the optimal alignment, just the possibility
+ * (within reasonable limits).
+ *
+ */
+#define ALIGNBYTES 7
+#define ALIGN(p) (((u_long)(p) + ALIGNBYTES) &~ ALIGNBYTES)
+#define ALIGNED_POINTER(p,t) ((((u_long)(p)) & (sizeof(t)-1)) == 0)
+
+#define PAGE_SIZE 4096 /* bytes/page */
+#define PAGE_SHIFT 12
+#define PAGE_MASK (PAGE_SIZE-1)
+#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
+
+#define KERNBASE 0xfffffc0000300000LL /* start of kernel virtual */
+#define BTOPKERNBASE ((u_long)KERNBASE >> PGSHIFT)
+
+#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
+#define DEV_BSIZE (1<<DEV_BSHIFT)
+
+#ifndef BLKDEV_IOSIZE
+#define BLKDEV_IOSIZE PAGE_SIZE /* default block device I/O size */
+#endif
+#define DFLTPHYS (64 * 1024) /* default max raw I/O transfer size */
+#define MAXPHYS (128 * 1024) /* max raw I/O transfer size */
+
+#define CLSIZE 1
+#define CLSIZELOG2 0
+
+/* NOTE: SSIZE, SINCR and UPAGES must be multiples of CLSIZE */
+#define SSIZE 1 /* initial stack size/NBPG */
+#define SINCR 1 /* increment of stack/NBPG */
+
+#define UPAGES 4 /* pages of u-area */
+#define USPACE (UPAGES * PAGE_SIZE) /* total size of u-area */
+
+/*
+ * Constants related to network buffer management.
+ * MCLBYTES must be no larger than CLBYTES (the software page size), and,
+ * on machines that exchange pages of input or output buffers with mbuf
+ * clusters (MAPPED_MBUFS), MCLBYTES must also be an integral multiple
+ * of the hardware page size.
+ */
+#define MSIZE 256 /* size of an mbuf */
+#ifndef MCLSHIFT
+# define MCLSHIFT 11 /* convert bytes to m_buf clusters */
+ /* 2K cluster can hold Ether frame */
+#endif /* MCLSHIFT */
+#define MCLBYTES (1 << MCLSHIFT) /* size of a m_buf cluster */
+#define MCLOFSET (MCLBYTES - 1)
+
+/*
+ * Size of kernel malloc arena in CLBYTES-sized logical pages
+ */
+#ifndef NKMEMCLUSTERS
+#define NKMEMCLUSTERS (4096*1024/CLBYTES) /* XXX? */
+#endif
+
+/* pages ("clicks") to disk blocks */
+#define ctod(x) ((x) << (PAGE_SHIFT - DEV_BSHIFT))
+#define dtoc(x) ((x) >> (PAGE_SHIFT - DEV_BSHIFT))
+
+/* pages to bytes */
+#define ctob(x) ((x) << PAGE_SHIFT)
+#define btoc(x) (((x) + PAGE_MASK) >> PAGE_SHIFT)
+
+/* bytes to disk blocks */
+#define btodb(x) ((x) >> DEV_BSHIFT)
+#define dbtob(x) ((x) << DEV_BSHIFT)
+
+/*
+ * Mach derived conversion macros
+ */
+#define round_page(x) ((((unsigned long)(x)) + PAGE_MASK) & ~(PAGE_MASK))
+#define trunc_page(x) ((unsigned long)(x) & ~(PAGE_MASK))
+
+#define atop(x) ((unsigned long)(x) >> PAGE_SHIFT)
+#define ptoa(x) ((unsigned long)(x) << PAGE_SHIFT)
+
+#define ia64_btop(x) ((unsigned long)(x) >> PAGE_SHIFT)
+#define ia64_ptob(x) ((unsigned long)(x) << PAGE_SHIFT)
+
+#define pgtok(x) ((x) * (PAGE_SIZE / 1024))
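A short worked example of the page arithmetic above: with PAGE_SIZE 4096, round_page(10000) is 12288, btoc(10000) is 3, and ptoa(3) is 12288 again. A minimal sketch using those macros (the header is normally pulled in via <sys/param.h>):

#include <machine/param.h>

/*
 * Round a byte length up to whole pages and return the page count.
 * e.g. len = 10000 -> *rounded = 12288, return value 3.
 */
static unsigned long
example_pages_needed(unsigned long len, unsigned long *rounded)
{
	*rounded = round_page(len);
	return (btoc(len));
}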
diff --git a/sys/ia64/include/pcb.h b/sys/ia64/include/pcb.h
new file mode 100644
index 0000000..41c3777
--- /dev/null
+++ b/sys/ia64/include/pcb.h
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PCB_H_
+#define _MACHINE_PCB_H_
+
+/*
+ * PCB: process control block
+ */
+struct pcb {
+ u_int64_t pcb_r4;
+ u_int64_t pcb_r5;
+ u_int64_t pcb_r6;
+ u_int64_t pcb_r7;
+
+ struct ia64_fpreg pcb_f2;
+ struct ia64_fpreg pcb_f3;
+ struct ia64_fpreg pcb_f4;
+ struct ia64_fpreg pcb_f5;
+
+ u_int64_t pcb_old_unat; /* caller's ar.unat */
+ u_int64_t pcb_sp;
+ u_int64_t pcb_pfs;
+ u_int64_t pcb_bspstore;
+
+ u_int64_t pcb_unat; /* ar.unat for r4..r7 */
+ u_int64_t pcb_rnat;
+ u_int64_t pcb_pr; /* predicates */
+ u_int64_t pcb_iip; /* address to restart */
+
+ unsigned long pcb_onfault; /* for copy faults */
+ unsigned long pcb_accessaddr; /* for [fs]uswintr */
+};
+
+/*
+ * The pcb is augmented with machine-dependent additional data for
+ * core dumps. For the ia64, that's a trap frame.
+ */
+struct md_coredump {
+ struct trapframe md_tf;
+};
+
+#endif /* _MACHINE_PCB_H_ */
diff --git a/sys/ia64/include/pcpu.h b/sys/ia64/include/pcpu.h
new file mode 100644
index 0000000..87c9fe5
--- /dev/null
+++ b/sys/ia64/include/pcpu.h
@@ -0,0 +1,78 @@
+/*-
+ * Copyright (c) 1999 Luoqi Chen <luoqi@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_GLOBALDATA_H_
+#define _MACHINE_GLOBALDATA_H_
+
+#ifdef _KERNEL
+
+#include <sys/queue.h>
+
+/*
+ * This structure maps out the global data that needs to be kept on a
+ * per-cpu basis. genassym uses this to generate offsets for the assembler
+ * code, which also provides external symbols so that C can get at them as
+ * though they were really globals. This structure is pointed to by
+ * the per-cpu system value. Inside the kernel, the globally reserved
+ * register (aliased as globalp) is used to point at the globaldata
+ * structure.
+ */
+struct globaldata {
+ struct proc *gd_curproc; /* current process */
+ struct proc *gd_idleproc; /* idle process */
+ struct proc *gd_fpcurproc; /* fp state owner */
+ struct pcb *gd_curpcb; /* current pcb */
+ struct timeval gd_switchtime;
+ int gd_switchticks;
+ u_int gd_cpuno; /* this cpu number */
+ u_int gd_other_cpus; /* all other cpus */
+ int gd_inside_intr;
+ u_int64_t gd_idlepcbphys; /* pa of gd_idlepcb */
+ u_int64_t gd_pending_ipis; /* pending IPI events */
+ u_int32_t gd_next_asn; /* next ASN to allocate */
+ u_int32_t gd_current_asngen; /* ASN rollover check */
+ u_int32_t gd_intr_nesting_level; /* interrupt recursion */
+
+ u_int gd_astpending;
+ SLIST_ENTRY(globaldata) gd_allcpu;
+#ifdef KTR_PERCPU
+ volatile int gd_ktr_idx; /* Index into trace table */
+ char *gd_ktr_buf;
+ char gd_ktr_buf_data[0];
+#endif
+};
+
+SLIST_HEAD(cpuhead, globaldata);
+extern struct cpuhead cpuhead;
+
+void globaldata_init(struct globaldata *pcpu, int cpuno, size_t sz);
+struct globaldata *globaldata_find(int cpuno);
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_GLOBALDATA_H_ */
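A hedged sketch of walking the per-cpu list declared above; it assumes each globaldata has been linked onto cpuhead by globaldata_init() during startup.

#include <sys/param.h>
#include <sys/queue.h>
#include <machine/pcpu.h>

/* Count the cpus currently registered on the cpuhead list. */
static int
example_count_cpus(void)
{
	struct globaldata *gd;
	int n = 0;

	SLIST_FOREACH(gd, &cpuhead, gd_allcpu)
		n++;
	return (n);
}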
diff --git a/sys/ia64/include/pmap.h b/sys/ia64/include/pmap.h
new file mode 100644
index 0000000..edb7ec5
--- /dev/null
+++ b/sys/ia64/include/pmap.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Derived from hp300 version by Mike Hibler, this version by William
+ * Jolitz uses a recursive map [a pde points to the page directory] to
+ * map the page tables using the pagetables themselves. This is done to
+ * reduce the impact on kernel virtual memory for lots of sparse address
+ * space, and to reduce the cost of memory to each process.
+ *
+ * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
+ * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
+ * from: i386 pmap.h,v 1.54 1997/11/20 19:30:35 bde Exp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PMAP_H_
+#define _MACHINE_PMAP_H_
+
+#ifdef LOCORE
+
+#define PTE_P (1<<0)
+#define PTE_MA_WB (0<<2)
+#define PTE_MA_UC (4<<2)
+#define PTE_MA_UCE (5<<2)
+#define PTE_MA_WC (6<<2)
+#define PTE_MA_NATPAGE (7<<2)
+#define PTE_A (1<<5)
+#define PTE_D (1<<6)
+#define PTE_PL_KERN (0<<7)
+#define PTE_PL_USER (3<<7)
+#define PTE_AR_R (0<<9)
+#define PTE_AR_RX (1<<9)
+#define PTE_AR_RW (2<<9)
+#define PTE_AR_RWX (3<<9)
+#define PTE_AR_R_RW (4<<9)
+#define PTE_AR_RX_RWX (5<<9)
+#define PTE_AR_RWX_RW (6<<9)
+#define PTE_AR_X_RX (7<<9)
+
+#else
+
+#define PTE_MA_WB 0
+#define PTE_MA_UC 4
+#define PTE_MA_UCE 5
+#define PTE_MA_WC 6
+#define PTE_MA_NATPAGE 7
+
+#define PTE_PL_KERN 0
+#define PTE_PL_USER 3
+
+#define PTE_AR_R 0
+#define PTE_AR_RX 1
+#define PTE_AR_RW 2
+#define PTE_AR_RWX 3
+#define PTE_AR_R_RW 4
+#define PTE_AR_RX_RWX 5
+#define PTE_AR_RWX_RW 6
+#define PTE_AR_X_RX 7
+
+#define PTE_IG_WIRED 1
+#define PTE_IG_MANAGED 2
+
+/*
+ * A short-format VHPT entry. Also matches the TLB insertion format.
+ */
+struct ia64_pte {
+ u_int64_t pte_p :1; /* bits 0..0 */
+ u_int64_t pte_rv1 :1; /* bits 1..1 */
+ u_int64_t pte_ma :3; /* bits 2..4 */
+ u_int64_t pte_a :1; /* bits 5..5 */
+ u_int64_t pte_d :1; /* bits 6..6 */
+ u_int64_t pte_pl :2; /* bits 7..8 */
+ u_int64_t pte_ar :3; /* bits 9..11 */
+ u_int64_t pte_ppn :38; /* bits 12..49 */
+ u_int64_t pte_rv2 :2; /* bits 50..51 */
+ u_int64_t pte_ed :1; /* bits 52..52 */
+ u_int64_t pte_ig :11; /* bits 53..63 */
+};
+
+/*
+ * A long-format VHPT entry.
+ */
+struct ia64_lpte {
+ u_int64_t pte_p :1; /* bits 0..0 */
+ u_int64_t pte_rv1 :1; /* bits 1..1 */
+ u_int64_t pte_ma :3; /* bits 2..4 */
+ u_int64_t pte_a :1; /* bits 5..5 */
+ u_int64_t pte_d :1; /* bits 6..6 */
+ u_int64_t pte_pl :2; /* bits 7..8 */
+ u_int64_t pte_ar :3; /* bits 9..11 */
+ u_int64_t pte_ppn :38; /* bits 12..49 */
+ u_int64_t pte_rv2 :2; /* bits 50..51 */
+ u_int64_t pte_ed :1; /* bits 52..52 */
+ u_int64_t pte_ig :11; /* bits 53..63 */
+
+ u_int64_t pte_rv3 :2; /* bits 0..1 */
+ u_int64_t pte_ps :6; /* bits 2..7 */
+ u_int64_t pte_key :24; /* bits 8..31 */
+ u_int64_t pte_rv4 :32; /* bits 32..63 */
+
+ u_int64_t pte_tag; /* includes ti */
+
+ u_int64_t pte_chain; /* pa of collision chain */
+};
+
+#include <sys/queue.h>
+
+#ifdef _KERNEL
+
+/*
+ * Routine: pmap_kextract
+ * Function:
+ * Extract the physical page address associated
+ *	Extract the physical page address associated with the
+ *	given kernel virtual address.
+static __inline vm_offset_t
+pmap_kextract(vm_offset_t va)
+{
+ return ia64_tpa(va);
+}
+
+#define vtophys(va) pmap_kextract(((vm_offset_t) (va)))
+
+#endif /* _KERNEL */
+
+/*
+ * Pmap stuff
+ */
+struct pv_entry;
+
+struct md_page {
+ int pv_list_count;
+ TAILQ_HEAD(,pv_entry) pv_list;
+};
+
+struct pmap {
+ TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
+ int pm_count; /* reference count */
+ int pm_flags; /* pmap flags */
+ int pm_active; /* active flag */
+ int pm_asn; /* address space number */
+ u_int pm_asngen; /* generation number of pm_asn */
+ struct pmap_statistics pm_stats; /* pmap statistics */
+ struct vm_page *pm_ptphint; /* pmap ptp hint */
+};
+
+#define pmap_resident_count(pmap) (pmap)->pm_stats.resident_count
+
+#define PM_FLAG_LOCKED 0x1
+#define PM_FLAG_WANTED 0x2
+
+typedef struct pmap *pmap_t;
+
+#ifdef _KERNEL
+extern pmap_t kernel_pmap;
+#endif
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page. An entry is a pv_entry_t; the list is pv_table.
+ */
+typedef struct pv_entry {
+ struct ia64_lpte pv_pte; /* pte for collision walker */
+ pmap_t pv_pmap; /* pmap where mapping lies */
+ vm_offset_t pv_va; /* virtual address for mapping */
+ TAILQ_ENTRY(pv_entry) pv_list;
+ TAILQ_ENTRY(pv_entry) pv_plist;
+} *pv_entry_t;
+
+#define PV_ENTRY_NULL ((pv_entry_t) 0)
+
+#ifdef _KERNEL
+
+extern vm_offset_t avail_end;
+extern vm_offset_t avail_start;
+extern vm_offset_t clean_eva;
+extern vm_offset_t clean_sva;
+extern vm_offset_t phys_avail[];
+extern char *ptvmmap; /* poor name! */
+extern vm_offset_t virtual_avail;
+extern vm_offset_t virtual_end;
+
+vm_offset_t pmap_steal_memory __P((vm_size_t));
+void pmap_bootstrap __P((void));
+void pmap_setdevram __P((unsigned long long basea, vm_offset_t sizea));
+int pmap_uses_prom_console __P((void));
+pmap_t pmap_kernel __P((void));
+void *pmap_mapdev __P((vm_offset_t, vm_size_t));
+unsigned *pmap_pte __P((pmap_t, vm_offset_t)) __pure2;
+vm_page_t pmap_use_pt __P((pmap_t, vm_offset_t));
+void pmap_set_opt __P((unsigned *));
+void pmap_set_opt_bsp __P((void));
+void pmap_deactivate __P((struct proc *p));
+void pmap_emulate_reference __P((struct proc *p, vm_offset_t v, int user, int write));
+
+#endif /* _KERNEL */
+
+#endif /* !LOCORE */
+
+#endif /* !_MACHINE_PMAP_H_ */
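To make the short-format PTE layout above concrete, a hedged sketch that builds a wired, writable kernel mapping; the hard-coded 12-bit page shift matches PAGE_SHIFT in param.h, and the field encodings follow struct ia64_pte and the PTE_* values defined in this file.

#include <sys/types.h>
#include <machine/pmap.h>

/* Build a short-format PTE mapping physical address 'pa' (sketch). */
static struct ia64_pte
example_kernel_pte(u_int64_t pa)
{
	struct ia64_pte pte = { 0 };

	pte.pte_p = 1;			/* mapping is present */
	pte.pte_ma = PTE_MA_WB;		/* write-back cacheable memory */
	pte.pte_a = 1;			/* pre-set accessed and dirty */
	pte.pte_d = 1;
	pte.pte_pl = PTE_PL_KERN;	/* kernel privilege level */
	pte.pte_ar = PTE_AR_RWX;	/* read/write/execute access rights */
	pte.pte_ppn = pa >> 12;		/* physical page number (PAGE_SHIFT) */
	pte.pte_ig = PTE_IG_WIRED;	/* software bit: wired mapping */
	return (pte);
}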
diff --git a/sys/ia64/include/proc.h b/sys/ia64/include/proc.h
new file mode 100644
index 0000000..a2e9716
--- /dev/null
+++ b/sys/ia64/include/proc.h
@@ -0,0 +1,49 @@
+/* $FreeBSD$ */
+/* From: NetBSD: proc.h,v 1.3 1997/04/06 08:47:36 cgd Exp */
+
+/*
+ * Copyright (c) 1994, 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <machine/globaldata.h>
+#include <machine/globals.h>
+
+/*
+ * Machine-dependent part of the proc struct for the ia64.
+ */
+
+struct mdproc {
+ u_long md_flags;
+ struct trapframe *md_tf; /* trap/syscall registers */
+ struct pcb *md_pcbpaddr; /* phys addr of the pcb */
+};
+
+#define MDP_FPUSED 0x0001 /* Process used the FPU */
+#define MDP_UAC_NOPRINT 0x0010 /* Don't print unaligned traps */
+#define MDP_UAC_NOFIX 0x0020 /* Don't fixup unaligned traps */
+#define MDP_UAC_SIGBUS 0x0040 /* Deliver SIGBUS upon
+ unaligned access */
+#define MDP_UAC_MASK (MDP_UAC_NOPRINT | MDP_UAC_NOFIX | MDP_UAC_SIGBUS)
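A hedged sketch of how the md_flags bits above might be consulted; it assumes the mdproc is embedded in struct proc as p_md, which is the usual convention but is not shown in this header.

#include <sys/param.h>
#include <sys/proc.h>

/* Non-zero if the process has touched the FPU (sketch). */
static int
example_fpu_was_used(struct proc *p)
{
	return ((p->p_md.md_flags & MDP_FPUSED) != 0);
}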
diff --git a/sys/ia64/include/profile.h b/sys/ia64/include/profile.h
new file mode 100644
index 0000000..57fbb69
--- /dev/null
+++ b/sys/ia64/include/profile.h
@@ -0,0 +1,237 @@
+/* $FreeBSD$ */
+/* From: NetBSD: profile.h,v 1.9 1997/04/06 08:47:37 cgd Exp */
+
+/*
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#define _MCOUNT_DECL void mcount
+
+#define FUNCTION_ALIGNMENT 32
+
+typedef u_long fptrdiff_t;
+
+#if 0
+/*
+ * XXX The definition of MCOUNT below is really the following code, run
+ * XXX through cpp, since the inline assembly isn't preprocessed.
+ */
+#define OFFSET_AT 0
+#define OFFSET_V0 8
+#define OFFSET_T0 16
+#define OFFSET_T1 24
+#define OFFSET_T2 32
+#define OFFSET_T3 40
+#define OFFSET_T4 48
+#define OFFSET_T5 56
+#define OFFSET_T6 64
+#define OFFSET_T7 72
+#define OFFSET_S6 80
+#define OFFSET_A0 88
+#define OFFSET_A1 96
+#define OFFSET_A2 104
+#define OFFSET_A3 112
+#define OFFSET_A4 120
+#define OFFSET_A5 128
+#define OFFSET_T8 136
+#define OFFSET_T9 144
+#define OFFSET_T10 152
+#define OFFSET_T11 160
+#define OFFSET_RA 168
+#define OFFSET_T12 176
+#define OFFSET_GP 184
+#define FRAME_SIZE 192
+
+LEAF(_mcount,0) /* XXX */
+ .set noat
+ .set noreorder
+
+ lda sp, -FRAME_SIZE(sp)
+
+ stq at_reg, OFFSET_AT(sp)
+ stq v0, OFFSET_V0(sp)
+ stq t0, OFFSET_T0(sp)
+ stq t1, OFFSET_T1(sp)
+ stq t2, OFFSET_T2(sp)
+ stq t3, OFFSET_T3(sp)
+ stq t4, OFFSET_T4(sp)
+ stq t5, OFFSET_T5(sp)
+ stq t6, OFFSET_T6(sp)
+ stq t7, OFFSET_T7(sp)
+ stq s6, OFFSET_S6(sp) /* XXX because run _after_ prologue. */
+ stq a0, OFFSET_A0(sp)
+ stq a1, OFFSET_A1(sp)
+ stq a2, OFFSET_A2(sp)
+ stq a3, OFFSET_A3(sp)
+ stq a4, OFFSET_A4(sp)
+ stq a5, OFFSET_A5(sp)
+ stq t8, OFFSET_T8(sp)
+ stq t9, OFFSET_T9(sp)
+ stq t10, OFFSET_T10(sp)
+ stq t11, OFFSET_T11(sp)
+ stq ra, OFFSET_RA(sp)
+ stq t12, OFFSET_T12(sp)
+ stq gp, OFFSET_GP(sp)
+
+ br pv, LX99
+LX99: SETGP(pv)
+ mov ra, a0
+ mov at_reg, a1
+ CALL(mcount)
+
+ ldq v0, OFFSET_V0(sp)
+ ldq t0, OFFSET_T0(sp)
+ ldq t1, OFFSET_T1(sp)
+ ldq t2, OFFSET_T2(sp)
+ ldq t3, OFFSET_T3(sp)
+ ldq t4, OFFSET_T4(sp)
+ ldq t5, OFFSET_T5(sp)
+ ldq t6, OFFSET_T6(sp)
+ ldq t7, OFFSET_T7(sp)
+ ldq s6, OFFSET_S6(sp) /* XXX because run _after_ prologue. */
+ ldq a0, OFFSET_A0(sp)
+ ldq a1, OFFSET_A1(sp)
+ ldq a2, OFFSET_A2(sp)
+ ldq a3, OFFSET_A3(sp)
+ ldq a4, OFFSET_A4(sp)
+ ldq a5, OFFSET_A5(sp)
+ ldq t8, OFFSET_T8(sp)
+ ldq t9, OFFSET_T9(sp)
+ ldq t10, OFFSET_T10(sp)
+ ldq t11, OFFSET_T11(sp)
+ ldq ra, OFFSET_RA(sp)
+	ldq	t12, OFFSET_T12(sp)
+ ldq gp, OFFSET_GP(sp)
+
+ ldq at_reg, OFFSET_AT(sp)
+
+ lda sp, FRAME_SIZE(sp)
+ ret zero, (at_reg), 1
+
+ END(_mcount)
+#endif /* 0 */
+
+#define MCOUNT __asm (" \
+ .globl _mcount; \
+ .ent _mcount 0; \
+_mcount:; \
+ .frame $30,0,$26; \
+ .set noat; \
+ .set noreorder; \
+ \
+ lda $30, -192($30); \
+ \
+ stq $28, 0($30); \
+ stq $0, 8($30); \
+ stq $1, 16($30); \
+ stq $2, 24($30); \
+ stq $3, 32($30); \
+ stq $4, 40($30); \
+ stq $5, 48($30); \
+ stq $6, 56($30); \
+ stq $7, 64($30); \
+ stq $8, 72($30); \
+ stq $15, 80($30); \
+ stq $16, 88($30); \
+ stq $17, 96($30); \
+ stq $18, 104($30); \
+ stq $19, 112($30); \
+ stq $20, 120($30); \
+ stq $21, 128($30); \
+ stq $22, 136($30); \
+ stq $23, 144($30); \
+ stq $24, 152($30); \
+ stq $25, 160($30); \
+ stq $26, 168($30); \
+ stq $27, 176($30); \
+ stq $29, 184($30); \
+ \
+ br $27, LX98; \
+LX98: ldgp $29,0($27); \
+ mov $26, $16; \
+ mov $28, $17; \
+ jsr $26,mcount; \
+ ldgp $29,0($26); \
+ \
+ ldq $0, 8($30); \
+ ldq $1, 16($30); \
+ ldq $2, 24($30); \
+ ldq $3, 32($30); \
+ ldq $4, 40($30); \
+ ldq $5, 48($30); \
+ ldq $6, 56($30); \
+ ldq $7, 64($30); \
+ ldq $8, 72($30); \
+ ldq $15, 80($30); \
+ ldq $16, 88($30); \
+ ldq $17, 96($30); \
+ ldq $18, 104($30); \
+ ldq $19, 112($30); \
+ ldq $20, 120($30); \
+ ldq $21, 128($30); \
+ ldq $22, 136($30); \
+ ldq $23, 144($30); \
+ ldq $24, 152($30); \
+ ldq $25, 160($30); \
+ ldq $26, 168($30); \
+ ldq $27, 176($30); \
+ ldq $29, 184($30); \
+ \
+ ldq $28, 0($30); \
+ \
+ lda $30, 192($30); \
+ ret $31, ($28), 1; \
+ \
+ .end _mcount");
+
+#ifdef _KERNEL
+/*
+ * The following two macros do splhigh and splx respectively.
+ * _alpha_pal_swpipl is a special version of alpha_pal_swpipl which
+ * doesn't include profiling support.
+ *
+ * XXX These macros should probably use inline assembly.
+ */
+#define MCOUNT_ENTER(s) \
+ s = _alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH)
+#define MCOUNT_EXIT(s) \
+ (void)_alpha_pal_swpipl(s);
+#define MCOUNT_DECL(s) u_long s;
+#ifdef GUPROF
+struct gmonparam;
+
+void nullfunc_loop_profiled __P((void));
+void nullfunc_profiled __P((void));
+void startguprof __P((struct gmonparam *p));
+void stopguprof __P((struct gmonparam *p));
+#else
+#define startguprof(p)
+#define stopguprof(p)
+#endif /* GUPROF */
+
+#else /* !_KERNEL */
+typedef u_long uintfptr_t;
+#endif
diff --git a/sys/ia64/include/ptrace.h b/sys/ia64/include/ptrace.h
new file mode 100644
index 0000000..8a7ff84
--- /dev/null
+++ b/sys/ia64/include/ptrace.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ptrace.h 8.1 (Berkeley) 6/11/93
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PTRACE_H_
+#define _MACHINE_PTRACE_H_
+
+/*
+ * Machine dependent trace commands.
+ */
+#define PT_GETREGS (PT_FIRSTMACH + 1)
+#define PT_SETREGS (PT_FIRSTMACH + 2)
+#define PT_GETFPREGS (PT_FIRSTMACH + 3)
+#define PT_SETFPREGS (PT_FIRSTMACH + 4)
+
+#ifdef _KERNEL
+int ptrace_read_u_check __P((struct proc *p, vm_offset_t off, size_t len));
+#endif
+
+#endif
+
diff --git a/sys/ia64/include/reg.h b/sys/ia64/include/reg.h
new file mode 100644
index 0000000..7378e37
--- /dev/null
+++ b/sys/ia64/include/reg.h
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_REG_H_
+#define _MACHINE_REG_H_
+
+struct ia64_fpreg {
+ u_int64_t fpr_bits[2];
+} __attribute__ ((aligned (16)));
+
+struct reg {
+ u_int64_t r_regs[128];
+};
+
+struct fpreg {
+ struct ia64_fpreg fpr_regs[128];
+};
+
+struct dbreg {
+ u_int64_t dbr_data[8];
+ u_int64_t dbr_inst[8];
+};
+
+#ifdef _KERNEL
+
+struct proc;
+
+void restorefpstate __P((struct fpreg *));
+void savefpstate __P((struct fpreg *));
+void setregs __P((struct proc *, u_long, u_long, u_long));
+#endif
+
+#endif /* _MACHINE_REG_H_ */
diff --git a/sys/ia64/include/reloc.h b/sys/ia64/include/reloc.h
new file mode 100644
index 0000000..a8ce0b1
--- /dev/null
+++ b/sys/ia64/include/reloc.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
diff --git a/sys/ia64/include/resource.h b/sys/ia64/include/resource.h
new file mode 100644
index 0000000..28fcc98
--- /dev/null
+++ b/sys/ia64/include/resource.h
@@ -0,0 +1,44 @@
+/* $FreeBSD$ */
+/*
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_RESOURCE_H_
+#define _MACHINE_RESOURCE_H_ 1
+
+/*
+ * Definitions of resource types for Intel Architecture machines
+ * with support for legacy ISA devices and drivers.
+ */
+
+#define SYS_RES_IRQ 1 /* interrupt lines */
+#define SYS_RES_DRQ 2 /* isa dma lines */
+#define SYS_RES_MEMORY 3 /* i/o memory */
+#define SYS_RES_IOPORT 4 /* i/o ports */
+
+#endif /* !_MACHINE_RESOURCE_H_ */
diff --git a/sys/ia64/include/setjmp.h b/sys/ia64/include/setjmp.h
new file mode 100644
index 0000000..5fcfe2a
--- /dev/null
+++ b/sys/ia64/include/setjmp.h
@@ -0,0 +1,46 @@
+/* $FreeBSD$ */
+/* From: NetBSD: setjmp.h,v 1.2 1997/04/06 08:47:41 cgd Exp */
+
+/*
+ * Copyright (c) 1994, 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * machine/setjmp.h: machine dependent setjmp-related information.
+ */
+
+#define _JBLEN 81 /* size, in longs, of a jmp_buf */
+
+/*
+ * jmp_buf and sigjmp_buf are encapsulated in different structs to force
+ * compile-time diagnostics for mismatches. The structs are the same
+ * internally to avoid some run-time errors for mismatches.
+ */
+#ifndef _ANSI_SOURCE
+typedef struct { long _sjb[_JBLEN + 1]; } sigjmp_buf[1];
+#endif /* not ANSI */
+
+typedef struct { long _jb[_JBLEN + 1]; } jmp_buf[1];
diff --git a/sys/ia64/include/sigframe.h b/sys/ia64/include/sigframe.h
new file mode 100644
index 0000000..491ce76
--- /dev/null
+++ b/sys/ia64/include/sigframe.h
@@ -0,0 +1,42 @@
+/*-
+ * Copyright (c) 1999 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SIGFRAME_H_
+#define _MACHINE_SIGFRAME_H_ 1
+
+struct osigframe {
+};
+
+struct sigframe {
+ ucontext_t sf_uc;
+ siginfo_t sf_si;
+};
+
+#endif /* _MACHINE_SIGFRAME_H_ */
diff --git a/sys/ia64/include/signal.h b/sys/ia64/include/signal.h
new file mode 100644
index 0000000..5474941
--- /dev/null
+++ b/sys/ia64/include/signal.h
@@ -0,0 +1,77 @@
+/* $FreeBSD$ */
+/* From: NetBSD: signal.h,v 1.3 1997/04/06 08:47:43 cgd Exp */
+
+/*
+ * Copyright (c) 1994, 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#ifndef _MACHINE_SIGNAL_H_
+#define _MACHINE_SIGNAL_H_
+
+typedef long sig_atomic_t;
+
+#ifndef _ANSI_SOURCE
+
+/*
+ * Information pushed on stack when a signal is delivered.
+ * This is used by the kernel to restore state following
+ * execution of the signal handler. It is also made available
+ * to the handler to allow it to restore state properly if
+ * a non-standard exit is performed.
+ *
+ * Note that sc_gr[], sc_br[] and sc_fr[] carry the general, branch
+ * and floating-point register state saved for the handler.
+ */
+typedef unsigned int osigset_t;
+struct osigcontext {};
+
+/*
+ * The sequence of the fields should match those in
+ * mcontext_t. Keep them in sync!
+ */
+struct sigcontext {
+ sigset_t sc_mask; /* signal mask to restore */
+ unsigned long sc_flags;
+ unsigned long sc_nat;
+ unsigned long sc_sp;
+ unsigned long sc_ip;
+ unsigned long sc_cfm;
+ unsigned long sc_um;
+ unsigned long sc_ar_rsc;
+ unsigned long sc_ar_bsp;
+ unsigned long sc_ar_rnat;
+ unsigned long sc_ar_ccv;
+ unsigned long sc_ar_unat;
+ unsigned long sc_ar_fpsr;
+ unsigned long sc_ar_pfs;
+ unsigned long sc_pr;
+ unsigned long sc_br[8];
+ unsigned long sc_gr[32];
+ struct ia64_fpreg sc_fr[128];
+};
+
+#endif /* !_ANSI_SOURCE */
+#endif /* !_MACHINE_SIGNAL_H_*/
diff --git a/sys/ia64/include/smp.h b/sys/ia64/include/smp.h
new file mode 100644
index 0000000..65c85ef
--- /dev/null
+++ b/sys/ia64/include/smp.h
@@ -0,0 +1,48 @@
+/*
+ * $FreeBSD$
+ */
+#ifndef _MACHINE_SMP_H_
+#define _MACHINE_SMP_H_
+
+#ifdef _KERNEL
+
+#include <machine/mutex.h>
+#include <machine/ipl.h>
+#include <sys/ktr.h>
+
+#ifndef LOCORE
+
+#define BETTER_CLOCK /* unconditional on ia64 */
+
+/* global data in mp_machdep.c */
+extern volatile u_int checkstate_probed_cpus;
+extern volatile u_int checkstate_need_ast;
+extern volatile u_int resched_cpus;
+extern void (*cpustop_restartfunc) __P((void));
+
+extern int smp_active;
+extern int mp_ncpus;
+extern u_int all_cpus;
+extern u_int started_cpus;
+extern u_int stopped_cpus;
+
+/* functions in mp_machdep.c */
+void mp_start(void);
+void mp_announce(void);
+void smp_invltlb(void);
+void forward_statclock(int pscnt);
+void forward_hardclock(int pscnt);
+void forward_signal(struct proc *);
+void forward_roundrobin(void);
+int stop_cpus(u_int);
+int restart_cpus(u_int);
+void smp_rendezvous_action(void);
+void smp_rendezvous(void (*)(void *),
+ void (*)(void *),
+ void (*)(void *),
+ void *arg);
+void smp_init_secondary(void);
+
+#endif /* !LOCORE */
+#endif /* _KERNEL */
+#endif
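A hedged sketch of the smp_rendezvous() hook declared above: the setup and teardown functions are optional, so NULL is passed for both and the action runs once on every participating cpu.

#include <sys/param.h>
#include <machine/smp.h>

/* Action executed on each cpu taking part in the rendezvous. */
static void
example_percpu_action(void *arg)
{
	/* per-cpu work goes here */
}

static void
example_rendezvous(void)
{
	smp_rendezvous(NULL, example_percpu_action, NULL, NULL);
}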
diff --git a/sys/ia64/include/stdarg.h b/sys/ia64/include/stdarg.h
new file mode 100644
index 0000000..e8dde78
--- /dev/null
+++ b/sys/ia64/include/stdarg.h
@@ -0,0 +1,50 @@
+/* $FreeBSD$ */
+/* From: NetBSD: stdarg.h,v 1.7 1997/04/06 08:47:44 cgd Exp */
+
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)stdarg.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MACHINE_STDARG_H_
+#define _MACHINE_STDARG_H_
+
+#include <machine/ansi.h>
+
+typedef _BSD_VA_LIST_ va_list;
+
+#define va_start(list, parmN) __builtin_stdarg_start(&(list), parmN)
+#define va_end __builtin_va_end
+#define va_arg __builtin_va_arg
+
+#endif /* !_MACHINE_STDARG_H_ */
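A small example of the varargs macros defined above (they rely on the compiler's __builtin_* support):

#include <machine/stdarg.h>

/* Sum 'count' int arguments passed after the count itself. */
static int
example_sum(int count, ...)
{
	va_list ap;
	int i, total = 0;

	va_start(ap, count);
	for (i = 0; i < count; i++)
		total += va_arg(ap, int);
	va_end(ap);
	return (total);
}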
diff --git a/sys/ia64/include/sysarch.h b/sys/ia64/include/sysarch.h
new file mode 100644
index 0000000..b990bcd
--- /dev/null
+++ b/sys/ia64/include/sysarch.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Architecture specific syscalls (ia64)
+ */
+#ifndef _MACHINE_SYSARCH_H_
+#define _MACHINE_SYSARCH_H_
+
+#ifndef _KERNEL
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+__END_DECLS
+#endif
+
+#endif /* !_MACHINE_SYSARCH_H_ */
diff --git a/sys/ia64/include/types.h b/sys/ia64/include/types.h
new file mode 100644
index 0000000..ccb7793
--- /dev/null
+++ b/sys/ia64/include/types.h
@@ -0,0 +1,73 @@
+/* $FreeBSD$ */
+/* From: NetBSD: types.h,v 1.8 1997/04/06 08:47:45 cgd Exp */
+
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)types.h 8.3 (Berkeley) 1/5/94
+ */
+
+#ifndef _MACHTYPES_H_
+#define _MACHTYPES_H_
+
+#include <sys/cdefs.h>
+
+#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE)
+typedef struct _physadr {
+ long r[1];
+} *physadr;
+
+typedef struct label_t {
+ long val[10];
+} label_t;
+#endif
+
+typedef unsigned long vm_offset_t;
+typedef long vm_ooffset_t;
+typedef unsigned long vm_pindex_t;
+typedef unsigned long vm_size_t;
+
+
+typedef __int64_t register_t;
+
+#ifdef _KERNEL
+typedef long intfptr_t;
+typedef unsigned long uintfptr_t;
+#endif
+
+/* Interrupt mask (spl, xxx_imask, etc) */
+typedef __uint32_t intrmask_t;
+
+/* Interrupt handler function type */
+typedef void inthand2_t(void *);
+
+#endif /* _MACHTYPES_H_ */
diff --git a/sys/ia64/include/ucontext.h b/sys/ia64/include/ucontext.h
new file mode 100644
index 0000000..bb18d76
--- /dev/null
+++ b/sys/ia64/include/ucontext.h
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (c) 1999 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_UCONTEXT_H_
+#define _MACHINE_UCONTEXT_H_
+
+#define IA64_MC_FLAG_ONSTACK 0
+#define IA64_MC_FLAG_IN_SYSCALL 1
+#define IA64_MC_FLAG_FPH_VALID 2
+
+typedef struct __mcontext {
+ /*
+ * These fields must match the definition
+ * of struct sigcontext. That way we can support
+ * struct sigcontext and ucontext_t at the same
+ * time.
+ *
+ * We use the same layout as Linux/ia64 to make emulation
+ * easier.
+ */
+ unsigned long mc_flags;
+ unsigned long mc_nat;
+ unsigned long mc_sp;
+ unsigned long mc_ip;
+ unsigned long mc_cfm;
+ unsigned long mc_um;
+ unsigned long mc_ar_rsc;
+ unsigned long mc_ar_bsp;
+ unsigned long mc_ar_rnat;
+ unsigned long mc_ar_ccv;
+ unsigned long mc_ar_unat;
+ unsigned long mc_ar_fpsr;
+ unsigned long mc_ar_pfs;
+ unsigned long mc_pr;
+ unsigned long mc_br[8];
+ unsigned long mc_gr[32];
+ struct ia64_fpreg mc_fr[128];
+} mcontext_t;
+
+#endif /* !_MACHINE_UCONTEXT_H_ */
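The header does not say how the IA64_MC_FLAG_* constants are consumed; the
sketch below assumes they are bit numbers within mc_flags (an assumption, not
something stated in the file), and that struct ia64_fpreg comes from
<machine/fpu.h>, which this commit also adds:

    #include <machine/fpu.h>        /* struct ia64_fpreg */
    #include <machine/ucontext.h>

    /* Assumption: IA64_MC_FLAG_* are bit numbers within mc_flags. */
    static int
    mc_flag_isset(const mcontext_t *mc, int flag)
    {
            return ((mc->mc_flags & (1UL << flag)) != 0);
    }

    /* e.g. if (mc_flag_isset(mc, IA64_MC_FLAG_IN_SYSCALL)) ... */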
diff --git a/sys/ia64/include/varargs.h b/sys/ia64/include/varargs.h
new file mode 100644
index 0000000..25b156b
--- /dev/null
+++ b/sys/ia64/include/varargs.h
@@ -0,0 +1,57 @@
+/* $FreeBSD$ */
+/* From: NetBSD: varargs.h,v 1.7 1997/04/06 08:47:46 cgd Exp */
+
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)varargs.h 8.2 (Berkeley) 3/22/94
+ */
+
+#ifndef _MACHINE_VARARGS_H_
+#define _MACHINE_VARARGS_H_
+
+#include <machine/stdarg.h>
+
+typedef int __builtin_va_alist_t __attribute__((__mode__(__word__)));
+
+#define va_alist __builtin_va_alist
+#define va_dcl __builtin_va_alist_t __builtin_va_alist; ...
+
+#undef va_start
+#define va_start(ap) __builtin_varargs_start(&(ap))
+
+#endif /* !_MACHINE_VARARGS_H_ */
diff --git a/sys/ia64/include/vmparam.h b/sys/ia64/include/vmparam.h
new file mode 100644
index 0000000..2cdc959
--- /dev/null
+++ b/sys/ia64/include/vmparam.h
@@ -0,0 +1,161 @@
+/* $FreeBSD$ */
+/* From: NetBSD: vmparam.h,v 1.6 1997/09/23 23:23:23 mjacob Exp */
+#ifndef _MACHINE_VMPARAM_H_
+#define _MACHINE_VMPARAM_H_
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: vmparam.h 1.16 91/01/18$
+ *
+ * @(#)vmparam.h 8.2 (Berkeley) 4/22/94
+ */
+
+/*
+ * Machine dependent constants for ia64.
+ */
+/*
+ * USRTEXT is the start of the user text/data space, while USRSTACK
+ * is the top (end) of the user stack. Immediately above the user stack
+ * resides the user structure, which is UPAGES long and contains the
+ * kernel stack.
+ */
+#define USRTEXT CLBYTES
+/* #define USRSTACK VM_MAXUSER_ADDRESS */
+
+/*
+ * This stack location is suitable for OSF1 emulation. Some OSF
+ * programs are built as 32bit and assume that the stack is reachable
+ * with a 32bit value. OSF1 manages to have a variable location for
+ * the user stack which we should probably also support.
+ */
+#define USRSTACK (0x12000000LL - (UPAGES*PAGE_SIZE))
+
+/*
+ * Virtual memory related constants, all in bytes
+ */
+#ifndef MAXTSIZ
+#define MAXTSIZ (1<<30) /* max text size (1G) */
+#endif
+#ifndef DFLDSIZ
+#define DFLDSIZ (1<<27) /* initial data size (128M) */
+#endif
+#ifndef MAXDSIZ
+#define MAXDSIZ (1<<30) /* max data size (1G) */
+#endif
+#ifndef DFLSSIZ
+#define DFLSSIZ (1<<21) /* initial stack size (2M) */
+#endif
+#ifndef MAXSSIZ
+#define MAXSSIZ (1<<25) /* max stack size (32M) */
+#endif
+#ifndef SGROWSIZ
+#define SGROWSIZ (128UL*1024) /* amount to grow stack */
+#endif
+
+/*
+ * PTEs for mapping user space into the kernel for phyio operations.
+ * 64 pte's are enough to cover 8 disks * MAXBSIZE.
+ */
+#ifndef USRIOSIZE
+#define USRIOSIZE 64
+#endif
+
+/*
+ * Boundary at which to place first MAPMEM segment if not explicitly
+ * specified. Should be a power of two. This allows some slop for
+ * the data segment to grow underneath the first mapped segment.
+ */
+#define MMSEG 0x200000
+
+/*
+ * The size of the clock loop.
+ */
+#define LOOPPAGES (maxfree - firstfree)
+
+/*
+ * The time for a process to be blocked before being very swappable.
+ * This is a number of seconds which the system takes as being a non-trivial
+ * amount of real time. You probably shouldn't change this;
+ * it is used in subtle ways (fractions and multiples of it are, that is, like
+ * half of a ``long time'', almost a long time, etc.)
+ * It is related to human patience and other factors which don't really
+ * change over time.
+ */
+#define MAXSLP 20
+
+/*
+ * A swapped in process is given a small amount of core without being bothered
+ * by the page replacement algorithm. Basically this says that if you are
+ * swapped in you deserve some resources. We protect the last SAFERSS
+ * pages against paging and will just swap you out rather than paging you.
+ * Note that each process has at least UPAGES+CLSIZE pages which are not
+ * paged anyways, in addition to SAFERSS.
+ */
+#define SAFERSS 10 /* nominal ``small'' resident set size
+ protected against replacement */
+
+/*
+ * Mach derived constants
+ */
+
+/* user/kernel map constants */
+#define VM_MIN_ADDRESS 0
+#define VM_MAXUSER_ADDRESS IA64_RR_BASE(5)
+#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS
+#define VM_MIN_KERNEL_ADDRESS IA64_RR_BASE(5)
+#define VM_MAX_KERNEL_ADDRESS (IA64_RR_BASE(6) - 1)
+
+/* virtual sizes (bytes) for various kernel submaps */
+#ifndef VM_KMEM_SIZE
+#define VM_KMEM_SIZE (12 * 1024 * 1024)
+#endif
+
+/*
+ * How many physical pages per KVA page allocated.
+ * min(max(VM_KMEM_SIZE, Physical memory/VM_KMEM_SIZE_SCALE), VM_KMEM_SIZE_MAX)
+ * is the total KVA space allocated for kmem_map.
+ */
+#ifndef VM_KMEM_SIZE_SCALE
+#define VM_KMEM_SIZE_SCALE (4) /* XXX 8192 byte pages */
+#endif
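Spelled out as code, the sizing rule quoted in the comment above looks roughly
like this (editorial sketch; physmem_bytes stands in for the detected physical
memory, and VM_KMEM_SIZE_MAX is optional and not defined in this header):

    static unsigned long
    kmem_map_bytes(unsigned long physmem_bytes)
    {
            unsigned long size = VM_KMEM_SIZE;

            /* max(VM_KMEM_SIZE, physical memory / VM_KMEM_SIZE_SCALE) */
            if (physmem_bytes / VM_KMEM_SIZE_SCALE > size)
                    size = physmem_bytes / VM_KMEM_SIZE_SCALE;
    #ifdef VM_KMEM_SIZE_MAX
            /* min(..., VM_KMEM_SIZE_MAX) */
            if (size > VM_KMEM_SIZE_MAX)
                    size = VM_KMEM_SIZE_MAX;
    #endif
            return (size);
    }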
+
+/* initial pagein size of beginning of executable file */
+#ifndef VM_INITIAL_PAGEIN
+#define VM_INITIAL_PAGEIN 16
+#endif
+
+#endif /* !_MACHINE_VMPARAM_H_ */
diff --git a/sys/ia64/isa/isa.c b/sys/ia64/isa/isa.c
new file mode 100644
index 0000000..8ac1516
--- /dev/null
+++ b/sys/ia64/isa/isa.c
@@ -0,0 +1,171 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Modifications for Intel architecture by Garrett A. Wollman.
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+
+#include <machine/resource.h>
+
+#include <isa/isareg.h>
+#include <isa/isavar.h>
+#include <isa/isa_common.h>
+
+void
+isa_init(void)
+{
+}
+
+intrmask_t
+isa_irq_pending(void)
+{
+ u_char irr1;
+ u_char irr2;
+
+ irr1 = inb(IO_ICU1);
+ irr2 = inb(IO_ICU2);
+ return ((irr2 << 8) | irr1);
+}
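Since the return value packs ICU1's IRR into bits 0-7 and ICU2's into bits
8-15, a caller can test an individual IRQ with a simple bit mask (hypothetical
helper, not part of the commit):

    static int
    isa_irq_is_pending(int irq)        /* irq in 0..15 */
    {
            return ((isa_irq_pending() & (1 << irq)) != 0);
    }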
+
+/*
+ * This implementation simply passes the request up to the parent
+ * bus, which in our case is the special ia64 nexus, substituting any
+ * configured values if the caller defaulted. We can get away with
+ * this because there is no special mapping for ISA resources on an Intel
+ * platform. When porting this code to another architecture, it may be
+ * necessary to interpose a mapping layer here.
+ */
+struct resource *
+isa_alloc_resource(device_t bus, device_t child, int type, int *rid,
+ u_long start, u_long end, u_long count, u_int flags)
+{
+ /*
+ * Consider adding a resource definition. We allow rid 0-1 for
+ * irq and drq, 0-3 for memory and 0-7 for ports which is
+ * sufficient for isapnp.
+ */
+ int passthrough = (device_get_parent(child) != bus);
+ int isdefault = (start == 0UL && end == ~0UL);
+ struct isa_device* idev = DEVTOISA(child);
+ struct resource_list *rl = &idev->id_resources;
+ struct resource_list_entry *rle;
+
+ if (!passthrough && !isdefault) {
+ rle = resource_list_find(rl, type, *rid);
+ if (!rle) {
+ if (*rid < 0)
+ return 0;
+ switch (type) {
+ case SYS_RES_IRQ:
+ if (*rid >= ISA_NIRQ)
+ return 0;
+ break;
+ case SYS_RES_DRQ:
+ if (*rid >= ISA_NDRQ)
+ return 0;
+ break;
+ case SYS_RES_MEMORY:
+ if (*rid >= ISA_NMEM)
+ return 0;
+ break;
+ case SYS_RES_IOPORT:
+ if (*rid >= ISA_NPORT)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ resource_list_add(rl, type, *rid, start, end, count);
+ }
+ }
+
+ return resource_list_alloc(rl, bus, child, type, rid,
+ start, end, count, flags);
+}
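For context, a hypothetical driver attach routine showing the two request
styles the code above distinguishes: a default request (start 0, end ~0),
satisfied from the child's resource list, and an explicit range, which gets
added to that list first. foo_isa_attach and the chosen values are invented
for illustration; the usual <sys/bus.h>, <sys/rman.h> and <machine/resource.h>
includes are assumed, and the error path is simplified:

    static int
    foo_isa_attach(device_t dev)
    {
            struct resource *port, *irq;
            int rid;

            rid = 0;        /* default: range comes from the resource list */
            port = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
                0UL, ~0UL, 1, RF_ACTIVE);

            rid = 0;        /* explicit: caller names the range (IRQ 5) */
            irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
                5, 5, 1, RF_ACTIVE | RF_SHAREABLE);

            if (port == NULL || irq == NULL)
                    return (ENXIO);
            return (0);
    }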
+
+int
+isa_release_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ struct isa_device* idev = DEVTOISA(child);
+ struct resource_list *rl = &idev->id_resources;
+ return resource_list_release(rl, bus, child, type, rid, r);
+}
+
+/*
+ * We can't use the bus_generic_* versions of these methods because those
+ * methods always pass the bus param as the requesting device, and we need
+ * to pass the child (the ia64 nexus knows about this and is prepared to
+ * deal).
+ */
+int
+isa_setup_intr(device_t bus, device_t child, struct resource *r, int flags,
+ void (*ihand)(void *), void *arg, void **cookiep)
+{
+ return (BUS_SETUP_INTR(device_get_parent(bus), child, r, flags,
+ ihand, arg, cookiep));
+}
+
+int
+isa_teardown_intr(device_t bus, device_t child, struct resource *r,
+ void *cookie)
+{
+ return (BUS_TEARDOWN_INTR(device_get_parent(bus), child, r, cookie));
+}
diff --git a/sys/ia64/isa/isa_dma.c b/sys/ia64/isa/isa_dma.c
new file mode 100644
index 0000000..1eb95c9
--- /dev/null
+++ b/sys/ia64/isa/isa_dma.c
@@ -0,0 +1,512 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)isa.c 7.2 (Berkeley) 5/13/91
+ * from: isa_dma.c,v 1.3 1999/05/09 23:56:00 peter Exp $
+ * $FreeBSD$
+ */
+
+/*
+ * code to manage AT bus
+ *
+ * 92/08/18 Frank P. MacLachlan (fpm@crash.cts.com):
+ * Fixed uninitialized variable problem and added code to deal
+ * with DMA page boundaries in isa_dmarangecheck(). Fixed word
+ * mode DMA count computation and reorganized DMA setup code in
+ * isa_dmastart()
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <isa/isareg.h>
+#include <isa/isavar.h>
+#include <i386/isa/ic/i8237.h>
+#include <machine/bus.h>
+
+/*
+** Register definitions for DMA controller 1 (channels 0..3):
+*/
+#define DMA1_CHN(c) (IO_DMA1 + 1*(2*(c))) /* addr reg for channel c */
+#define DMA1_SMSK (IO_DMA1 + 1*10) /* single mask register */
+#define DMA1_MODE (IO_DMA1 + 1*11) /* mode register */
+#define DMA1_FFC (IO_DMA1 + 1*12) /* clear first/last FF */
+#define DMA1_RESET (IO_DMA1 + 1*13) /* reset */
+
+/*
+** Register definitions for DMA controller 2 (channels 4..7):
+*/
+#define DMA2_CHN(c) (IO_DMA2 + 2*(2*(c))) /* addr reg for channel c */
+#define DMA2_SMSK (IO_DMA2 + 2*10) /* single mask register */
+#define DMA2_MODE (IO_DMA2 + 2*11) /* mode register */
+#define DMA2_FFC (IO_DMA2 + 2*12) /* clear first/last FF */
+#define DMA2_RESET (IO_DMA2 + 2*13) /* reset */
+
+static bus_dma_tag_t dma_tag[8];
+static bus_dmamap_t dma_map[8];
+static u_int8_t dma_busy = 0; /* Used in isa_dmastart() */
+static u_int8_t dma_inuse = 0; /* Used for acquire/release */
+static u_int8_t dma_auto_mode = 0;
+static u_int8_t dma_bounced = 0;
+
+#define VALID_DMA_MASK (7)
+
+/* high byte of address is stored in this port for i-th dma channel */
+static int dmapageport[8] = { 0x87, 0x83, 0x81, 0x82, 0x8f, 0x8b, 0x89, 0x8a };
+
+/*
+ * Setup a DMA channel's bounce buffer.
+ */
+void
+isa_dmainit(chan, bouncebufsize)
+ int chan;
+ u_int bouncebufsize;
+{
+ static int initted = 0;
+ bus_addr_t boundary = chan >= 4 ? 0x20000 : 0x10000;
+
+ if (!initted) {
+ /*
+ * Reset the DMA hardware.
+ */
+ outb(DMA1_RESET, 0);
+ outb(DMA2_RESET, 0);
+ isa_dmacascade(4);
+
+ initted = 1;
+ }
+
+#ifdef DIAGNOSTIC
+ if (chan & ~VALID_DMA_MASK)
+ panic("isa_dmainit: channel out of range");
+
+ if (dma_tag[chan] || dma_map[chan])
+ panic("isa_dmainit: impossible request");
+#endif
+
+ if (bus_dma_tag_create(/*parent*/NULL,
+ /*alignment*/2,
+ /*boundary*/boundary,
+ /*lowaddr*/BUS_SPACE_MAXADDR_24BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ /*maxsize*/bouncebufsize,
+ /*nsegments*/1, /*maxsegsz*/0x3ffff,
+ /*flags*/BUS_DMA_ISA,
+ &dma_tag[chan]) != 0) {
+ panic("isa_dmainit: unable to create dma tag\n");
+ }
+
+ if (bus_dmamap_create(dma_tag[chan], 0, &dma_map[chan])) {
+ panic("isa_dmainit: unable to create dma map\n");
+ }
+
+}
+
+/*
+ * Register a DMA channel's usage. Usually called from a device driver
+ * in open() or during its initialization.
+ */
+int
+isa_dma_acquire(chan)
+ int chan;
+{
+#ifdef DIAGNOSTIC
+ if (chan & ~VALID_DMA_MASK)
+ panic("isa_dma_acquire: channel out of range");
+#endif
+
+ if (dma_inuse & (1 << chan)) {
+ printf("isa_dma_acquire: channel %d already in use\n", chan);
+ return (EBUSY);
+ }
+ dma_inuse |= (1 << chan);
+ dma_auto_mode &= ~(1 << chan);
+
+ return (0);
+}
+
+/*
+ * Unregister a DMA channel's usage. Usually called from a device driver
+ * during close() or during its shutdown.
+ */
+void
+isa_dma_release(chan)
+ int chan;
+{
+#ifdef DIAGNOSTIC
+ if (chan & ~VALID_DMA_MASK)
+ panic("isa_dma_release: channel out of range");
+
+ if ((dma_inuse & (1 << chan)) == 0)
+ printf("isa_dma_release: channel %d not in use\n", chan);
+#endif
+
+ if (dma_busy & (1 << chan)) {
+ dma_busy &= ~(1 << chan);
+ /*
+ * XXX We should also do "dma_bounced &= ~(1 << chan);"
+ * because we are acting on behalf of isa_dmadone() which
+ * was not called to end the last DMA operation. This does
+ * not matter now, but it may in the future.
+ */
+ }
+
+ dma_inuse &= ~(1 << chan);
+ dma_auto_mode &= ~(1 << chan);
+}
+
+/*
+ * isa_dmacascade(): program 8237 DMA controller channel to accept
+ * external dma control by a board.
+ */
+void
+isa_dmacascade(chan)
+ int chan;
+{
+#ifdef DIAGNOSTIC
+ if (chan & ~VALID_DMA_MASK)
+ panic("isa_dmacascade: channel out of range");
+#endif
+
+ /* set dma channel mode, and unmask the channel */
+ if ((chan & 4) == 0) {
+ outb(DMA1_MODE, DMA37MD_CASCADE | chan);
+ outb(DMA1_SMSK, chan);
+ } else {
+ outb(DMA2_MODE, DMA37MD_CASCADE | (chan & 3));
+ outb(DMA2_SMSK, chan & 3);
+ }
+}
+
+/*
+ * isa_dmastart(): program 8237 DMA controller channel.
+ */
+
+struct isa_dmastart_arg {
+ caddr_t addr;
+ int chan;
+ int flags;
+};
+
+static void isa_dmastart_cb(void *arg, bus_dma_segment_t *segs, int nseg,
+ int error)
+{
+ caddr_t addr = ((struct isa_dmastart_arg *) arg)->addr;
+ int chan = ((struct isa_dmastart_arg *) arg)->chan;
+ int flags = ((struct isa_dmastart_arg *) arg)->flags;
+ bus_addr_t phys = segs->ds_addr;
+ int nbytes = segs->ds_len;
+ int waport;
+
+ if (nseg != 1)
+ panic("isa_dmastart: transfer mapping not contiguous");
+
+#if 0
+ if ((chipset.sgmap == NULL) &&
+ (pmap_extract(pmap_kernel(), (vm_offset_t)addr)
+ > BUS_SPACE_MAXADDR_24BIT)) {
+ /* we bounced */
+ dma_bounced |= (1 << chan);
+ /* copy bounce buffer on write */
+ if (!(flags & ISADMA_READ))
+ bus_dmamap_sync(dma_tag[chan], dma_map[chan],
+ BUS_DMASYNC_PREWRITE);
+ }
+#endif
+
+ if ((chan & 4) == 0) {
+ /*
+ * Program one of DMA channels 0..3. These are
+ * byte mode channels.
+ */
+ /* set dma channel mode, and reset address ff */
+
+ /* If ISADMA_RAW flag is set, then use autoinitialise mode */
+ if (flags & ISADMA_RAW) {
+ if (flags & ISADMA_READ)
+ outb(DMA1_MODE, DMA37MD_AUTO|DMA37MD_WRITE|chan);
+ else
+ outb(DMA1_MODE, DMA37MD_AUTO|DMA37MD_READ|chan);
+ }
+ else
+ if (flags & ISADMA_READ)
+ outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|chan);
+ else
+ outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_READ|chan);
+ outb(DMA1_FFC, 0);
+
+ /* send start address */
+ waport = DMA1_CHN(chan);
+ outb(waport, phys);
+ outb(waport, phys>>8);
+ outb(dmapageport[chan], phys>>16);
+
+ /* send count */
+ outb(waport + 1, --nbytes);
+ outb(waport + 1, nbytes>>8);
+
+ /* unmask channel */
+ outb(DMA1_SMSK, chan);
+ } else {
+ /*
+ * Program one of DMA channels 4..7. These are
+ * word mode channels.
+ */
+ /* set dma channel mode, and reset address ff */
+
+ /* If ISADMA_RAW flag is set, then use autoinitialise mode */
+ if (flags & ISADMA_RAW) {
+ if (flags & ISADMA_READ)
+ outb(DMA2_MODE, DMA37MD_AUTO|DMA37MD_WRITE|(chan&3));
+ else
+ outb(DMA2_MODE, DMA37MD_AUTO|DMA37MD_READ|(chan&3));
+ }
+ else
+ if (flags & ISADMA_READ)
+ outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|(chan&3));
+ else
+ outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_READ|(chan&3));
+ outb(DMA2_FFC, 0);
+
+ /* send start address */
+ waport = DMA2_CHN(chan - 4);
+ outb(waport, phys>>1);
+ outb(waport, phys>>9);
+ outb(dmapageport[chan], phys>>16);
+
+ /* send count */
+ nbytes >>= 1;
+ outb(waport + 2, --nbytes);
+ outb(waport + 2, nbytes>>8);
+
+ /* unmask channel */
+ outb(DMA2_SMSK, chan & 3);
+ }
+}
+
+void
+isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
+{
+ struct isa_dmastart_arg args;
+
+#ifdef DIAGNOSTIC
+ if (chan & ~VALID_DMA_MASK)
+ panic("isa_dmastart: channel out of range");
+
+ if ((chan < 4 && nbytes > (1<<16))
+ || (chan >= 4 && (nbytes > (1<<17) || (uintptr_t)addr & 1)))
+ panic("isa_dmastart: impossible request");
+
+ if ((dma_inuse & (1 << chan)) == 0)
+ printf("isa_dmastart: channel %d not acquired\n", chan);
+#endif
+
+#if 0
+ /*
+ * XXX This should be checked, but drivers like ad1848 only call
+ * isa_dmastart() once because they use Auto DMA mode. If we
+ * leave this in, drivers that do this will print this continuously.
+ */
+ if (dma_busy & (1 << chan))
+ printf("isa_dmastart: channel %d busy\n", chan);
+#endif
+
+ if (!dma_tag[chan] || !dma_map[chan])
+ panic("isa_dmastart: called without isa_dmainit");
+
+ dma_busy |= (1 << chan);
+
+ if (flags & ISADMA_RAW) {
+ dma_auto_mode |= (1 << chan);
+ } else {
+ dma_auto_mode &= ~(1 << chan);
+ }
+
+ /*
+ * Freeze dma while updating registers.
+ */
+ outb(chan & 4 ? DMA2_SMSK : DMA1_SMSK, (chan & 3) | 4);
+
+ args.addr = addr;
+ args.chan = chan;
+ args.flags = flags;
+ bus_dmamap_load(dma_tag[chan], dma_map[chan], addr, nbytes,
+ isa_dmastart_cb, &args, 0);
+}
+
+void
+isa_dmadone(int flags, caddr_t addr, int nbytes, int chan)
+{
+#ifdef DIAGNOSTIC
+ if (chan & ~VALID_DMA_MASK)
+ panic("isa_dmadone: channel out of range");
+
+ if ((dma_inuse & (1 << chan)) == 0)
+ printf("isa_dmadone: channel %d not acquired\n", chan);
+#endif
+
+ if (((dma_busy & (1 << chan)) == 0) &&
+ (dma_auto_mode & (1 << chan)) == 0 )
+ printf("isa_dmadone: channel %d not busy\n", chan);
+
+ if (dma_bounced & (1 << chan)) {
+ /* copy bounce buffer on read */
+ if (flags & ISADMA_READ) {
+ bus_dmamap_sync(dma_tag[chan], dma_map[chan],
+ BUS_DMASYNC_POSTREAD);
+ }
+ dma_bounced &= ~(1 << chan);
+ }
+
+ if ((dma_auto_mode & (1 << chan)) == 0) {
+ outb(chan & 4 ? DMA2_SMSK : DMA1_SMSK, (chan & 3) | 4);
+ bus_dmamap_unload(dma_tag[chan], dma_map[chan]);
+ }
+
+ dma_busy &= ~(1 << chan);
+}
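Taken together, the helpers above are meant to be driven in a fixed order by a
driver; a hypothetical example (channel, buffer and direction are arbitrary):

    static void
    foo_start_read(void)
    {
            static char buf[4096];
            int chan = 1;

            isa_dmainit(chan, sizeof(buf));         /* once, at attach time */
            if (isa_dma_acquire(chan) != 0)
                    return;
            isa_dmastart(ISADMA_READ, (caddr_t)buf, sizeof(buf), chan);
            /* ... wait for the device to report completion ... */
            isa_dmadone(ISADMA_READ, (caddr_t)buf, sizeof(buf), chan);
            isa_dma_release(chan);
    }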
+
+/*
+ * Query the progress of a transfer on a DMA channel.
+ *
+ * To avoid having to interrupt a transfer in progress, we sample
+ * each of the high and low databytes twice, and apply the following
+ * logic to determine the correct count.
+ *
+ * Reads are performed with interrupts disabled, thus it is to be
+ * expected that the time between reads is very small. At most
+ * one rollover in the low count byte can be expected within the
+ * four reads that are performed.
+ *
+ * There are three gaps in which a rollover can occur :
+ *
+ * - read low1
+ * gap1
+ * - read high1
+ * gap2
+ * - read low2
+ * gap3
+ * - read high2
+ *
+ * If a rollover occurs in gap1 or gap2, the low2 value will be
+ * greater than the low1 value. In this case, low2 and high2 are a
+ * corresponding pair.
+ *
+ * In any other case, low1 and high1 can be considered to be correct.
+ *
+ * The function returns the number of bytes remaining in the transfer,
+ * or -1 if the channel requested is not active.
+ *
+ */
+int
+isa_dmastatus(int chan)
+{
+ u_long cnt = 0;
+ int ffport, waport;
+ u_long low1, high1, low2, high2;
+ int s;
+
+ /* channel active? */
+ if ((dma_inuse & (1 << chan)) == 0) {
+ printf("isa_dmastatus: channel %d not active\n", chan);
+ return(-1);
+ }
+ /* channel busy? */
+
+ if (((dma_busy & (1 << chan)) == 0) &&
+ (dma_auto_mode & (1 << chan)) == 0 ) {
+ printf("chan %d not busy\n", chan);
+ return -2 ;
+ }
+ if (chan < 4) { /* low DMA controller */
+ ffport = DMA1_FFC;
+ waport = DMA1_CHN(chan) + 1;
+ } else { /* high DMA controller */
+ ffport = DMA2_FFC;
+ waport = DMA2_CHN(chan - 4) + 2;
+ }
+
+ s = splhigh(); /* no interrupts Mr Jones! */
+ outb(ffport, 0); /* clear register LSB flipflop */
+ low1 = inb(waport);
+ high1 = inb(waport);
+ outb(ffport, 0); /* clear again */
+ low2 = inb(waport);
+ high2 = inb(waport);
+ splx(s); /* enable interrupts again */
+
+ /*
+ * Now decide if a wrap has tried to skew our results.
+ * Note that after TC, the count will read 0xffff, while we want
+ * to return zero, so we add and then mask to compensate.
+ */
+ if (low1 >= low2) {
+ cnt = (low1 + (high1 << 8) + 1) & 0xffff;
+ } else {
+ cnt = (low2 + (high2 << 8) + 1) & 0xffff;
+ }
+
+ if (chan >= 4) /* high channels move words */
+ cnt *= 2;
+ return(cnt);
+}
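A concrete illustration of the pairing rule above (values invented): suppose
the channel is counting down through 0x1200 and the low byte rolls over
between the first and second sample pair.

    read low1  = 0x01, high1 = 0x12     (register pair was 0x1201)
    -- count crosses 0x1200 -> 0x11ff in gap2 --
    read low2  = 0xfe, high2 = 0x11     (register pair now 0x11fe)

    low1 (0x01) < low2 (0xfe), so low2/high2 are used as the matching pair:
    cnt = (0xfe + (0x11 << 8) + 1) & 0xffff = 0x11ff bytes remaining.
    Mixing low1 with high2 would instead give 0x1102, off by a full 256.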
+
+/*
+ * Stop a DMA transfer currently in progress.
+ */
+int
+isa_dmastop(int chan)
+{
+ if ((dma_inuse & (1 << chan)) == 0)
+ printf("isa_dmastop: channel %d not acquired\n", chan);
+
+ if (((dma_busy & (1 << chan)) == 0) &&
+ ((dma_auto_mode & (1 << chan)) == 0)) {
+ printf("chan %d not busy\n", chan);
+ return -2 ;
+ }
+
+ if ((chan & 4) == 0) {
+ outb(DMA1_SMSK, (chan & 3) | 4 /* disable mask */);
+ } else {
+ outb(DMA2_SMSK, (chan & 3) | 4 /* disable mask */);
+ }
+ return(isa_dmastatus(chan));
+}