summaryrefslogtreecommitdiffstats
path: root/sys/alpha/include
diff options
context:
space:
mode:
authorjasone <jasone@FreeBSD.org>2000-09-07 01:33:02 +0000
committerjasone <jasone@FreeBSD.org>2000-09-07 01:33:02 +0000
commit769e0f974d8929599ba599ac496510fffc90ff34 (patch)
tree9387522900085835de81e7830e570ef3f6b3ea80 /sys/alpha/include
parentacf1927de02afda4855ec278b1128fd9446405ea (diff)
downloadFreeBSD-src-769e0f974d8929599ba599ac496510fffc90ff34.zip
FreeBSD-src-769e0f974d8929599ba599ac496510fffc90ff34.tar.gz
Major update to the way synchronization is done in the kernel. Highlights
include:

* Mutual exclusion is used instead of spl*(). See mutex(9). (Note: The
  alpha port is still in transition and currently uses both.)
* Per-CPU idle processes.
* Interrupts are run in their own separate kernel threads and can be
  preempted (i386 only).

Partially contributed by: BSDi (BSD/OS)
Submissions by (at least): cp, dfr, dillon, grog, jake, jhb, sheldonh
Diffstat (limited to 'sys/alpha/include')
-rw-r--r--sys/alpha/include/asm.h6
-rw-r--r--sys/alpha/include/cpu.h6
-rw-r--r--sys/alpha/include/cpufunc.h28
-rw-r--r--sys/alpha/include/globaldata.h79
-rw-r--r--sys/alpha/include/globals.h63
-rw-r--r--sys/alpha/include/ipl.h15
-rw-r--r--sys/alpha/include/lock.h32
-rw-r--r--sys/alpha/include/mutex.h563
-rw-r--r--sys/alpha/include/param.h4
-rw-r--r--sys/alpha/include/pcb.h9
-rw-r--r--sys/alpha/include/pcpu.h79
-rw-r--r--sys/alpha/include/pmap.h8
-rw-r--r--sys/alpha/include/proc.h8
-rw-r--r--sys/alpha/include/rpb.h54
-rw-r--r--sys/alpha/include/smp.h53
15 files changed, 971 insertions, 36 deletions
diff --git a/sys/alpha/include/asm.h b/sys/alpha/include/asm.h
index b185295..d46eb97 100644
--- a/sys/alpha/include/asm.h
+++ b/sys/alpha/include/asm.h
@@ -90,6 +90,11 @@
#define sp $30 /* (S) stack pointer */
#define zero $31 /* wired zero */
+/* In the kernel, we use t7 to point at the per-cpu globals. */
+#ifdef _KERNEL
+#define globalp $8
+#endif
+
/* Floating point registers (XXXX VERIFY THIS) */
#define fv0 $f0 /* (T) return value (real) */
#define fv1 $f1 /* (T) return value (imaginary)*/
@@ -266,7 +271,6 @@ _name_:; \
.loc 1 __LINE__; \
bsr ra,exception_save_regs /* jmp/CALL trashes pv/t12 */
-
/*
* LEAF
* Declare a global leaf function.
diff --git a/sys/alpha/include/cpu.h b/sys/alpha/include/cpu.h
index c9d783b..99eb79e 100644
--- a/sys/alpha/include/cpu.h
+++ b/sys/alpha/include/cpu.h
@@ -65,7 +65,7 @@ struct clockframe {
#define CLKF_BASEPRI(framep) \
(((framep)->cf_tf.tf_regs[FRAME_PS] & ALPHA_PSL_IPL_MASK) == 0)
#define CLKF_PC(framep) ((framep)->cf_tf.tf_regs[FRAME_PC])
-#define CLKF_INTR(framep) (intr_nesting_level >= 2)
+#define CLKF_INTR(framep) (PCPU_GET(intr_nesting_level) >= 2)
/*
* Preempt the current process if in interrupt from user mode,
@@ -89,9 +89,10 @@ struct clockframe {
*/
#define signotify(p) aston()
-#define aston() (astpending = 1)
+#define aston() PCPU_SET(astpending, 1)
#ifdef _KERNEL
+extern u_int astpending;
extern u_int32_t intr_nesting_level; /* bookeeping only; counts sw intrs */
extern u_int32_t want_resched; /* resched() was called */
#endif
@@ -132,7 +133,6 @@ struct reg;
struct rpb;
struct trapframe;
-extern struct proc *fpcurproc;
extern struct rpb *hwrpb;
extern volatile int mc_expected, mc_received;
diff --git a/sys/alpha/include/cpufunc.h b/sys/alpha/include/cpufunc.h
index e7d37f0..cabfe0f 100644
--- a/sys/alpha/include/cpufunc.h
+++ b/sys/alpha/include/cpufunc.h
@@ -33,6 +33,7 @@
#include <sys/types.h>
#include <machine/chipset.h>
+#include <machine/alpha_cpu.h>
#ifdef __GNUC__
@@ -44,6 +45,33 @@ breakpoint(void)
#endif
+/*
+ * Bogus interrupt manipulation
+ */
+static __inline void
+disable_intr(void)
+{
+ alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
+}
+
+static __inline void
+enable_intr(void)
+{
+ alpha_pal_swpipl(ALPHA_PSL_IPL_0);
+}
+
+static __inline u_int
+save_intr(void)
+{
+ return alpha_pal_rdps() & ALPHA_PSL_IPL_MASK;
+}
+
+static __inline void
+restore_intr(u_int ipl)
+{
+ alpha_pal_swpipl(ipl);
+}
+
#endif /* _KERNEL */
#endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/alpha/include/globaldata.h b/sys/alpha/include/globaldata.h
new file mode 100644
index 0000000..b246bb1
--- /dev/null
+++ b/sys/alpha/include/globaldata.h
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 1999 Luoqi Chen <luoqi@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_GLOBALDATA_H_
+#define _MACHINE_GLOBALDATA_H_
+
+#ifdef _KERNEL
+
+#include <sys/queue.h>
+
+/*
+ * This structure maps out the global data that needs to be kept on a
+ * per-cpu basis. genassym uses this to generate offsets for the assembler
+ * code, which also provides external symbols so that C can get at them as
+ * though they were really globals. This structure is pointed to by
+ * the per-cpu system value (see alpha_pal_rdval() and alpha_pal_wrval()).
+ * Inside the kernel, the globally reserved register t7 is used to
+ * point at the globaldata structure.
+ */
+struct globaldata {
+ struct alpha_pcb gd_idlepcb; /* pcb for idling */
+ struct proc *gd_curproc; /* current process */
+ struct proc *gd_idleproc; /* idle process */
+ struct proc *gd_fpcurproc; /* fp state owner */
+ struct pcb *gd_curpcb; /* current pcb */
+ struct timeval gd_switchtime;
+ int gd_switchticks;
+ u_int gd_cpuno; /* this cpu number */
+ u_int gd_other_cpus; /* all other cpus */
+ int gd_inside_intr;
+ u_int64_t gd_idlepcbphys; /* pa of gd_idlepcb */
+ u_int64_t gd_pending_ipis; /* pending IPI events */
+ u_int32_t gd_next_asn; /* next ASN to allocate */
+ u_int32_t gd_current_asngen; /* ASN rollover check */
+ u_int32_t gd_intr_nesting_level; /* interrupt recursion */
+
+ u_int gd_astpending;
+ SLIST_ENTRY(globaldata) gd_allcpu;
+#ifdef KTR_PERCPU
+ volatile int gd_ktr_idx; /* Index into trace table */
+ char *gd_ktr_buf;
+ char gd_ktr_buf_data[0];
+#endif
+};
+
+SLIST_HEAD(cpuhead, globaldata);
+extern struct cpuhead cpuhead;
+
+void globaldata_init(struct globaldata *pcpu, int cpuno, size_t sz);
+struct globaldata *globaldata_find(int cpuno);
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_GLOBALDATA_H_ */
diff --git a/sys/alpha/include/globals.h b/sys/alpha/include/globals.h
new file mode 100644
index 0000000..303efdf
--- /dev/null
+++ b/sys/alpha/include/globals.h
@@ -0,0 +1,63 @@
+/*-
+ * Copyright (c) 1999 Luoqi Chen <luoqi@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_GLOBALS_H_
+#define _MACHINE_GLOBALS_H_
+
+#ifdef _KERNEL
+
+register struct globaldata *globalp __asm__("$8");
+
+#if 1
+#define GLOBALP globalp
+#else
+#define GLOBALP ((struct globaldata *) alpha_pal_rdval())
+#endif
+
+#define PCPU_GET(name) (GLOBALP->gd_##name)
+#define PCPU_SET(name,value) (GLOBALP->gd_##name = (value))
+
+/*
+ * The following set of macros works for UP kernel as well, but for maximum
+ * performance we allow the global variables to be accessed directly. On the
+ * other hand, kernel modules should always use these macros to maintain
+ * portability between UP and SMP kernels.
+ */
+#define CURPROC PCPU_GET(curproc)
+#define curproc PCPU_GET(curproc)
+#define idleproc PCPU_GET(idleproc)
+#define curpcb PCPU_GET(curpcb)
+#define fpcurproc PCPU_GET(fpcurproc)
+#define switchtime PCPU_GET(switchtime)
+#define switchticks PCPU_GET(switchticks)
+#define cpuid PCPU_GET(cpuno)
+#define prevproc PCPU_GET(curproc) /* XXX - until ithreads */
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_GLOBALS_H_ */
diff --git a/sys/alpha/include/ipl.h b/sys/alpha/include/ipl.h
index ea93fbb..2e9b3cc 100644
--- a/sys/alpha/include/ipl.h
+++ b/sys/alpha/include/ipl.h
@@ -127,4 +127,19 @@ extern void schedsoftclock(void);
extern unsigned cpl; /* current priority level mask */
#endif
+/*
+ * Interprocessor interrupts for SMP.
+ */
+#define IPI_INVLTLB 0x0001
+#define IPI_RENDEZVOUS 0x0002
+#define IPI_AST 0x0004
+#define IPI_CHECKSTATE 0x0008
+#define IPI_STOP 0x0010
+
+void smp_ipi_selected(u_int32_t cpus, u_int64_t ipi);
+void smp_ipi_all(u_int64_t ipi);
+void smp_ipi_all_but_self(u_int64_t ipi);
+void smp_ipi_self(u_int64_t ipi);
+void smp_handle_ipi(struct trapframe *frame);
+
#endif /* !_MACHINE_MD_VAR_H_ */
diff --git a/sys/alpha/include/lock.h b/sys/alpha/include/lock.h
index c2ae0fa..1066d46 100644
--- a/sys/alpha/include/lock.h
+++ b/sys/alpha/include/lock.h
@@ -35,10 +35,40 @@
* It is an error to hold one of these locks while a process is sleeping.
*/
struct simplelock {
- volatile int lock_data;
+ volatile u_int lock_data;
};
+/* functions in mp_machdep.c */
+void s_lock_init __P((struct simplelock *));
+void s_lock __P((struct simplelock *));
+int s_lock_try __P((struct simplelock *));
+void ss_lock __P((struct simplelock *));
+void ss_unlock __P((struct simplelock *));
+void s_lock_np __P((struct simplelock *));
+void s_unlock_np __P((struct simplelock *));
+
+/* inline simplelock functions */
+static __inline void
+s_unlock(struct simplelock *lkp)
+{
+ alpha_mb();
+ lkp->lock_data = 0;
+}
+
+#if !defined(SIMPLELOCK_DEBUG) && NCPUS > 1
+/*
+ * This set of defines turns on the real functions in i386/isa/apic_ipl.s.
+ */
+#define simple_lock_init(alp) s_lock_init(alp)
+#define simple_lock(alp) s_lock(alp)
+#define simple_lock_try(alp) s_lock_try(alp)
+#define simple_unlock(alp) s_unlock(alp)
+
+#endif /* !SIMPLELOCK_DEBUG && NCPUS > 1 */
+
#define COM_LOCK()
#define COM_UNLOCK()
+#define COM_DISABLE_INTR() COM_LOCK()
+#define COM_ENABLE_INTR() COM_UNLOCK()
#endif /* !_MACHINE_LOCK_H_ */
diff --git a/sys/alpha/include/mutex.h b/sys/alpha/include/mutex.h
new file mode 100644
index 0000000..ac13b8c
--- /dev/null
+++ b/sys/alpha/include/mutex.h
@@ -0,0 +1,563 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
+ * $FreeBSD$
+ */
+
+
+#ifndef _MACHINE_MUTEX_H_
+#define _MACHINE_MUTEX_H_
+
+#ifndef LOCORE
+
+#include <sys/queue.h>
+#include <machine/atomic.h>
+#include <machine/cpufunc.h>
+#include <machine/globaldata.h>
+
+/*
+ * Mutex flags
+ *
+ * Types
+ */
+#define MTX_DEF 0x1 /* Default (spin/sleep) */
+#define MTX_SPIN 0x2 /* Spin only lock */
+
+/* Options */
+#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
+#define MTX_NORECURSE 0x8 /* No recursion possible */
+#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
+#define MTX_NOSWITCH 0x20 /* Do not switch on release */
+#define MTX_FIRST 0x40 /* First spin lock holder */
+#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
+
+/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
+#define MTX_HARDOPTS (MTX_DEF | MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
+
+/* Flags/value used in mtx_lock */
+#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
+#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
+#define MTX_FLAGMASK ~(MTX_RECURSE | MTX_CONTESTED)
+#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
+
+struct proc; /* XXX */
+
+/*
+ * Sleep/spin mutex
+ */
+struct mtx {
+ volatile u_int64_t mtx_lock; /* lock owner/gate/flags */
+ volatile u_int32_t mtx_recurse; /* number of recursive holds */
+ u_int32_t mtx_saveipl; /* saved ipl (for spin locks) */
+ char *mtx_description;
+ TAILQ_HEAD(, proc) mtx_blocked;
+ LIST_ENTRY(mtx) mtx_contested;
+ struct mtx *mtx_next; /* all locks in system */
+ struct mtx *mtx_prev;
+#ifdef SMP_DEBUG
+ /* If you add anything here, adjust the mtxf_t definition below */
+ struct witness *mtx_witness;
+ LIST_ENTRY(mtx) mtx_held;
+ char *mtx_file;
+ int mtx_line;
+#endif /* SMP_DEBUG */
+};
+
+typedef struct mtx mtx_t;
+
+/*
+ * Filler for structs which need to remain the same size
+ * whether or not SMP_DEBUG is turned on.
+ */
+typedef struct mtxf {
+#ifdef SMP_DEBUG
+ char mtxf_data[0];
+#else
+ char mtxf_data[4*sizeof(void *) + sizeof(int)];
+#endif
+} mtxf_t;
+
+#define mp_fixme(string)
+
+#ifdef _KERNEL
+/* Misc */
+#define CURTHD ((u_int64_t)CURPROC) /* Current thread ID */
+
+/* Prototypes */
+void mtx_init(mtx_t *m, char *description, int flag);
+void mtx_enter_hard(mtx_t *, int type, int ipl);
+void mtx_exit_hard(mtx_t *, int type);
+void mtx_destroy(mtx_t *m);
+
+/* Global locks */
+extern mtx_t sched_lock;
+extern mtx_t Giant;
+
+/*
+ * Used to replace return with an exit Giant and return.
+ */
+
+#define EGAR(a) \
+do { \
+ mtx_exit(&Giant, MTX_DEF); \
+ return (a); \
+} while (0)
+
+#define VEGAR \
+do { \
+ mtx_exit(&Giant, MTX_DEF); \
+ return; \
+} while (0)
+
+#define DROP_GIANT() \
+do { \
+ int _giantcnt; \
+ WITNESS_SAVE_DECL(Giant); \
+ \
+ WITNESS_SAVE(&Giant, Giant); \
+ for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
+ mtx_exit(&Giant, MTX_DEF)
+
+#define PICKUP_GIANT() \
+ mtx_assert(&Giant, MA_NOTOWNED); \
+ while (_giantcnt--) \
+ mtx_enter(&Giant, MTX_DEF); \
+ WITNESS_RESTORE(&Giant, Giant); \
+} while (0)
+
+#define PARTIAL_PICKUP_GIANT() \
+ mtx_assert(&Giant, MA_NOTOWNED); \
+ while (_giantcnt--) \
+ mtx_enter(&Giant, MTX_DEF); \
+ WITNESS_RESTORE(&Giant, Giant)
+
+
+/*
+ * Debugging
+ */
+#ifndef SMP_DEBUG
+#define mtx_assert(m, what)
+#else /* SMP_DEBUG */
+
+#define MA_OWNED 1
+#define MA_NOTOWNED 2
+#define mtx_assert(m, what) { \
+ switch ((what)) { \
+ case MA_OWNED: \
+ ASS(mtx_owned((m))); \
+ break; \
+ case MA_NOTOWNED: \
+ ASS(!mtx_owned((m))); \
+ break; \
+ default: \
+ panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
+ } \
+}
+
+#ifdef INVARIANTS
+#define ASS(ex) MPASS(ex)
+#define MPASS(ex) if (!(ex)) panic("Assertion %s failed at %s:%d", \
+ #ex, __FILE__, __LINE__)
+#define MPASS2(ex, what) if (!(ex)) panic("Assertion %s failed at %s:%d", \
+ what, __FILE__, __LINE__)
+
+#ifdef MTX_STRS
+char STR_IEN[] = "fl & 0x200";
+char STR_IDIS[] = "!(fl & 0x200)";
+#else /* MTX_STRS */
+extern char STR_IEN[];
+extern char STR_IDIS[];
+#endif /* MTX_STRS */
+#define ASS_IEN MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
+ == ALPHA_PSL_IPL_HIGH, STR_IEN)
+#define ASS_IDIS MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
+ != ALPHA_PSL_IPL_HIGH, STR_IDIS)
+#endif /* INVARIANTS */
+
+#endif /* SMP_DEBUG */
+
+#if !defined(SMP_DEBUG) || !defined(INVARIANTS)
+#define ASS(ex)
+#define MPASS(ex)
+#define MPASS2(ex, where)
+#define ASS_IEN
+#define ASS_IDIS
+#endif /* !defined(SMP_DEBUG) || !defined(INVARIANTS) */
+
+#ifdef WITNESS
+#ifndef SMP_DEBUG
+#error WITNESS requires SMP_DEBUG
+#endif /* SMP_DEBUG */
+#define WITNESS_ENTER(m, f) \
+ if ((m)->mtx_witness != NULL) \
+ witness_enter((m), (f), __FILE__, __LINE__)
+#define WITNESS_EXIT(m, f) \
+ if ((m)->mtx_witness != NULL) \
+ witness_exit((m), (f), __FILE__, __LINE__)
+
+#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
+#define WITNESS_SAVE_DECL(n) \
+ char * __CONCAT(n, __wf); \
+ int __CONCAT(n, __wl)
+
+#define WITNESS_SAVE(m, n) \
+do { \
+ if ((m)->mtx_witness != NULL) \
+ witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)); \
+} while (0)
+
+#define WITNESS_RESTORE(m, n) \
+do { \
+ if ((m)->mtx_witness != NULL) \
+ witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)); \
+} while (0)
+
+void witness_init(mtx_t *, int flag);
+void witness_destroy(mtx_t *);
+void witness_enter(mtx_t *, int, char *, int);
+void witness_try_enter(mtx_t *, int, char *, int);
+void witness_exit(mtx_t *, int, char *, int);
+void witness_display(void(*)(const char *fmt, ...));
+void witness_list(struct proc *);
+int witness_sleep(int, mtx_t *, char *, int);
+void witness_save(mtx_t *, char **, int *);
+void witness_restore(mtx_t *, char *, int);
+#else /* WITNESS */
+#define WITNESS_ENTER(m, flag)
+#define WITNESS_EXIT(m, flag)
+#define WITNESS_SLEEP(check, m)
+#define WITNESS_SAVE_DECL(n)
+#define WITNESS_SAVE(m, n)
+#define WITNESS_RESTORE(m, n)
+
+/*
+ * flag++ is slezoid way of shutting up unused parameter warning
+ * in mtx_init()
+ */
+#define witness_init(m, flag) flag++
+#define witness_destroy(m)
+#define witness_enter(m, flag, f, l)
+#define witness_try_enter(m, flag, f, l )
+#define witness_exit(m, flag, f, l)
+#endif /* WITNESS */
+
+/*
+ * Assembly macros (for internal use only)
+ *--------------------------------------------------------------------------
+ */
+
+/*
+ * Get a sleep lock, deal with recursion inline
+ */
+
+#define _V(x) __STRING(x)
+
+#define _getlock_sleep(mp, tid, type) do { \
+ if (atomic_cmpset_64(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) { \
+ if (((mp)->mtx_lock & MTX_FLAGMASK) != (tid)) \
+ mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
+ else { \
+ if (((mp)->mtx_lock & MTX_RECURSE) == 0) \
+ atomic_set_64(&(mp)->mtx_lock, MTX_RECURSE); \
+ (mp)->mtx_recurse++; \
+ } \
+ } else { \
+ alpha_mb(); \
+ } \
+} while (0)
+
+/*
+ * Get a spin lock, handle recursion inline (as the less common case)
+ */
+
+#define _getlock_spin_block(mp, tid, type) do { \
+ u_int _ipl = alpha_pal_rdps() & ALPHA_PSL_IPL_MASK; \
+ if (atomic_cmpset_64(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) \
+ mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _ipl); \
+ else { \
+ alpha_mb(); \
+ (mp)->mtx_saveipl = _ipl; \
+ } \
+} while (0)
+
+/*
+ * Get a lock without any recursion handling. Calls the hard enter
+ * function if we can't get it inline.
+ */
+
+#define _getlock_norecurse(mp, tid, type) do { \
+ if (atomic_cmpset_64(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) \
+ mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
+ else \
+ alpha_mb(); \
+} while (0)
+
+/*
+ * Release a sleep lock assuming we haven't recursed on it, recursion is
+ * handled in the hard function.
+ */
+
+#define _exitlock_norecurse(mp, tid, type) do { \
+ alpha_mb(); \
+ if (atomic_cmpset_64(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) \
+ mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
+} while (0)
+
+/*
+ * Release a sleep lock when its likely we recursed (the code to
+ * deal with simple recursion is inline).
+ */
+
+#define _exitlock(mp, tid, type) do { \
+ alpha_mb(); \
+ if (atomic_cmpset_64(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) {\
+ if (((mp)->mtx_lock & MTX_RECURSE) && \
+ (--(mp)->mtx_recurse == 0)) \
+ atomic_clear_64(&(mp)->mtx_lock, MTX_RECURSE); \
+ else \
+ mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
+ } \
+} while (0)
+
+/*
+ * Release a spin lock (with possible recursion)
+ */
+
+#define _exitlock_spin(mp) do { \
+ int _ipl = (mp)->mtx_saveipl; \
+ alpha_mb(); \
+ if ((mp)->mtx_recurse == 0 || (--(mp)->mtx_recurse) == 0) \
+ atomic_cmpset_64(&(mp)->mtx_lock, (mp)->mtx_lock, \
+ MTX_UNOWNED); \
+ alpha_pal_swpipl(_ipl); \
+} while (0)
+
+/*
+ * Externally visible mutex functions
+ *------------------------------------------------------------------------
+ */
+
+/*
+ * Return non-zero if a mutex is already owned by the current thread
+ */
+#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == CURTHD)
+
+/* Common strings */
+#ifdef MTX_STRS
+char STR_mtx_enter_fmt[] = "GOT %s [%p] at %s:%d r=%d";
+char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
+char STR_mtx_exit_fmt[] = "REL %s [%p] at %s:%d r=%d";
+char STR_mtx_owned[] = "mtx_owned(_mpp)";
+char STR_mtx_recurse[] = "_mpp->mtx_recurse == 0";
+char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] at %s:%d result=%d";
+#else /* MTX_STRS */
+extern char STR_mtx_enter_fmt[];
+extern char STR_mtx_bad_type[];
+extern char STR_mtx_exit_fmt[];
+extern char STR_mtx_owned[];
+extern char STR_mtx_recurse[];
+extern char STR_mtx_try_enter_fmt[];
+#endif /* MTX_STRS */
+
+/*
+ * Get lock 'm', the macro handles the easy (and most common cases) and
+ * leaves the slow stuff to the mtx_enter_hard() function.
+ *
+ * Note: since type is usually a constant much of this code is optimized out
+ */
+#define mtx_enter(mtxp, type) do { \
+ mtx_t * _mpp = mtxp; \
+ \
+ /* bits only valid on mtx_exit() */ \
+ MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0, STR_mtx_bad_type); \
+ \
+ do { \
+ if ((type) & MTX_SPIN) { \
+ /* \
+ * Easy cases of spin locks: \
+ * \
+ * 1) We already own the lock and will simply \
+ * recurse on it (if RLIKELY) \
+ * \
+ * 2) The lock is free, we just get it \
+ */ \
+ if ((type) & MTX_RLIKELY) { \
+ /* \
+ * Check for recursion, if we already \
+ * have this lock we just bump the \
+ * recursion count. \
+ */ \
+ if (_mpp->mtx_lock == CURTHD) { \
+ _mpp->mtx_recurse++; \
+ break; /* Done */ \
+ } \
+ } \
+ \
+ if (((type) & MTX_TOPHALF) == 0) \
+ /* \
+ * If an interrupt thread uses this \
+ * we must block interrupts here. \
+ */ \
+ _getlock_spin_block(_mpp, CURTHD, \
+ (type) & MTX_HARDOPTS); \
+ else \
+ _getlock_norecurse(_mpp, CURTHD, \
+ (type) & MTX_HARDOPTS); \
+ } else { \
+ /* Sleep locks */ \
+ if ((type) & MTX_RLIKELY) \
+ _getlock_sleep(_mpp, CURTHD, \
+ (type) & MTX_HARDOPTS); \
+ else \
+ _getlock_norecurse(_mpp, CURTHD, \
+ (type) & MTX_HARDOPTS); \
+ } \
+ } while (0); \
+ WITNESS_ENTER(_mpp, type); \
+ CTR5(KTR_LOCK, STR_mtx_enter_fmt, \
+ (_mpp)->mtx_description, (_mpp), __FILE__, __LINE__, \
+ (_mpp)->mtx_recurse); \
+} while (0)
+
+/*
+ * Attempt to get MTX_DEF lock, return non-zero if lock acquired
+ *
+ * XXX DOES NOT HANDLE RECURSION
+ */
+#ifdef SMP_DEBUG
+#define mtx_try_enter(mtxp, type) ({ \
+ mtx_t *const _mpp = mtxp; \
+ int _rval; \
+ \
+ _rval = atomic_cmpset_64(&_mpp->mtx_lock, MTX_UNOWNED, CURTHD);\
+ if (_rval && (_mpp)->mtx_witness != NULL) { \
+ ASS((_mpp)->mtx_recurse == 0); \
+ witness_try_enter(_mpp, type, __FILE__, __LINE__); \
+ } \
+ CTR5(KTR_LOCK, STR_mtx_try_enter_fmt, \
+ (_mpp)->mtx_description, (_mpp), __FILE__, __LINE__, \
+ _rval); \
+ _rval; \
+})
+
+#else /* SMP_DEBUG */
+
+#define mtx_try_enter(mtxp, type) ({ \
+ mtx_t *const _mpp = mtxp; \
+ int _rval; \
+ \
+ _rval = atomic_cmpset_64(&_mpp->mtx_lock, MTX_UNOWNED, CURTHD);\
+ CTR5(KTR_LOCK, STR_mtx_try_enter_fmt, \
+ (_mpp)->mtx_description, (_mpp), __FILE__, __LINE__, \
+ _rval); \
+ _rval; \
+})
+
+#endif /* SMP_DEBUG */
+
+#if 0
+#define mtx_legal2block() ({ \
+ register int _l2b; \
+ __asm __volatile ( \
+" pushfl;" \
+" popl %%eax;" \
+" andl $0x200, %%eax;" \
+ : "=a" (_l2b) \
+ : \
+ : "cc"); \
+ _l2b; \
+})
+#endif
+
+#define mtx_legal2block() (read_eflags() & 0x200)
+
+/*
+ * Release lock m
+ */
+#define mtx_exit(mtxp, type) do { \
+ mtx_t *const _mpp = mtxp; \
+ \
+ MPASS2(mtx_owned(_mpp), STR_mtx_owned); \
+ WITNESS_EXIT(_mpp, type); \
+ CTR5(KTR_LOCK, STR_mtx_exit_fmt, \
+ (_mpp)->mtx_description, (_mpp), __FILE__, __LINE__, \
+ (_mpp)->mtx_recurse); \
+ if ((type) & MTX_SPIN) { \
+ if ((type) & MTX_NORECURSE) { \
+ MPASS2(_mpp->mtx_recurse == 0, STR_mtx_recurse); \
+ atomic_cmpset_64(&_mpp->mtx_lock, _mpp->mtx_lock, \
+ MTX_UNOWNED); \
+ if (((type) & MTX_TOPHALF) == 0) { \
+ splx(_mpp->mtx_saveipl); \
+ } \
+ } else \
+ if ((type) & MTX_TOPHALF) \
+ _exitlock_norecurse(_mpp, CURTHD, \
+ (type) & MTX_HARDOPTS); \
+ else \
+ _exitlock_spin(_mpp); \
+ } else { \
+ /* Handle sleep locks */ \
+ if ((type) & MTX_RLIKELY) \
+ _exitlock(_mpp, CURTHD, (type) & MTX_HARDOPTS); \
+ else \
+ _exitlock_norecurse(_mpp, CURTHD, \
+ (type) & MTX_HARDOPTS); \
+ } \
+} while (0)
+#endif /* _KERNEL */
+
+#else /* !LOCORE */
+
+/*
+ * Simple assembly macros to get and release non-recursive spin locks
+ */
+#define MTX_ENTER(lck) \
+ call_pal PAL_OSF1_rdps; \
+ and v0, ALPHA_PSL_IPL_MASK, v0; \
+1: ldq_l a0, lck+MTX_LOCK; \
+ cmpeq a0, MTX_UNOWNED, a1; \
+ beq a1, 1b; \
+ ldq a0, PC_CURPROC(globalp); \
+ stq_c a0, lck+MTX_LOCK; \
+ beq a0, 1b; \
+ mb; \
+ stl v0, lck+MTX_SAVEIPL; \
+ ldiq a0, ALPHA_PSL_IPL_HIGH; \
+ call_pal PAL_OSF1_swpipl
+
+#define MTX_EXIT(lck) \
+ mb; \
+ ldiq a0, MTX_UNOWNED; \
+ stq a0, lck+MTX_LOCK; \
+ ldl a0, lck+MTX_SAVEIPL; \
+ call_pal PAL_OSF1_swpipl
+
+#endif /* !LOCORE */
+
+#endif /* !_MACHINE_MUTEX_H_ */
diff --git a/sys/alpha/include/param.h b/sys/alpha/include/param.h
index 80dce22..742a3f7 100644
--- a/sys/alpha/include/param.h
+++ b/sys/alpha/include/param.h
@@ -70,7 +70,11 @@
#define OBJFORMAT_NAMES "elf"
#define OBJFORMAT_DEFAULT "elf"
+#ifdef SMP
+#define NCPUS 32
+#else
#define NCPUS 1
+#endif
/*
* Round p (pointer or byte index) up to a correctly-aligned value for all
diff --git a/sys/alpha/include/pcb.h b/sys/alpha/include/pcb.h
index 3caa144..3bf2586 100644
--- a/sys/alpha/include/pcb.h
+++ b/sys/alpha/include/pcb.h
@@ -30,7 +30,7 @@
#include <machine/frame.h>
#include <machine/reg.h>
-
+#include <machine/globaldata.h>
#include <machine/alpha_cpu.h>
/*
@@ -53,6 +53,7 @@ struct pcb {
u_int64_t pcb_fp_control; /* IEEE control word [SW] */
unsigned long pcb_onfault; /* for copy faults [SW] */
unsigned long pcb_accessaddr; /* for [fs]uswintr [SW] */
+ u_int32_t pcb_schednest; /* state of sched_lock [SW] */
};
/*
@@ -64,3 +65,9 @@ struct md_coredump {
struct trapframe md_tf;
struct fpreg md_fpstate;
};
+
+#ifdef _KERNEL
+#ifndef curpcb
+extern struct pcb *curpcb; /* our current running pcb */
+#endif
+#endif
diff --git a/sys/alpha/include/pcpu.h b/sys/alpha/include/pcpu.h
new file mode 100644
index 0000000..b246bb1
--- /dev/null
+++ b/sys/alpha/include/pcpu.h
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 1999 Luoqi Chen <luoqi@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_GLOBALDATA_H_
+#define _MACHINE_GLOBALDATA_H_
+
+#ifdef _KERNEL
+
+#include <sys/queue.h>
+
+/*
+ * This structure maps out the global data that needs to be kept on a
+ * per-cpu basis. genassym uses this to generate offsets for the assembler
+ * code, which also provides external symbols so that C can get at them as
+ * though they were really globals. This structure is pointed to by
+ * the per-cpu system value (see alpha_pal_rdval() and alpha_pal_wrval()).
+ * Inside the kernel, the globally reserved register t7 is used to
+ * point at the globaldata structure.
+ */
+struct globaldata {
+ struct alpha_pcb gd_idlepcb; /* pcb for idling */
+ struct proc *gd_curproc; /* current process */
+ struct proc *gd_idleproc; /* idle process */
+ struct proc *gd_fpcurproc; /* fp state owner */
+ struct pcb *gd_curpcb; /* current pcb */
+ struct timeval gd_switchtime;
+ int gd_switchticks;
+ u_int gd_cpuno; /* this cpu number */
+ u_int gd_other_cpus; /* all other cpus */
+ int gd_inside_intr;
+ u_int64_t gd_idlepcbphys; /* pa of gd_idlepcb */
+ u_int64_t gd_pending_ipis; /* pending IPI events */
+ u_int32_t gd_next_asn; /* next ASN to allocate */
+ u_int32_t gd_current_asngen; /* ASN rollover check */
+ u_int32_t gd_intr_nesting_level; /* interrupt recursion */
+
+ u_int gd_astpending;
+ SLIST_ENTRY(globaldata) gd_allcpu;
+#ifdef KTR_PERCPU
+ volatile int gd_ktr_idx; /* Index into trace table */
+ char *gd_ktr_buf;
+ char gd_ktr_buf_data[0];
+#endif
+};
+
+SLIST_HEAD(cpuhead, globaldata);
+extern struct cpuhead cpuhead;
+
+void globaldata_init(struct globaldata *pcpu, int cpuno, size_t sz);
+struct globaldata *globaldata_find(int cpuno);
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_GLOBALDATA_H_ */
diff --git a/sys/alpha/include/pmap.h b/sys/alpha/include/pmap.h
index 134c9a2..de59b66 100644
--- a/sys/alpha/include/pmap.h
+++ b/sys/alpha/include/pmap.h
@@ -174,9 +174,11 @@ struct pmap {
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
int pm_count; /* reference count */
int pm_flags; /* pmap flags */
- int pm_active; /* active flag */
- int pm_asn; /* address space number */
- u_int pm_asngen; /* generation number of pm_asn */
+ u_int32_t pm_active; /* active cpus */
+ struct {
+ u_int32_t asn:8; /* address space number */
+ u_int32_t gen:24; /* generation number */
+ } pm_asn[NCPUS];
struct pmap_statistics pm_stats; /* pmap statistics */
struct vm_page *pm_ptphint; /* pmap ptp hint */
};
diff --git a/sys/alpha/include/proc.h b/sys/alpha/include/proc.h
index 502b607..d003816 100644
--- a/sys/alpha/include/proc.h
+++ b/sys/alpha/include/proc.h
@@ -28,6 +28,12 @@
* rights to redistribute these changes.
*/
+#ifndef _MACHINE_PROC_H_
+#define _MACHINE_PROC_H_
+
+#include <machine/globaldata.h>
+#include <machine/globals.h>
+
/*
* Machine-dependent part of the proc struct for the Alpha.
*/
@@ -55,3 +61,5 @@ struct mdproc {
#define MDP_UAC_SIGBUS 0x0040 /* Deliver SIGBUS upon
unaligned access */
#define MDP_UAC_MASK (MDP_UAC_NOPRINT | MDP_UAC_NOFIX | MDP_UAC_SIGBUS)
+
+#endif /* !_MACHINE_PROC_H_ */
diff --git a/sys/alpha/include/rpb.h b/sys/alpha/include/rpb.h
index 1f2f884..0be0775 100644
--- a/sys/alpha/include/rpb.h
+++ b/sys/alpha/include/rpb.h
@@ -219,7 +219,8 @@ struct rpb {
* PCS: Per-CPU information.
*/
struct pcs {
- u_int8_t pcs_hwpcb[128]; /* 0: PAL dependent */
+
+ u_int64_t pcs_hwpcb[16]; /* 0: PAL dependent */
#define PCS_BIP 0x000001 /* boot in progress */
#define PCS_RC 0x000002 /* restart possible */
@@ -238,12 +239,12 @@ struct pcs {
#define PCS_HALT_WARM_BOOT 0x030000
#define PCS_HALT_STAY_HALTED 0x040000
#define PCS_mbz 0xffffffffff000000 /* 24:63 -- must be zero */
- u_int64_t pcs_flags; /* 80: */
+ u_int64_t pcs_flags; /* 128: */
- u_int64_t pcs_pal_memsize; /* 88: PAL memory size */
- u_int64_t pcs_pal_scrsize; /* 90: PAL scratch size */
- vm_offset_t pcs_pal_memaddr; /* 98: PAL memory addr */
- vm_offset_t pcs_pal_scraddr; /* A0: PAL scratch addr */
+ u_int64_t pcs_pal_memsize; /* 136: PAL memory size */
+ u_int64_t pcs_pal_scrsize; /* 144: PAL scratch size */
+ vm_offset_t pcs_pal_memaddr; /* 152: PAL memory addr */
+ vm_offset_t pcs_pal_scraddr; /* 160: PAL scratch addr */
struct {
u_int64_t
minorrev : 8, /* alphabetic char 'a' - 'z' */
@@ -261,14 +262,14 @@ struct pcs {
sbz1 : 8,
compatibility : 16, /* Compatibility revision */
proc_cnt : 16; /* Processor count */
- } pcs_pal_rev; /* A8: */
+ } pcs_pal_rev; /* 168: */
#define pcs_minorrev pcs_pal_rev.minorrev
#define pcs_majorrev pcs_pal_rev.majorrev
#define pcs_pal_type pcs_pal_rev.pal_type
#define pcs_compatibility pcs_pal_rev.compatibility
#define pcs_proc_cnt pcs_pal_rev.proc_cnt
- u_int64_t pcs_proc_type; /* B0: processor type */
+ u_int64_t pcs_proc_type; /* 176: processor type */
#define PCS_PROC_MAJOR 0x00000000ffffffff
#define PCS_PROC_MAJORSHIFT 0
@@ -288,23 +289,23 @@ struct pcs {
/* Minor number interpretation is processor specific. See cpu.c. */
- u_int64_t pcs_proc_var; /* B8: processor variation. */
+ u_int64_t pcs_proc_var; /* 184: processor variation. */
#define PCS_VAR_VAXFP 0x0000000000000001 /* VAX FP support */
#define PCS_VAR_IEEEFP 0x0000000000000002 /* IEEE FP support */
#define PCS_VAR_PE 0x0000000000000004 /* Primary Eligible */
#define PCS_VAR_RESERVED 0xfffffffffffffff8 /* Reserved */
- char pcs_proc_revision[8]; /* C0: only first 4 valid */
- char pcs_proc_sn[16]; /* C8: only first 10 valid */
- vm_offset_t pcs_machcheck; /* D8: mach chk phys addr. */
- u_int64_t pcs_machcheck_len; /* E0: length in bytes */
- vm_offset_t pcs_halt_pcbb; /* E8: phys addr of halt PCB */
- vm_offset_t pcs_halt_pc; /* F0: halt PC */
- u_int64_t pcs_halt_ps; /* F8: halt PS */
- u_int64_t pcs_halt_r25; /* 100: halt argument list */
- u_int64_t pcs_halt_r26; /* 108: halt return addr list */
- u_int64_t pcs_halt_r27; /* 110: halt procedure value */
+ char pcs_proc_revision[8]; /* 192: only first 4 valid */
+ char pcs_proc_sn[16]; /* 200: only first 10 valid */
+ vm_offset_t pcs_machcheck; /* 216: mach chk phys addr. */
+ u_int64_t pcs_machcheck_len; /* 224: length in bytes */
+ vm_offset_t pcs_halt_pcbb; /* 232: pa of halt PCB */
+ vm_offset_t pcs_halt_pc; /* 240: halt PC */
+ u_int64_t pcs_halt_ps; /* 248: halt PS */
+ u_int64_t pcs_halt_r25; /* 256: halt argument list */
+ u_int64_t pcs_halt_r26; /* 264: halt ra list */
+ u_int64_t pcs_halt_r27; /* 272: halt procedure value */
#define PCS_HALT_RESERVED 0
#define PCS_HALT_POWERUP 1
@@ -315,17 +316,22 @@ struct pcs {
#define PCS_HALT_DOUBLE_ERROR_ABORT 6
#define PCS_HALT_SCBB 7
#define PCS_HALT_PTBR 8 /* 9-FF: reserved */
- u_int64_t pcs_halt_reason; /* 118: */
+ u_int64_t pcs_halt_reason; /* 280: */
- u_int64_t pcs_reserved_soft; /* 120: preserved software */
- u_int64_t pcs_buffer[21]; /* 128: console buffers */
+ u_int64_t pcs_reserved_soft; /* 288: preserved software */
+ struct {
+ u_int32_t rxlen;
+ u_int32_t txlen;
+ char rxbuf[80];
+ char txbuf[80];
+ } pcs_buffer; /* 296: console buffers */
#define PALvar_reserved 0
#define PALvar_OpenVMS 1
#define PALvar_OSF1 2
- u_int64_t pcs_palrevisions[16]; /* 1D0: PALcode revisions */
+ u_int64_t pcs_palrevisions[16]; /* 464: PALcode revisions */
- u_int64_t pcs_reserved_arch[6]; /* 250: reserved arch */
+ u_int64_t pcs_reserved_arch[6]; /* 592: reserved arch */
};
/*
diff --git a/sys/alpha/include/smp.h b/sys/alpha/include/smp.h
index 48d6737..00aec6a 100644
--- a/sys/alpha/include/smp.h
+++ b/sys/alpha/include/smp.h
@@ -1,10 +1,57 @@
/*
+ * ----------------------------------------------------------------------------
+ * "THE BEER-WARE LICENSE" (Revision 42):
+ * <phk@FreeBSD.org> wrote this file. As long as you retain this notice you
+ * can do whatever you want with this stuff. If we meet some day, and you think
+ * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
+ * ----------------------------------------------------------------------------
+ *
* $FreeBSD$
+ *
*/
+
#ifndef _MACHINE_SMP_H_
#define _MACHINE_SMP_H_
-#define get_mplock() { }
-#define rel_mplock() { }
+#ifdef _KERNEL
+
+#include <machine/mutex.h>
+#include <machine/ipl.h>
+#include <sys/ktr.h>
+
+#ifndef LOCORE
+
+#define BETTER_CLOCK /* unconditional on alpha */
+
+/* global data in mp_machdep.c */
+extern volatile u_int checkstate_probed_cpus;
+extern volatile u_int checkstate_need_ast;
+extern volatile u_int resched_cpus;
+extern void (*cpustop_restartfunc) __P((void));
+
+extern int smp_active;
+extern int mp_ncpus;
+extern u_int all_cpus;
+extern u_int started_cpus;
+extern u_int stopped_cpus;
+
+/* functions in mp_machdep.c */
+void mp_start(void);
+void mp_announce(void);
+void smp_invltlb(void);
+void forward_statclock(int pscnt);
+void forward_hardclock(int pscnt);
+void forward_signal(struct proc *);
+void forward_roundrobin(void);
+int stop_cpus(u_int);
+int restart_cpus(u_int);
+void smp_rendezvous_action(void);
+void smp_rendezvous(void (*)(void *),
+ void (*)(void *),
+ void (*)(void *),
+ void *arg);
+void smp_init_secondary(void);
-#endif
+#endif /* !LOCORE */
+#endif /* _KERNEL */
+#endif /* _MACHINE_SMP_H_ */
OpenPOWER on IntegriCloud