author     jhb <jhb@FreeBSD.org>  2000-10-20 07:26:37 +0000
committer  jhb <jhb@FreeBSD.org>  2000-10-20 07:26:37 +0000
commit     f671832d7667351a66ab594f63d8cd4ef66e8e91 (patch)
tree       99efa7c13c407257b68406e364261eeb1b2c1972  /sys/alpha
parent     787712af1c7ffb2642f8250f2111301ad77fdaf8 (diff)
download   FreeBSD-src-f671832d7667351a66ab594f63d8cd4ef66e8e91.zip
           FreeBSD-src-f671832d7667351a66ab594f63d8cd4ef66e8e91.tar.gz
- Make the mutex code almost completely machine independent. This greatly
  reduces the maintenance load for the mutex code. The only MD portions of
  the mutex code are in machine/mutex.h now, which include the assembly
  macros for handling mutexes as well as optionally overriding the mutex
  micro-operations. For example, we use optimized micro-ops on the x86
  platform #ifndef I386_CPU.
- Change the behavior of the SMP_DEBUG kernel option. In the new code,
  mtx_assert() only depends on INVARIANTS, allowing other kernel developers
  to have working mutex assertions without having to include all of the
  mutex debugging code. The SMP_DEBUG kernel option has been renamed to
  MUTEX_DEBUG and now just controls extra mutex debugging code.
- Abolish the ugly mtx_f hack. Instead, we dynamically allocate separate
  mtx_debug structures on the fly in mtx_init, except for mutexes that are
  initialized very early in the boot process. These mutexes are declared
  using a special MUTEX_DECLARE() macro, and use a new flag MTX_COLD when
  calling mtx_init. This is still somewhat hackish, but it is less evil
  than the mtx_f filler struct, and the mtx struct is now the same size
  with and without mutex debugging code.
- Add some micro-micro-operation macros for doing the actual atomic
  operations on the mutex mtx_lock field to make it easier for other archs
  to override/optimize mutex ops if needed. These new tiny ops also clean
  up the code in some places by replacing long atomic operation function
  calls that spanned 2-3 lines with a short 1-line macro call.
- Don't call mi_switch() from mtx_enter_hard() when we block while trying
  to obtain a sleep mutex. Calling mi_switch() would bogusly release Giant
  before switching to the next process. Instead, inline most of the code
  from mi_switch() in the mtx_enter_hard() function. Note that when we
  finally kill Giant we can back this out and go back to calling
  mi_switch().
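A minimal userland sketch of the micro-op override pattern described in the
first and fourth items above, using C11 atomics in place of the kernel's
atomic_cmpset_* primitives; the _obtain_lock/_release_lock names and shapes
are illustrative assumptions, not code from the tree:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MTX_UNOWNED	0x8UL		/* cookie for a free mutex, as in the old header */

struct mtx {
	_Atomic uintptr_t mtx_lock;	/* owner thread ID, or MTX_UNOWNED */
};

/* Generic MI micro-op; a machine/mutex.h could pre-define a faster one. */
#ifndef _obtain_lock
static int
_obtain_lock(struct mtx *mp, uintptr_t tid)
{
	uintptr_t old = MTX_UNOWNED;

	return (atomic_compare_exchange_strong(&mp->mtx_lock, &old, tid));
}
#endif

#ifndef _release_lock
static int
_release_lock(struct mtx *mp, uintptr_t tid)
{
	uintptr_t old = tid;

	return (atomic_compare_exchange_strong(&mp->mtx_lock, &old,
	    (uintptr_t)MTX_UNOWNED));
}
#endif

int
main(void)
{
	struct mtx m = { MTX_UNOWNED };

	printf("obtain:  %d\n", _obtain_lock(&m, 1));	/* 1: lock was free */
	printf("obtain:  %d\n", _obtain_lock(&m, 2));	/* 0: already owned */
	printf("release: %d\n", _release_lock(&m, 1));	/* 1: we owned it */
	return (0);
}

The one-line call sites this enables are what replace the 2-3 line
atomic function calls mentioned in the log.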
Diffstat (limited to 'sys/alpha')
-rw-r--r--  sys/alpha/alpha/synch_machdep.c   550
-rw-r--r--  sys/alpha/include/mutex.h         480
2 files changed, 12 insertions, 1018 deletions
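As a hedged illustration of the MUTEX_DECLARE()/MTX_COLD path from the log
message, call sites might look roughly like this; the macro and flag names
come from the log, but the exact signatures here are assumptions:

/*
 * Hypothetical call sites. MUTEX_DECLARE() reserves static mtx_debug
 * storage so that mtx_init() need not allocate, and MTX_COLD tells
 * mtx_init() to use that storage.
 */
MUTEX_DECLARE(, sched_lock);	/* usable before malloc() works */

void
early_lock_setup(void)
{
	mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_COLD);
}

struct mtx vm_mtx;		/* ordinary mutex: debug struct allocated on the fly */

void
later_lock_setup(void)
{
	mtx_init(&vm_mtx, "vm", MTX_DEF);
}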
diff --git a/sys/alpha/alpha/synch_machdep.c b/sys/alpha/alpha/synch_machdep.c
deleted file mode 100644
index 184da83..0000000
--- a/sys/alpha/alpha/synch_machdep.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/*-
- * Copyright (c) 1997, 1998 Berkeley Software Design, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Berkeley Software Design Inc's name may not be used to endorse or
- * promote products derived from this software without specific prior
- * written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * from BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
- * $FreeBSD$
- */
-
-#define MTX_STRS /* define common strings */
-
-#include <sys/param.h>
-#include <sys/proc.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/ktr.h>
-#include <vm/vm.h>
-#include <vm/vm_extern.h>
-#include <ddb/ddb.h>
-#include <machine/atomic.h>
-#include <machine/clock.h>
-#include <machine/cpu.h>
-#include <machine/mutex.h>
-
-/* All mutexes in system (used for debug/panic) */
-struct mtx all_mtx = { MTX_UNOWNED, 0, 0, "All mutexes queue head",
- TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
- { NULL, NULL }, &all_mtx, &all_mtx
-#ifdef SMP_DEBUG
- , NULL, { NULL, NULL }, NULL, 0
-#endif
-};
-
-int mtx_cur_cnt;
-int mtx_max_cnt;
-
-extern void _mtx_enter_giant_def(void);
-extern void _mtx_exit_giant_def(void);
-
-static void propagate_priority(struct proc *) __unused;
-
-#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#define mtx_owner(m) (mtx_unowned(m) ? NULL \
- : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-
-#define RETIP(x) *(((u_int64_t *)(&x)) - 1)
-#define SET_PRIO(p, pri) (p)->p_priority = (pri)
-
-/*
- * XXX Temporary, for use from assembly language
- */
-
-void
-_mtx_enter_giant_def(void)
-{
-
- mtx_enter(&Giant, MTX_DEF);
-}
-
-void
-_mtx_exit_giant_def(void)
-{
-
- mtx_exit(&Giant, MTX_DEF);
-}
-
-static void
-propagate_priority(struct proc *p)
-{
- int pri = p->p_priority;
- struct mtx *m = p->p_blocked;
-
- for (;;) {
- struct proc *p1;
-
- p = mtx_owner(m);
-
- if (p == NULL) {
- /*
- * This really isn't quite right. We really
- * ought to bump the priority of the process that
- * next acquires the mutex.
- */
- MPASS(m->mtx_lock == MTX_CONTESTED);
- return;
- }
- MPASS(p->p_magic == P_MAGIC);
- if (p->p_priority <= pri)
- return;
- /*
- * If lock holder is actually running, just bump priority.
- */
- if (TAILQ_NEXT(p, p_procq) == NULL) {
- MPASS(p->p_stat == SRUN || p->p_stat == SZOMB);
- SET_PRIO(p, pri);
- return;
- }
- /*
- * If on run queue move to new run queue, and
- * quit.
- */
- if (p->p_stat == SRUN) {
- MPASS(p->p_blocked == NULL);
- remrunqueue(p);
- SET_PRIO(p, pri);
- setrunqueue(p);
- return;
- }
-
- /*
- * If we aren't blocked on a mutex, give up and quit.
- */
- if (p->p_stat != SMTX) {
- return;
- }
-
- /*
- * Pick up the mutex that p is blocked on.
- */
- m = p->p_blocked;
- MPASS(m != NULL);
-
- /*
- * Check if the proc needs to be moved up on
- * the blocked chain
- */
- if ((p1 = TAILQ_PREV(p, rq, p_procq)) == NULL ||
- p1->p_priority <= pri)
- continue;
-
- /*
- * Remove proc from blocked chain
- */
- TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
- MPASS(p1->p_magic == P_MAGIC);
- if (p1->p_priority > pri)
- break;
- }
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- CTR4(KTR_LOCK,
- "propagate priority: p 0x%p moved before 0x%p on [0x%p] %s",
- p, p1, m, m->mtx_description);
- }
-}
-
-void
-mtx_enter_hard(struct mtx *m, int type, int ipl)
-{
- struct proc *p = CURPROC;
-
- KASSERT(p != NULL, ("curproc is NULL in mutex"));
-
- switch (type) {
- case MTX_DEF:
- if ((m->mtx_lock & MTX_FLAGMASK) == (u_int64_t)p) {
- m->mtx_recurse++;
- atomic_set_64(&m->mtx_lock, MTX_RECURSE);
- CTR1(KTR_LOCK, "mtx_enter: 0x%p recurse", m);
- return;
- }
- CTR3(KTR_LOCK, "mtx_enter: 0x%p contested (lock=%lx) [0x%lx]",
- m, m->mtx_lock, RETIP(m));
- while (!atomic_cmpset_64(&m->mtx_lock, MTX_UNOWNED,
- (u_int64_t)p)) {
- int v;
- struct proc *p1;
-
- mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
- /*
- * Check whether the lock was released while we
- * were waiting for the sched_lock.
- */
- if ((v = m->mtx_lock) == MTX_UNOWNED) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
- /*
- * The mutex was marked contested on release. This
- * means that there are processes blocked on it.
- */
- if (v == MTX_CONTESTED) {
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- KASSERT(p1 != NULL, ("contested mutex has no contesters"));
- KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
- m->mtx_lock = (u_int64_t)p | MTX_CONTESTED;
- if (p1->p_priority < p->p_priority) {
- SET_PRIO(p, p1->p_priority);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- }
- /*
- * If the mutex isn't already contested and
- * a failure occurs setting the contested bit, the
- * mutex was either released or the
- * state of the MTX_RECURSE bit changed.
- */
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_cmpset_64(&m->mtx_lock, v,
- v | MTX_CONTESTED)) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
-
- /* We definitely have to sleep for this lock */
- mtx_assert(m, MA_NOTOWNED);
-
-#ifdef notyet
- /*
- * If we're borrowing an interrupted thread's VM
- * context, we must clean up before going to sleep.
- */
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- CTR2(KTR_LOCK,
- "mtx_enter: 0x%x interrupted 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
- }
-#endif
-
- /* Put us on the list of procs blocked on this mutex */
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- p1 = (struct proc *)(m->mtx_lock &
- MTX_FLAGMASK);
- LIST_INSERT_HEAD(&p1->p_contested, m,
- mtx_contested);
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- } else {
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
- if (p1->p_priority > p->p_priority)
- break;
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
- p_procq);
- }
-
- p->p_blocked = m; /* Who we're blocked on */
- p->p_stat = SMTX;
-#if 0
- propagate_priority(p);
-#endif
- CTR3(KTR_LOCK, "mtx_enter: p 0x%p blocked on [0x%p] %s",
- p, m, m->mtx_description);
- mi_switch();
- CTR3(KTR_LOCK,
- "mtx_enter: p 0x%p free from blocked on [0x%p] %s",
- p, m, m->mtx_description);
- mtx_exit(&sched_lock, MTX_SPIN);
- }
- alpha_mb();
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- case MTX_SPIN | MTX_TOPHALF:
- {
- int i = 0;
-
- if (m->mtx_lock == (u_int64_t)p) {
- m->mtx_recurse++;
- return;
- }
- CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
- for (;;) {
- if (atomic_cmpset_64(&m->mtx_lock, MTX_UNOWNED,
- (u_int64_t)p)) {
- alpha_mb();
- break;
- }
- while (m->mtx_lock != MTX_UNOWNED) {
- if (i++ < 1000000)
- continue;
- if (i++ < 6000000)
- DELAY (1);
-#ifdef DDB
- else if (!db_active)
-#else
- else
-#endif
- panic(
- "spin lock %s held by 0x%lx for > 5 seconds",
- m->mtx_description, m->mtx_lock);
- }
- }
-
-#ifdef SMP_DEBUG
- if (type != MTX_SPIN)
- m->mtx_saveipl = 0xbeefface;
- else
-#endif
- m->mtx_saveipl = ipl;
- CTR1(KTR_LOCK, "mtx_enter: 0x%p spin done", m);
- return;
- }
- }
-}
-
-void
-mtx_exit_hard(struct mtx *m, int type)
-{
- struct proc *p, *p1;
- struct mtx *m1;
- int pri;
-
- switch (type) {
- case MTX_DEF:
- case MTX_DEF | MTX_NOSWITCH:
- if (m->mtx_recurse != 0) {
- if (--(m->mtx_recurse) == 0)
- atomic_clear_64(&m->mtx_lock, MTX_RECURSE);
- CTR1(KTR_LOCK, "mtx_exit: 0x%p unrecurse", m);
- return;
- }
- mtx_enter(&sched_lock, MTX_SPIN);
- CTR1(KTR_LOCK, "mtx_exit: 0x%p contested", m);
- p = CURPROC;
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p->p_magic == P_MAGIC);
- MPASS(p1->p_magic == P_MAGIC);
- TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- LIST_REMOVE(m, mtx_contested);
- atomic_cmpset_64(&m->mtx_lock, m->mtx_lock,
- MTX_UNOWNED);
- CTR1(KTR_LOCK, "mtx_exit: 0x%p not held", m);
- } else
- m->mtx_lock = MTX_CONTESTED;
- pri = MAXPRI;
- LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
- int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
- if (cp < pri)
- pri = cp;
- }
- if (pri > p->p_nativepri)
- pri = p->p_nativepri;
- SET_PRIO(p, pri);
- CTR2(KTR_LOCK, "mtx_exit: 0x%p contested setrunqueue 0x%p",
- m, p1);
- p1->p_blocked = NULL;
- p1->p_stat = SRUN;
- setrunqueue(p1);
- if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
-#ifdef notyet
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- CTR2(KTR_LOCK,
- "mtx_exit: 0x%x interruped 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
- }
-#endif
- setrunqueue(p);
- CTR2(KTR_LOCK, "mtx_exit: 0x%p switching out lock=0x%lx",
- m, m->mtx_lock);
- mi_switch();
- CTR2(KTR_LOCK, "mtx_exit: 0x%p resuming lock=0x%lx",
- m, m->mtx_lock);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- if (m->mtx_recurse != 0) {
- m->mtx_recurse--;
- return;
- }
- alpha_mb();
- if (atomic_cmpset_64(&m->mtx_lock, CURTHD, MTX_UNOWNED)) {
- if (type & MTX_FIRST)
- enable_intr(); /* XXX is this kosher? */
- else {
- MPASS(m->mtx_saveipl != 0xbeefface);
- alpha_pal_swpipl(m->mtx_saveipl);
- }
- return;
- }
- panic("unsucuessful release of spin lock");
- case MTX_SPIN | MTX_TOPHALF:
- if (m->mtx_recurse != 0) {
- m->mtx_recurse--;
- return;
- }
- alpha_mb();
- if (atomic_cmpset_64(&m->mtx_lock, CURTHD, MTX_UNOWNED))
- return;
- panic("unsucuessful release of spin lock");
- default:
- panic("mtx_exit_hard: unsupported type 0x%x\n", type);
- }
-}
-
-#define MV_DESTROY 0 /* validate before destroy */
-#define MV_INIT 1 /* validate before init */
-
-#ifdef SMP_DEBUG
-
-#define ISK0SEG(va) \
- ((va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END))
-
-int mtx_validate __P((struct mtx *, int));
-
-int
-mtx_validate(struct mtx *m, int when)
-{
- struct mtx *mp;
- int i;
- int retval = 0;
-
- if (m == &all_mtx || cold)
- return 0;
-
- mtx_enter(&all_mtx, MTX_DEF);
- ASS(ISK0SEG((vm_offset_t)all_mtx.mtx_next) ||
- kernacc((caddr_t)all_mtx.mtx_next, 4, 1) == 1);
- ASS(all_mtx.mtx_next->mtx_prev == &all_mtx);
- for (i = 0, mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
- if (!ISK0SEG((vm_offset_t)all_mtx.mtx_next) &&
- kernacc((caddr_t)mp->mtx_next, 4, 1) != 1) {
- panic("mtx_validate: mp=%p mp->mtx_next=%p",
- mp, mp->mtx_next);
- }
- i++;
- if (i > mtx_cur_cnt) {
- panic("mtx_validate: too many in chain, known=%d\n",
- mtx_cur_cnt);
- }
- }
- ASS(i == mtx_cur_cnt);
- switch (when) {
- case MV_DESTROY:
- for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
- if (mp == m)
- break;
- ASS(mp == m);
- break;
- case MV_INIT:
- for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
- if (mp == m) {
- /*
- * Not good. This mutex already exists.
- */
- retval = 1;
-#if 1
- printf("re-initing existing mutex %s\n",
- m->mtx_description);
- ASS(m->mtx_lock == MTX_UNOWNED);
- retval = 1;
-#else
- panic("re-initing existing mutex %s",
- m->mtx_description);
-#endif
- }
- }
- mtx_exit(&all_mtx, MTX_DEF);
- return (retval);
-}
-#endif
-
-void
-mtx_init(struct mtx *m, char *t, int flag)
-{
-
- CTR2(KTR_LOCK, "mtx_init 0x%p (%s)", m, t);
-#ifdef SMP_DEBUG
- if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
- return;
-#endif
- bzero((void *)m, sizeof *m);
- TAILQ_INIT(&m->mtx_blocked);
- m->mtx_description = t;
- m->mtx_lock = MTX_UNOWNED;
- /* Put on all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
- m->mtx_next = &all_mtx;
- m->mtx_prev = all_mtx.mtx_prev;
- m->mtx_prev->mtx_next = m;
- all_mtx.mtx_prev = m;
- if (++mtx_cur_cnt > mtx_max_cnt)
- mtx_max_cnt = mtx_cur_cnt;
- mtx_exit(&all_mtx, MTX_DEF);
- witness_init(m, flag);
-}
-
-void
-mtx_destroy(struct mtx *m)
-{
-
- CTR2(KTR_LOCK, "mtx_destroy 0x%p (%s)", m, m->mtx_description);
-#ifdef SMP_DEBUG
- if (m->mtx_next == NULL)
- panic("mtx_destroy: %p (%s) already destroyed",
- m, m->mtx_description);
-
- if (!mtx_owned(m)) {
- ASS(m->mtx_lock == MTX_UNOWNED);
- } else {
- ASS((m->mtx_lock & (MTX_RECURSE|MTX_CONTESTED)) == 0);
- }
- mtx_validate(m, MV_DESTROY); /* diagnostic */
-#endif
-
-#ifdef WITNESS
- if (m->mtx_witness)
- witness_destroy(m);
-#endif /* WITNESS */
-
- /* Remove from the all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
- m->mtx_next->mtx_prev = m->mtx_prev;
- m->mtx_prev->mtx_next = m->mtx_next;
-#ifdef SMP_DEBUG
- m->mtx_next = m->mtx_prev = NULL;
-#endif
- mtx_cur_cnt--;
- mtx_exit(&all_mtx, MTX_DEF);
-}
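The header changes below reflect the debugging split described in the log:
mtx_assert() now keys off INVARIANTS alone, while MUTEX_DEBUG gates only the
extra checks. A condensed sketch of what an INVARIANTS-only mtx_assert()
could look like, with details assumed rather than copied from the new
MI header:

#ifdef INVARIANTS
#define	MA_OWNED	1
#define	MA_NOTOWNED	2
/* Hypothetical shape; the real macro lives in the MI mutex header. */
#define	mtx_assert(m, what) do {					\
	switch (what) {							\
	case MA_OWNED:							\
		if (!mtx_owned((m)))					\
			panic("mutex %s not owned at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		break;							\
	case MA_NOTOWNED:						\
		if (mtx_owned((m)))					\
			panic("mutex %s owned at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		break;							\
	}								\
} while (0)
#else
#define	mtx_assert(m, what)	/* assertions compiled out */
#endif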
diff --git a/sys/alpha/include/mutex.h b/sys/alpha/include/mutex.h
index 12b12a8..5ccc28d 100644
--- a/sys/alpha/include/mutex.h
+++ b/sys/alpha/include/mutex.h
@@ -33,293 +33,39 @@
#define _MACHINE_MUTEX_H_
#ifndef LOCORE
-#include <sys/queue.h>
#ifdef _KERNEL
-#include <sys/ktr.h>
-#include <sys/proc.h> /* Needed for curproc. */
-#include <machine/atomic.h>
-#include <machine/cpufunc.h>
-#include <machine/globaldata.h>
-#include <machine/globals.h>
-
-/*
- * If kern_mutex.c is being built, compile non-inlined versions of various
- * functions so that kernel modules can use them.
- */
-#ifndef _KERN_MUTEX_C_
-#define _MTX_INLINE static __inline
-#else
-#define _MTX_INLINE
-#endif
-
-/*
- * Mutex flags
- *
- * Types
- */
-#define MTX_DEF 0x1 /* Default (spin/sleep) */
-#define MTX_SPIN 0x2 /* Spin only lock */
-
-/* Options */
-#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
-#define MTX_NORECURSE 0x8 /* No recursion possible */
-#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
-#define MTX_NOSWITCH 0x20 /* Do not switch on release */
-#define MTX_FIRST 0x40 /* First spin lock holder */
-#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
-
-/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
-#define MTX_HARDOPTS (MTX_DEF | MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
-
-/* Flags/value used in mtx_lock */
-#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
-#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
-#define MTX_FLAGMASK ~(MTX_RECURSE | MTX_CONTESTED)
-#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
-
-#endif /* _KERNEL */
-
-/*
- * Sleep/spin mutex
- */
-struct mtx {
- volatile u_int64_t mtx_lock; /* lock owner/gate/flags */
- volatile u_int32_t mtx_recurse; /* number of recursive holds */
- u_int32_t mtx_saveipl; /* saved ipl (for spin locks) */
- char *mtx_description;
- TAILQ_HEAD(, proc) mtx_blocked;
- LIST_ENTRY(mtx) mtx_contested;
- struct mtx *mtx_next; /* all locks in system */
- struct mtx *mtx_prev;
-#ifdef SMP_DEBUG
- /* If you add anything here, adjust the mtxf_t definition below */
- struct witness *mtx_witness;
- LIST_ENTRY(mtx) mtx_held;
- const char *mtx_file;
- int mtx_line;
-#endif /* SMP_DEBUG */
-};
-
-/*
- * Filler for structs which need to remain the same size
- * whether or not SMP_DEBUG is turned on.
- */
-typedef struct mtxf {
-#ifdef SMP_DEBUG
- char mtxf_data[0];
-#else
- char mtxf_data[4*sizeof(void *) + sizeof(int)];
-#endif
-} mtxf_t;
-
-#define mp_fixme(string)
-
-#ifdef _KERNEL
-/* Misc */
-#define CURTHD ((u_int64_t)CURPROC) /* Current thread ID */
-
-/* Prototypes */
-void mtx_init(struct mtx *m, char *description, int flag);
-void mtx_enter_hard(struct mtx *, int type, int ipl);
-void mtx_exit_hard(struct mtx *, int type);
-void mtx_destroy(struct mtx *m);
-
-/*
- * Wrap the following functions with cpp macros so that filenames and line
- * numbers are embedded in the code correctly.
- */
-#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
-void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
-int _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
-void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
-#endif
-
-#define mtx_enter(mtxp, type) \
- _mtx_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_try_enter(mtxp, type) \
- _mtx_try_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_exit(mtxp, type) \
- _mtx_exit((mtxp), (type), __FILE__, __LINE__)
-
-/* Global locks */
-extern struct mtx sched_lock;
-extern struct mtx Giant;
-
-/*
- * Used to replace return with an exit Giant and return.
- */
-
-#define EGAR(a) \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return (a); \
-} while (0)
-
-#define VEGAR \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return; \
-} while (0)
-
-#define DROP_GIANT() \
-do { \
- int _giantcnt; \
- WITNESS_SAVE_DECL(Giant); \
- \
- WITNESS_SAVE(&Giant, Giant); \
- for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
- mtx_exit(&Giant, MTX_DEF)
-
-#define PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant); \
-} while (0)
-
-#define PARTIAL_PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant)
-
/*
* Debugging
*/
-#ifndef SMP_DEBUG
-#define mtx_assert(m, what)
-#else /* SMP_DEBUG */
+#ifdef MUTEX_DEBUG
-#define MA_OWNED 1
-#define MA_NOTOWNED 2
-#define mtx_assert(m, what) { \
- switch ((what)) { \
- case MA_OWNED: \
- ASS(mtx_owned((m))); \
- break; \
- case MA_NOTOWNED: \
- ASS(!mtx_owned((m))); \
- break; \
- default: \
- panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
- } \
-}
-
-#ifdef INVARIANTS
-#define ASS(ex) MPASS(ex)
-#define MPASS(ex) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- #ex, __FILE__, __LINE__)
-#define MPASS2(ex, what) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- what, __FILE__, __LINE__)
-
-#ifdef MTX_STRS
-char STR_IEN[] = "fl & 0x200";
-char STR_IDIS[] = "!(fl & 0x200)";
-#else /* MTX_STRS */
+#ifdef _KERN_MUTEX_C_
+char STR_IEN[] = "ps & IPL != IPL_HIGH";
+char STR_IDIS[] = "ps & IPL == IPL_HIGH";
+char STR_SIEN[] = "mpp->mtx_saveintr != IPL_HIGH";
+#else /* _KERN_MUTEX_C_ */
extern char STR_IEN[];
extern char STR_IDIS[];
-#endif /* MTX_STRS */
+extern char STR_SIEN[];
+#endif /* _KERN_MUTEX_C_ */
+
+#endif /* MUTEX_DEBUG */
+
#define ASS_IEN MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
== ALPHA_PSL_IPL_HIGH, STR_IEN)
#define ASS_IDIS MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
!= ALPHA_PSL_IPL_HIGH, STR_IDIS)
-#endif /* INVARIANTS */
-
-#endif /* SMP_DEBUG */
-
-#if !defined(SMP_DEBUG) || !defined(INVARIANTS)
-#define ASS(ex)
-#define MPASS(ex)
-#define MPASS2(ex, where)
-#define ASS_IEN
-#define ASS_IDIS
-#endif /* !defined(SMP_DEBUG) || !defined(INVARIANTS) */
-
-#ifdef WITNESS
-#ifndef SMP_DEBUG
-#error WITNESS requires SMP_DEBUG
-#endif /* SMP_DEBUG */
-#define WITNESS_ENTER(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_enter((m), (t), (f), (l))
-#define WITNESS_EXIT(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_exit((m), (t), (f), (l))
-
-#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
-#define WITNESS_SAVE_DECL(n) \
- const char * __CONCAT(n, __wf); \
- int __CONCAT(n, __wl)
-
-#define WITNESS_SAVE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)); \
-} while (0)
-
-#define WITNESS_RESTORE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)); \
-} while (0)
-
-void witness_init(struct mtx *, int flag);
-void witness_destroy(struct mtx *);
-void witness_enter(struct mtx *, int, const char *, int);
-void witness_try_enter(struct mtx *, int, const char *, int);
-void witness_exit(struct mtx *, int, const char *, int);
-void witness_display(void(*)(const char *fmt, ...));
-void witness_list(struct proc *);
-int witness_sleep(int, struct mtx *, const char *, int);
-void witness_save(struct mtx *, const char **, int *);
-void witness_restore(struct mtx *, const char *, int);
-#else /* WITNESS */
-#define WITNESS_ENTER(m, t, f, l)
-#define WITNESS_EXIT(m, t, f, l)
-#define WITNESS_SLEEP(check, m)
-#define WITNESS_SAVE_DECL(n)
-#define WITNESS_SAVE(m, n)
-#define WITNESS_RESTORE(m, n)
-
-/*
- * flag++ is a sleazy way of shutting up the unused parameter warning
- * in mtx_init()
- */
-#define witness_init(m, flag) flag++
-#define witness_destroy(m)
-#define witness_enter(m, t, f, l)
-#define witness_try_enter(m, t, f, l)
-#define witness_exit(m, t, f, l)
-#endif /* WITNESS */
+#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr != ALPHA_PSL_IPL_HIGH, STR_SIEN)
/*
* Assembly macros (for internal use only)
*--------------------------------------------------------------------------
*/
-/*
- * Get a sleep lock, deal with recursion inline
- */
-
#define _V(x) __STRING(x)
-#define _getlock_sleep(mp, tid, type) do { \
- if (atomic_cmpset_64(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != (tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_64(&(mp)->mtx_lock, MTX_RECURSE); \
- (mp)->mtx_recurse++; \
- } \
- } else { \
- alpha_mb(); \
- } \
-} while (0)
-
/*
* Get a spin lock, handle recursion inline (as the less common case)
*/
@@ -334,208 +80,6 @@ void witness_restore(struct mtx *, const char *, int);
} \
} while (0)
-/*
- * Get a lock without any recursion handling. Calls the hard enter
- * function if we can't get it inline.
- */
-
-#define _getlock_norecurse(mp, tid, type) do { \
- if (atomic_cmpset_64(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) \
- mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
- else \
- alpha_mb(); \
-} while (0)
-
-/*
- * Release a sleep lock assuming we haven't recursed on it; recursion is
- * handled in the hard function.
- */
-
-#define _exitlock_norecurse(mp, tid, type) do { \
- alpha_mb(); \
- if (atomic_cmpset_64(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-
-/*
- * Release a sleep lock when it's likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-
-#define _exitlock(mp, tid, type) do { \
- alpha_mb(); \
- if (atomic_cmpset_64(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) {\
- if (((mp)->mtx_lock & MTX_RECURSE) && \
- (--(mp)->mtx_recurse == 0)) \
- atomic_clear_64(&(mp)->mtx_lock, MTX_RECURSE); \
- else \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
-} while (0)
-
-/*
- * Release a spin lock (with possible recursion)
- */
-
-#define _exitlock_spin(mp) do { \
- alpha_mb(); \
- if ((mp)->mtx_recurse == 0) { \
- int _ipl = (mp)->mtx_saveipl; \
- atomic_cmpset_64(&(mp)->mtx_lock, (mp)->mtx_lock, \
- MTX_UNOWNED); \
- alpha_pal_swpipl(_ipl); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-
-/*
- * Externally visible mutex functions
- *------------------------------------------------------------------------
- */
-
-/*
- * Return non-zero if a mutex is already owned by the current thread
- */
-#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == CURTHD)
-
-/* Common strings */
-#ifdef MTX_STRS
-char STR_mtx_enter_fmt[] = "GOT %s [%p] at %s:%d r=%d";
-char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
-char STR_mtx_exit_fmt[] = "REL %s [%p] at %s:%d r=%d";
-char STR_mtx_owned[] = "mtx_owned(mpp)";
-char STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
-char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] at %s:%d result=%d";
-#else /* MTX_STRS */
-extern char STR_mtx_enter_fmt[];
-extern char STR_mtx_bad_type[];
-extern char STR_mtx_exit_fmt[];
-extern char STR_mtx_owned[];
-extern char STR_mtx_recurse[];
-extern char STR_mtx_try_enter_fmt[];
-#endif /* MTX_STRS */
-
-#ifndef KLD_MODULE
-/*
- * Get lock 'm'; the macro handles the easy (and most common) cases and
- * leaves the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant much of this code is optimized out
- */
-_MTX_INLINE void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *mpp = mtxp;
-
- /* bits only valid on mtx_exit() */
- MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type);
-
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this lock we
- * just bump the recursion count.
- */
- if (mpp->mtx_lock == CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
- * If an interrupt thread uses this, we must block
- * interrupts here.
- */
- _getlock_spin_block(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
- done:
- WITNESS_ENTER(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
-}
-
-/*
- * Attempt to get MTX_DEF lock, return non-zero if lock acquired
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-_MTX_INLINE int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
-
- rval = atomic_cmpset_64(&mpp->mtx_lock, MTX_UNOWNED, CURTHD);
-#ifdef SMP_DEBUG
- if (rval && mpp->mtx_witness != NULL) {
- ASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
- }
-#endif
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, file, line, rval);
-
- return rval;
-}
-
-/*
- * Release lock m
- */
-_MTX_INLINE void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
-
- MPASS2(mtx_owned(mpp), STR_mtx_owned);
- WITNESS_EXIT(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
- atomic_cmpset_64(&mpp->mtx_lock, mpp->mtx_lock,
- MTX_UNOWNED);
- if (((type) & MTX_TOPHALF) == 0)
- alpha_pal_swpipl(mpp->mtx_saveipl);
- } else
- if ((type) & MTX_TOPHALF) {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else
- _exitlock_spin(mpp);
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY)
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- }
-}
-
-#endif /* KLD_MODULE */
#endif /* _KERNEL */
#else /* !LOCORE */