summaryrefslogtreecommitdiffstats
path: root/sys/i386/include/mutex.h
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2000-10-20 07:26:37 +0000
committerjhb <jhb@FreeBSD.org>2000-10-20 07:26:37 +0000
commitf671832d7667351a66ab594f63d8cd4ef66e8e91 (patch)
tree99efa7c13c407257b68406e364261eeb1b2c1972 /sys/i386/include/mutex.h
parent787712af1c7ffb2642f8250f2111301ad77fdaf8 (diff)
downloadFreeBSD-src-f671832d7667351a66ab594f63d8cd4ef66e8e91.zip
FreeBSD-src-f671832d7667351a66ab594f63d8cd4ef66e8e91.tar.gz
- Make the mutex code almost completely machine independent. This greatly
reduces the maintenance load for the mutex code. The only MD portions of the mutex code are in machine/mutex.h now, which include the assembly macros for handling mutexes as well as optionally overriding the mutex micro-operations. For example, we use optimized micro-ops on the x86 platform #ifndef I386_CPU. - Change the behavior of the SMP_DEBUG kernel option. In the new code, mtx_assert() only depends on INVARIANTS, allowing other kernel developers to have working mutex assertions without having to include all of the mutex debugging code. The SMP_DEBUG kernel option has been renamed to MUTEX_DEBUG and now just controls extra mutex debugging code. - Abolish the ugly mtx_f hack. Instead, we dynamically allocate separate mtx_debug structures on the fly in mtx_init, except for mutexes that are initiated very early in the boot process. These mutexes are declared using a special MUTEX_DECLARE() macro, and use a new flag MTX_COLD when calling mtx_init. This is still somewhat hackish, but it is less evil than the mtx_f filler struct, and the mtx struct is now the same size with and without mutex debugging code. - Add some micro-micro-operation macros for doing the actual atomic operations on the mutex mtx_lock field to make it easier for other archs to override/optimize mutex ops if needed. These new tiny ops also clean up the code in some places by replacing long atomic operation function calls that spanned 2-3 lines with a short 1-line macro call. - Don't call mi_switch() from mtx_enter_hard() when we block while trying to obtain a sleep mutex. Calling mi_switch() would bogusly release Giant before switching to the next process. Instead, inline most of the code from mi_switch() in the mtx_enter_hard() function. Note that when we finally kill Giant we can back this out and go back to calling mi_switch().
Diffstat (limited to 'sys/i386/include/mutex.h')
-rw-r--r--sys/i386/include/mutex.h535
1 files changed, 23 insertions, 512 deletions
diff --git a/sys/i386/include/mutex.h b/sys/i386/include/mutex.h
index 881cbfa..1a8a7b6 100644
--- a/sys/i386/include/mutex.h
+++ b/sys/i386/include/mutex.h
@@ -33,266 +33,32 @@
#define _MACHINE_MUTEX_H_
#ifndef LOCORE
-#include <sys/queue.h>
#ifdef _KERNEL
-#include <sys/ktr.h>
-#include <sys/proc.h> /* Needed for curproc. */
-#include <machine/atomic.h>
-#include <machine/cpufunc.h>
-#include <machine/globals.h>
-
-/*
- * If kern_mutex.c is being built, compile non-inlined versions of various
- * functions so that kernel modules can use them.
- */
-#ifndef _KERN_MUTEX_C_
-#define _MTX_INLINE static __inline
-#else
-#define _MTX_INLINE
-#endif
-
-/*
- * Mutex flags
- *
- * Types
- */
-#define MTX_DEF 0x0 /* Default (spin/sleep) */
-#define MTX_SPIN 0x1 /* Spin only lock */
-
-/* Options */
-#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
-#define MTX_NORECURSE 0x8 /* No recursion possible */
-#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
-#define MTX_NOSWITCH 0x20 /* Do not switch on release */
-#define MTX_FIRST 0x40 /* First spin lock holder */
-#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
-
-/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
-#define MTX_HARDOPTS (MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
-
-/* Flags/value used in mtx_lock */
-#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
-#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
-#define MTX_FLAGMASK ~(MTX_RECURSE | MTX_CONTESTED)
-#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
-
-#endif /* _KERNEL */
-
-/*
- * Sleep/spin mutex
- */
-struct mtx {
- volatile u_int mtx_lock; /* lock owner/gate/flags */
- volatile u_int mtx_recurse; /* number of recursive holds */
- u_int mtx_savefl; /* saved flags (for spin locks) */
- char *mtx_description;
- TAILQ_HEAD(, proc) mtx_blocked;
- LIST_ENTRY(mtx) mtx_contested;
- struct mtx *mtx_next; /* all locks in system */
- struct mtx *mtx_prev;
-#ifdef SMP_DEBUG
- /* If you add anything here, adjust the mtxf_t definition below */
- struct witness *mtx_witness;
- LIST_ENTRY(mtx) mtx_held;
- const char *mtx_file;
- int mtx_line;
-#endif /* SMP_DEBUG */
-};
-
-/*
- * Filler for structs which need to remain the same size
- * whether or not SMP_DEBUG is turned on.
- */
-typedef struct mtxf {
-#ifdef SMP_DEBUG
- char mtxf_data[0];
-#else
- char mtxf_data[4*sizeof(void *) + sizeof(int)];
-#endif
-} mtxf_t;
-
-#define mp_fixme(string)
-
-#ifdef _KERNEL
-/* Misc */
-#define CURTHD ((u_int)CURPROC) /* Current thread ID */
-
-/* Prototypes */
-void mtx_init(struct mtx *m, char *description, int flag);
-void mtx_enter_hard(struct mtx *, int type, int flags);
-void mtx_exit_hard(struct mtx *, int type);
-void mtx_destroy(struct mtx *m);
-
-/*
- * Wrap the following functions with cpp macros so that filenames and line
- * numbers are embedded in the code correctly.
- */
-#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
-void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
-int _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
-void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
-#endif
-
-#define mtx_enter(mtxp, type) \
- _mtx_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_try_enter(mtxp, type) \
- _mtx_try_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_exit(mtxp, type) \
- _mtx_exit((mtxp), (type), __FILE__, __LINE__)
+#include <machine/psl.h>
/* Global locks */
-extern struct mtx sched_lock;
-extern struct mtx Giant;
extern struct mtx clock_lock;
/*
- * Used to replace return with an exit Giant and return.
- */
-
-#define EGAR(a) \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return (a); \
-} while (0)
-
-#define VEGAR \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return; \
-} while (0)
-
-#define DROP_GIANT() \
-do { \
- int _giantcnt; \
- WITNESS_SAVE_DECL(Giant); \
- \
- WITNESS_SAVE(&Giant, Giant); \
- for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
- mtx_exit(&Giant, MTX_DEF)
-
-#define PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant); \
-} while (0)
-
-#define PARTIAL_PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant)
-
-
-/*
* Debugging
*/
-#ifndef SMP_DEBUG
-#define mtx_assert(m, what)
-#else /* SMP_DEBUG */
-
-#define MA_OWNED 1
-#define MA_NOTOWNED 2
-#define mtx_assert(m, what) { \
- switch ((what)) { \
- case MA_OWNED: \
- ASS(mtx_owned((m))); \
- break; \
- case MA_NOTOWNED: \
- ASS(!mtx_owned((m))); \
- break; \
- default: \
- panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
- } \
-}
+#ifdef MUTEX_DEBUG
-#ifdef INVARIANTS
-#define ASS(ex) MPASS(ex)
-#define MPASS(ex) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- #ex, __FILE__, __LINE__)
-#define MPASS2(ex, what) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- what, __FILE__, __LINE__)
-
-#ifdef MTX_STRS
-char STR_IEN[] = "fl & 0x200";
-char STR_IDIS[] = "!(fl & 0x200)";
-#else /* MTX_STRS */
+#ifdef _KERN_MUTEX_C_
+char STR_IEN[] = "fl & PSL_I";
+char STR_IDIS[] = "!(fl & PSL_I)";
+char STR_SIEN[] = "mpp->mtx_saveintr & PSL_I";
+#else /* _KERN_MUTEX_C_ */
extern char STR_IEN[];
extern char STR_IDIS[];
-#endif /* MTX_STRS */
-#define ASS_IEN MPASS2(read_eflags() & 0x200, STR_IEN)
-#define ASS_IDIS MPASS2((read_eflags() & 0x200) == 0, STR_IDIS)
-#endif /* INVARIANTS */
-
-#endif /* SMP_DEBUG */
-
-#if !defined(SMP_DEBUG) || !defined(INVARIANTS)
-#define ASS(ex)
-#define MPASS(ex)
-#define MPASS2(ex, where)
-#define ASS_IEN
-#define ASS_IDIS
-#endif /* !defined(SMP_DEBUG) || !defined(INVARIANTS) */
-
-#ifdef WITNESS
-#ifndef SMP_DEBUG
-#error WITNESS requires SMP_DEBUG
-#endif /* SMP_DEBUG */
-#define WITNESS_ENTER(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_enter((m), (t), (f), (l))
-#define WITNESS_EXIT(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_exit((m), (t), (f), (l))
-
-#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
-#define WITNESS_SAVE_DECL(n) \
- const char * __CONCAT(n, __wf); \
- int __CONCAT(n, __wl)
-
-#define WITNESS_SAVE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)); \
-} while (0)
-
-#define WITNESS_RESTORE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)); \
-} while (0)
+extern char STR_SIEN[];
+#endif /* _KERN_MUTEX_C_ */
+#endif /* MUTEX_DEBUG */
-void witness_init(struct mtx *, int flag);
-void witness_destroy(struct mtx *);
-void witness_enter(struct mtx *, int, const char *, int);
-void witness_try_enter(struct mtx *, int, const char *, int);
-void witness_exit(struct mtx *, int, const char *, int);
-void witness_display(void(*)(const char *fmt, ...));
-void witness_list(struct proc *);
-int witness_sleep(int, struct mtx *, const char *, int);
-void witness_save(struct mtx *, const char **, int *);
-void witness_restore(struct mtx *, const char *, int);
-#else /* WITNESS */
-#define WITNESS_ENTER(m, t, f, l)
-#define WITNESS_EXIT(m, t, f, l)
-#define WITNESS_SLEEP(check, m)
-#define WITNESS_SAVE_DECL(n)
-#define WITNESS_SAVE(m, n)
-#define WITNESS_RESTORE(m, n)
-
-/*
- * flag++ is slezoid way of shutting up unused parameter warning
- * in mtx_init()
- */
-#define witness_init(m, flag) flag++
-#define witness_destroy(m)
-#define witness_enter(m, t, f, l)
-#define witness_try_enter(m, t, f, l)
-#define witness_exit(m, t, f, l)
-#endif /* WITNESS */
+#define ASS_IEN MPASS2(read_eflags() & PSL_I, STR_IEN)
+#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, STR_IDIS)
+#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, STR_SIEN)
/*
* Assembly macros (for internal use only)
@@ -358,7 +124,7 @@ void witness_restore(struct mtx *, const char *, int);
"# getlock_spin_block" \
: "=&a" (_res), /* 0 (dummy output) */ \
"+m" (mtxp->mtx_lock), /* 1 */ \
- "=m" (mtxp->mtx_savefl) /* 2 */ \
+ "=m" (mtxp->mtx_saveintr) /* 2 */ \
: "r" (tid), /* 3 (input) */ \
"gi" (type), /* 4 */ \
"g" (mtxp) /* 5 */ \
@@ -456,7 +222,7 @@ void witness_restore(struct mtx *, const char *, int);
* We use cmpxchgl to clear lock (instead of simple store) to flush posting
* buffers and make the change visible to other CPU's.
*/
-#define _exitlock_spin(mtxp, inten1, inten2) ({ \
+#define _exitlock_spin(mtxp) ({ \
int _res; \
\
__asm __volatile ( \
@@ -467,276 +233,21 @@ void witness_restore(struct mtx *, const char *, int);
" jmp 2f;" \
"1: movl %0,%%eax;" \
" movl $ " _V(MTX_UNOWNED) ",%%ecx;" \
-" " inten1 ";" \
+" pushl %3;" \
" " MPLOCKED "" \
" cmpxchgl %%ecx,%0;" \
-" " inten2 ";" \
+" popfl;" \
"2:" \
"# exitlock_spin" \
: "+m" (mtxp->mtx_lock), /* 0 */ \
"+m" (mtxp->mtx_recurse), /* 1 */ \
"=&a" (_res) /* 2 */ \
- : "g" (mtxp->mtx_savefl) /* 3 (used in 'inten') */ \
+ : "g" (mtxp->mtx_saveintr) /* 3 */ \
: "memory", "ecx" /* used */ ); \
})
-#else /* I386_CPU */
-
-/*
- * For 386 processors only.
- */
-
-/* Get a sleep lock, deal with recursion inline. */
-#define _getlock_sleep(mp, tid, type) do { \
- if (atomic_cmpset_int(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != (tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_int(&(mp)->mtx_lock, MTX_RECURSE); \
- (mp)->mtx_recurse++; \
- } \
- } \
-} while (0)
-
-/* Get a spin lock, handle recursion inline (as the less common case) */
-#define _getlock_spin_block(mp, tid, type) do { \
- u_int _mtx_fl = read_eflags(); \
- disable_intr(); \
- if (atomic_cmpset_int(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_fl); \
- else \
- (mp)->mtx_savefl = _mtx_fl; \
-} while (0)
-
-/*
- * Get a lock without any recursion handling. Calls the hard enter function if
- * we can't get it inline.
- */
-#define _getlock_norecurse(mp, tid, type) do { \
- if (atomic_cmpset_int(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) \
- mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
-} while (0)
-
-/*
- * Release a sleep lock assuming we haven't recursed on it, recursion is handled
- * in the hard function.
- */
-#define _exitlock_norecurse(mp, tid, type) do { \
- if (atomic_cmpset_int(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-
-/*
- * Release a sleep lock when its likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-#define _exitlock(mp, tid, type) do { \
- if (atomic_cmpset_int(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) { \
- if ((mp)->mtx_lock & MTX_RECURSE) { \
- if (--((mp)->mtx_recurse) == 0) \
- atomic_clear_int(&(mp)->mtx_lock, \
- MTX_RECURSE); \
- } else { \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
- } \
-} while (0)
-
-/* Release a spin lock (with possible recursion). */
-#define _exitlock_spin(mp, inten1, inten2) do { \
- if ((mp)->mtx_recurse == 0) { \
- atomic_cmpset_int(&(mp)->mtx_lock, (mp)->mtx_lock, \
- MTX_UNOWNED); \
- write_eflags((mp)->mtx_savefl); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-
#endif /* I386_CPU */
-/*
- * Externally visible mutex functions.
- *------------------------------------------------------------------------------
- */
-
-/*
- * Return non-zero if a mutex is already owned by the current thread.
- */
-#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == CURTHD)
-
-/* Common strings */
-#ifdef MTX_STRS
-#ifdef KTR_EXTEND
-
-/*
- * KTR_EXTEND saves file name and line for all entries, so we don't need them
- * here. Theoretically we should also change the entries which refer to them
- * (from CTR5 to CTR3), but since they're just passed to snprintf as the last
- * parameters, it doesn't do any harm to leave them.
- */
-char STR_mtx_enter_fmt[] = "GOT %s [%x] r=%d";
-char STR_mtx_exit_fmt[] = "REL %s [%x] r=%d";
-char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] result=%d";
-#else
-char STR_mtx_enter_fmt[] = "GOT %s [%x] at %s:%d r=%d";
-char STR_mtx_exit_fmt[] = "REL %s [%x] at %s:%d r=%d";
-char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] at %s:%d result=%d";
-#endif
-char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
-char STR_mtx_owned[] = "mtx_owned(mpp)";
-char STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
-#else /* MTX_STRS */
-extern char STR_mtx_enter_fmt[];
-extern char STR_mtx_bad_type[];
-extern char STR_mtx_exit_fmt[];
-extern char STR_mtx_owned[];
-extern char STR_mtx_recurse[];
-extern char STR_mtx_try_enter_fmt[];
-#endif /* MTX_STRS */
-
-#ifndef KLD_MODULE
-/*
- * Get lock 'm', the macro handles the easy (and most common cases) and leaves
- * the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant much of this code is optimized out.
- */
-_MTX_INLINE void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *mpp = mtxp;
-
- /* bits only valid on mtx_exit() */
- MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type);
-
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this
- * lock we just bump the recursion count.
- */
- if (mpp->mtx_lock == CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
- * If an interrupt thread uses this we must block
- * interrupts here.
- */
- if ((type) & MTX_FIRST) {
- ASS_IEN;
- disable_intr();
- _getlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else {
- _getlock_spin_block(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
- done:
- WITNESS_ENTER(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
-}
-
-/*
- * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-_MTX_INLINE int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
-
- rval = atomic_cmpset_int(&mpp->mtx_lock, MTX_UNOWNED, CURTHD);
-#ifdef SMP_DEBUG
- if (rval && mpp->mtx_witness != NULL) {
- ASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
- }
-#endif
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, file, line, rval);
-
- return rval;
-}
-
-#define mtx_legal2block() (read_eflags() & 0x200)
-
-/*
- * Release lock m.
- */
-_MTX_INLINE void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
-
- MPASS2(mtx_owned(mpp), STR_mtx_owned);
- WITNESS_EXIT(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
- atomic_cmpset_int(&mpp->mtx_lock, mpp->mtx_lock,
- MTX_UNOWNED);
- if (((type) & MTX_TOPHALF) == 0) {
- if ((type) & MTX_FIRST) {
- ASS_IDIS;
- enable_intr();
- } else
- write_eflags(mpp->mtx_savefl);
- }
- } else {
- if ((type) & MTX_TOPHALF)
- _exitlock_spin(mpp,,);
- else {
- if ((type) & MTX_FIRST) {
- ASS_IDIS;
- _exitlock_spin(mpp,, "sti");
- } else {
- _exitlock_spin(mpp,
- "pushl %3", "popfl");
- }
- }
- }
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY)
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- }
-}
-
-#endif /* KLD_MODULE */
#endif /* _KERNEL */
#else /* !LOCORE */
@@ -748,7 +259,7 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
#if defined(I386_CPU)
#define MTX_EXIT(lck, reg) \
- pushl lck+MTX_SAVEFL; \
+ pushl lck+MTX_SAVEINTR; \
movl $ MTX_UNOWNED,lck+MTX_LOCK; \
popf
@@ -761,11 +272,11 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
MPLOCKED \
cmpxchgl reg,lck+MTX_LOCK; \
jnz 9b; \
- popl lck+MTX_SAVEFL;
+ popl lck+MTX_SAVEINTR;
/* Must use locked bus op (cmpxchg) when setting to unowned (barrier) */
#define MTX_EXIT(lck,reg) \
- pushl lck+MTX_SAVEFL; \
+ pushl lck+MTX_SAVEINTR; \
movl lck+MTX_LOCK,%eax; \
movl $ MTX_UNOWNED,reg; \
MPLOCKED \
@@ -784,7 +295,7 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
MPLOCKED \
cmpxchgl reg,lck+MTX_LOCK; \
jnz 9b; \
- popl lck+MTX_SAVEFL; \
+ popl lck+MTX_SAVEINTR; \
jmp 10f; \
8: add $4,%esp; \
10:
@@ -795,7 +306,7 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
js 9f; \
movl %eax,lck+MTX_RECURSE; \
jmp 8f; \
- pushl lck+MTX_SAVEFL; \
+ pushl lck+MTX_SAVEINTR; \
9: movl lck+MTX_LOCK,%eax; \
movl $ MTX_UNOWNED,reg; \
MPLOCKED \
OpenPOWER on IntegriCloud