author     jhb <jhb@FreeBSD.org>    2001-03-28 02:40:47 +0000
committer  jhb <jhb@FreeBSD.org>    2001-03-28 02:40:47 +0000
commit     4572ff9c78f9e673b960845742b5efce77d07348 (patch)
tree       118249692a5e69c7cb4a44540b33e1ec40b0f35d
parent     fdbf402b8548ef52a4bc5f1d4f27038556bfbf93 (diff)
- Switch from using save/disable/restore_intr to using critical_enter/exit
  and change the u_int mtx_saveintr member of struct mtx to a critical_t
  mtx_savecrit.
- On the alpha we no longer need a custom _get_spin_lock() macro to avoid
  an extra PAL call, so remove it.
- Partially fix using mutexes with WITNESS in modules.  Change all the
  _mtx_{un,}lock_{spin,}_flags() macros to accept explicit file and line
  parameters and rename them to use a prefix of two underscores.  Inside
  of kern_mutex.c, generate wrapper functions for
  _mtx_{un,}lock_{spin,}_flags() (only using a prefix of one underscore)
  that are called from modules.  The macros mtx_{un,}lock_{spin,}_flags()
  are mapped to the __mtx_* macros inside of the kernel to inline the
  usual case of mutex operations and map to the internal _mtx_* functions
  in the module case so that modules will use WITNESS and KTR logging if
  the kernel is compiled with support for it.
-rw-r--r--   sys/alpha/include/mutex.h    |  24
-rw-r--r--   sys/amd64/include/mutex.h    |  10
-rw-r--r--   sys/i386/include/mutex.h     |  10
-rw-r--r--   sys/ia64/include/mutex.h     |   2
-rw-r--r--   sys/kern/kern_mutex.c        |  36
-rw-r--r--   sys/kern/subr_turnstile.c    |  36
-rw-r--r--   sys/kern/subr_witness.c      |  36
-rw-r--r--   sys/powerpc/include/mutex.h  |  24
-rw-r--r--   sys/sys/mutex.h              |  90
9 files changed, 171 insertions, 97 deletions
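
The heart of the change is the first bullet above: a spin mutex now records the
opaque value returned by critical_enter() and hands it back to critical_exit()
when the lock is released, instead of saving and restoring a raw interrupt-flags
word itself.  The following userland sketch illustrates that pattern only; the
names (spin_mtx, my_critical_enter(), my_critical_exit(), spin_lock(),
spin_unlock()) are illustrative stand-ins rather than FreeBSD identifiers, and
the critical-section stubs merely mark where the real machine-dependent
primitives disable and restore interrupts (or swap the IPL on alpha).

/*
 * Minimal sketch of the critical_enter()/critical_exit() spin-lock
 * pattern the commit message describes.  This is not the FreeBSD code:
 * the "my_" functions are stand-ins, and the stubs below compile in
 * userland while the real primitives manipulate interrupt state.
 */
#include <stdatomic.h>
#include <stdint.h>

typedef uintptr_t critical_t;		/* opaque saved critical-section state */

struct spin_mtx {
	atomic_uintptr_t lock;		/* owner thread id, 0 when free */
	critical_t	 savecrit;	/* state handed back on release */
};

static critical_t
my_critical_enter(void)
{
	/* Kernel version: save the current interrupt state, disable
	   interrupts, and return what was saved. */
	return (0);
}

static void
my_critical_exit(critical_t crit)
{
	/* Kernel version: restore the state my_critical_enter() returned. */
	(void)crit;
}

static void
spin_lock(struct spin_mtx *m, uintptr_t tid)
{
	critical_t crit;
	uintptr_t free;

	crit = my_critical_enter();	/* replaces save_intr() + disable_intr() */
	for (;;) {
		free = 0;
		if (atomic_compare_exchange_weak(&m->lock, &free, tid))
			break;		/* acquired */
	}
	m->savecrit = crit;		/* stash it in the lock, like mtx_savecrit */
}

static void
spin_unlock(struct spin_mtx *m)
{
	critical_t crit = m->savecrit;	/* read before dropping the lock */

	atomic_store(&m->lock, 0);	/* release */
	my_critical_exit(crit);		/* replaces restore_intr() */
}

Because the saved state lives in the lock itself, the unlock path does not need
the caller to carry it around, which is exactly the role the renamed
mtx_savecrit member (and the MTX_SAVECRIT assembly offset) plays in the hunks
below.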
diff --git a/sys/alpha/include/mutex.h b/sys/alpha/include/mutex.h
index a6e295d..fd398d7 100644
--- a/sys/alpha/include/mutex.h
+++ b/sys/alpha/include/mutex.h
@@ -36,29 +36,7 @@
#ifdef _KERNEL
-#define mtx_intr_enable(mutex) (mutex)->mtx_saveintr = ALPHA_PSL_IPL_0
-
-/*
- * Assembly macros (for internal use only)
- *--------------------------------------------------------------------------
- */
-
-/*
- * Get a spin lock, handle recusion inline.
- */
-#define _get_spin_lock(mp, tid, opts) do { \
- u_int _ipl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); \
- if (!_obtain_lock((mp), (tid))) { \
- if ((mp)->mtx_lock == (uintptr_t)(tid)) \
- (mp)->mtx_recurse++; \
- else \
- _mtx_lock_spin((mp), (opts), _ipl, __FILE__, \
- __LINE__); \
- } else { \
- alpha_mb(); \
- (mp)->mtx_saveintr = _ipl; \
- } \
-} while (0)
+#define mtx_intr_enable(mutex) (mutex)->mtx_savecrit = ALPHA_PSL_IPL_0
#endif /* _KERNEL */
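
For context on the hunk above: the custom alpha _get_spin_lock() being deleted
existed only so the ALPHA_PSL_IPL_HIGH swap and the lock acquire could share a
single PAL call.  Once critical_enter() itself returns the previous IPL, the
machine-independent _get_spin_lock() in sys/sys/mutex.h (later in this diff)
covers the alpha case with no extra PAL traffic.  Below is a hedged sketch of
what the alpha machine-dependent primitives are assumed to look like after this
change; the stubbed alpha_pal_swpipl() and the ALPHA_PSL_IPL_HIGH placeholder
value are stand-ins for the real definitions in the alpha machine headers.

/*
 * Assumed shape of the alpha MD critical-section primitives after this
 * change: critical_enter() raises the IPL with one PAL call and returns
 * the old level, which critical_exit() later restores.  Not the literal
 * FreeBSD code; the PAL call and constant are stubbed so the sketch
 * reads standalone.
 */
typedef unsigned int critical_t;

#define	ALPHA_PSL_IPL_HIGH	6	/* placeholder; the real value lives in the alpha headers */

static unsigned int
alpha_pal_swpipl(unsigned int ipl)	/* stub: the real one is a PALcode trap */
{
	static unsigned int cur_ipl;
	unsigned int old = cur_ipl;

	cur_ipl = ipl;
	return (old);
}

static critical_t
critical_enter(void)
{
	return (alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH));	/* one PAL call, old IPL back */
}

static void
critical_exit(critical_t ipl)
{
	alpha_pal_swpipl(ipl);				/* drop back to the saved IPL */
}

With those in place, the generic _get_spin_lock() ends up doing exactly what
the deleted macro did by hand, which is why the macro can go away.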
diff --git a/sys/amd64/include/mutex.h b/sys/amd64/include/mutex.h
index 4184cde..b605efb 100644
--- a/sys/amd64/include/mutex.h
+++ b/sys/amd64/include/mutex.h
@@ -40,7 +40,7 @@
/* Global locks */
extern struct mtx clock_lock;
-#define mtx_intr_enable(mutex) (mutex)->mtx_saveintr |= PSL_I
+#define mtx_intr_enable(mutex) (mutex)->mtx_savecrit |= PSL_I
/*
* Assembly macros (for internal use only)
@@ -106,7 +106,7 @@ extern struct mtx clock_lock;
"# getlock_spin_block" \
: "+a" (_res), /* 0 */ \
"+m" (mtxp->mtx_lock), /* 1 */ \
- "=m" (mtxp->mtx_saveintr) /* 2 */ \
+ "=m" (mtxp->mtx_savecrit) /* 2 */ \
: "r" (tid), /* 3 (input) */ \
"gi" (type), /* 4 */ \
"g" (mtxp) /* 5 */ \
@@ -221,7 +221,7 @@ extern struct mtx clock_lock;
: "+m" (mtxp->mtx_lock), /* 0 */ \
"+m" (mtxp->mtx_recurse), /* 1 */ \
"=r" (_res) /* 2 */ \
- : "g" (mtxp->mtx_saveintr) /* 3 */ \
+ : "g" (mtxp->mtx_savecrit) /* 3 */ \
: "cc", "memory", "ecx" /* used */ ); \
})
@@ -270,7 +270,7 @@ extern struct mtx clock_lock;
incl %ebx ; \
movl %ebx, lck+MTX_RECURSECNT ; \
jmp 1f ; \
-2: movl %ecx, lck+MTX_SAVEINTR ; \
+2: movl %ecx, lck+MTX_SAVECRIT ; \
1: popl %ebx ; \
popl %ecx ; \
popl %eax
@@ -278,7 +278,7 @@ extern struct mtx clock_lock;
#define MTX_UNLOCK_SPIN(lck) \
pushl %edx ; \
pushl %eax ; \
- movl lck+MTX_SAVEINTR, %edx ; \
+ movl lck+MTX_SAVECRIT, %edx ; \
movl lck+MTX_RECURSECNT, %eax ; \
testl %eax, %eax ; \
jne 2f ; \
diff --git a/sys/i386/include/mutex.h b/sys/i386/include/mutex.h
index 4184cde..b605efb 100644
--- a/sys/i386/include/mutex.h
+++ b/sys/i386/include/mutex.h
@@ -40,7 +40,7 @@
/* Global locks */
extern struct mtx clock_lock;
-#define mtx_intr_enable(mutex) (mutex)->mtx_saveintr |= PSL_I
+#define mtx_intr_enable(mutex) (mutex)->mtx_savecrit |= PSL_I
/*
* Assembly macros (for internal use only)
@@ -106,7 +106,7 @@ extern struct mtx clock_lock;
"# getlock_spin_block" \
: "+a" (_res), /* 0 */ \
"+m" (mtxp->mtx_lock), /* 1 */ \
- "=m" (mtxp->mtx_saveintr) /* 2 */ \
+ "=m" (mtxp->mtx_savecrit) /* 2 */ \
: "r" (tid), /* 3 (input) */ \
"gi" (type), /* 4 */ \
"g" (mtxp) /* 5 */ \
@@ -221,7 +221,7 @@ extern struct mtx clock_lock;
: "+m" (mtxp->mtx_lock), /* 0 */ \
"+m" (mtxp->mtx_recurse), /* 1 */ \
"=r" (_res) /* 2 */ \
- : "g" (mtxp->mtx_saveintr) /* 3 */ \
+ : "g" (mtxp->mtx_savecrit) /* 3 */ \
: "cc", "memory", "ecx" /* used */ ); \
})
@@ -270,7 +270,7 @@ extern struct mtx clock_lock;
incl %ebx ; \
movl %ebx, lck+MTX_RECURSECNT ; \
jmp 1f ; \
-2: movl %ecx, lck+MTX_SAVEINTR ; \
+2: movl %ecx, lck+MTX_SAVECRIT ; \
1: popl %ebx ; \
popl %ecx ; \
popl %eax
@@ -278,7 +278,7 @@ extern struct mtx clock_lock;
#define MTX_UNLOCK_SPIN(lck) \
pushl %edx ; \
pushl %eax ; \
- movl lck+MTX_SAVEINTR, %edx ; \
+ movl lck+MTX_SAVECRIT, %edx ; \
movl lck+MTX_RECURSECNT, %eax ; \
testl %eax, %eax ; \
jne 2f ; \
diff --git a/sys/ia64/include/mutex.h b/sys/ia64/include/mutex.h
index f325f2b..f32bacd 100644
--- a/sys/ia64/include/mutex.h
+++ b/sys/ia64/include/mutex.h
@@ -38,7 +38,7 @@
#ifdef _KERNEL
-#define mtx_intr_enable(mutex) (mutex)->mtx_saveintr |= IA64_PSR_I
+#define mtx_intr_enable(mutex) (mutex)->mtx_savecrit |= IA64_PSR_I
#endif /* _KERNEL */
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 6540b56..ee285af 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -285,6 +285,38 @@ propagate_priority(struct proc *p)
}
/*
+ * Function versions of the inlined __mtx_* macros. These are used by
+ * modules and can also be called from assembly language if needed.
+ */
+void
+_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
+{
+
+ __mtx_lock_flags(m, opts, file, line);
+}
+
+void
+_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
+{
+
+ __mtx_unlock_flags(m, opts, file, line);
+}
+
+void
+_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
+{
+
+ __mtx_lock_spin_flags(m, opts, file, line);
+}
+
+void
+_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
+{
+
+ __mtx_unlock_spin_flags(m, opts, file, line);
+}
+
+/*
* The important part of mtx_trylock{,_flags}()
* Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
* if we're called, it's because we know we don't already own this lock.
@@ -461,7 +493,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
* is handled inline.
*/
void
-_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
+_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
int line)
{
int i = 0;
@@ -488,7 +520,7 @@ _mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
}
}
- m->mtx_saveintr = mtx_intr;
+ m->mtx_savecrit = mtx_crit;
if ((opts & MTX_QUIET) == 0)
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 6540b56..ee285af 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -285,6 +285,38 @@ propagate_priority(struct proc *p)
}
/*
+ * Function versions of the inlined __mtx_* macros. These are used by
+ * modules and can also be called from assembly language if needed.
+ */
+void
+_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
+{
+
+ __mtx_lock_flags(m, opts, file, line);
+}
+
+void
+_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
+{
+
+ __mtx_unlock_flags(m, opts, file, line);
+}
+
+void
+_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
+{
+
+ __mtx_lock_spin_flags(m, opts, file, line);
+}
+
+void
+_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
+{
+
+ __mtx_unlock_spin_flags(m, opts, file, line);
+}
+
+/*
* The important part of mtx_trylock{,_flags}()
* Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
* if we're called, it's because we know we don't already own this lock.
@@ -461,7 +493,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
* is handled inline.
*/
void
-_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
+_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
int line)
{
int i = 0;
@@ -488,7 +520,7 @@ _mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
}
}
- m->mtx_saveintr = mtx_intr;
+ m->mtx_savecrit = mtx_crit;
if ((opts & MTX_QUIET) == 0)
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 6540b56..ee285af 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -285,6 +285,38 @@ propagate_priority(struct proc *p)
}
/*
+ * Function versions of the inlined __mtx_* macros. These are used by
+ * modules and can also be called from assembly language if needed.
+ */
+void
+_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
+{
+
+ __mtx_lock_flags(m, opts, file, line);
+}
+
+void
+_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
+{
+
+ __mtx_unlock_flags(m, opts, file, line);
+}
+
+void
+_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
+{
+
+ __mtx_lock_spin_flags(m, opts, file, line);
+}
+
+void
+_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
+{
+
+ __mtx_unlock_spin_flags(m, opts, file, line);
+}
+
+/*
* The important part of mtx_trylock{,_flags}()
* Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
* if we're called, it's because we know we don't already own this lock.
@@ -461,7 +493,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
* is handled inline.
*/
void
-_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
+_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
int line)
{
int i = 0;
@@ -488,7 +520,7 @@ _mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
}
}
- m->mtx_saveintr = mtx_intr;
+ m->mtx_savecrit = mtx_crit;
if ((opts & MTX_QUIET) == 0)
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
diff --git a/sys/powerpc/include/mutex.h b/sys/powerpc/include/mutex.h
index a6e295d..fd398d7 100644
--- a/sys/powerpc/include/mutex.h
+++ b/sys/powerpc/include/mutex.h
@@ -36,29 +36,7 @@
#ifdef _KERNEL
-#define mtx_intr_enable(mutex) (mutex)->mtx_saveintr = ALPHA_PSL_IPL_0
-
-/*
- * Assembly macros (for internal use only)
- *--------------------------------------------------------------------------
- */
-
-/*
- * Get a spin lock, handle recusion inline.
- */
-#define _get_spin_lock(mp, tid, opts) do { \
- u_int _ipl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); \
- if (!_obtain_lock((mp), (tid))) { \
- if ((mp)->mtx_lock == (uintptr_t)(tid)) \
- (mp)->mtx_recurse++; \
- else \
- _mtx_lock_spin((mp), (opts), _ipl, __FILE__, \
- __LINE__); \
- } else { \
- alpha_mb(); \
- (mp)->mtx_saveintr = _ipl; \
- } \
-} while (0)
+#define mtx_intr_enable(mutex) (mutex)->mtx_savecrit = ALPHA_PSL_IPL_0
#endif /* _KERNEL */
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index eaf36df..d637cec 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -90,7 +90,7 @@ struct mtx_debug;
struct mtx {
volatile uintptr_t mtx_lock; /* owner (and state for sleep locks) */
volatile u_int mtx_recurse; /* number of recursive holds */
- u_int mtx_saveintr; /* saved flags (for spin locks) */
+ u_int mtx_savecrit; /* saved flags (for spin locks) */
int mtx_flags; /* flags passed to mtx_init() */
const char *mtx_description;
TAILQ_HEAD(, proc) mtx_blocked; /* threads blocked on this lock */
@@ -131,10 +131,16 @@ void mtx_init(struct mtx *m, const char *description, int opts);
void mtx_destroy(struct mtx *m);
void _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line);
void _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
-void _mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr,
+void _mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit,
const char *file, int line);
void _mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
int _mtx_trylock(struct mtx *m, int opts, const char *file, int line);
+void _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
+void _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
+void _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
+ int line);
+void _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
+ int line);
#ifdef INVARIANT_SUPPORT
void _mtx_assert(struct mtx *m, int what, const char *file, int line);
#endif
@@ -167,9 +173,9 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
* easy.
*/
#ifndef _get_sleep_lock
-#define _get_sleep_lock(mp, tid, opts) do { \
+#define _get_sleep_lock(mp, tid, opts, file, line) do { \
if (!_obtain_lock((mp), (tid))) \
- _mtx_lock_sleep((mp), (opts), __FILE__, __LINE__); \
+ _mtx_lock_sleep((mp), (opts), (file), (line)); \
} while (0)
#endif
@@ -181,17 +187,17 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
* a deal.
*/
#ifndef _get_spin_lock
-#define _get_spin_lock(mp, tid, opts) do { \
- u_int _mtx_intr = save_intr(); \
- disable_intr(); \
+#define _get_spin_lock(mp, tid, opts, file, line) do { \
+ critical_t _mtx_crit; \
+ _mtx_crit = critical_enter(); \
if (!_obtain_lock((mp), (tid))) { \
if ((mp)->mtx_lock == (uintptr_t)(tid)) \
(mp)->mtx_recurse++; \
else \
- _mtx_lock_spin((mp), (opts), _mtx_intr, \
- __FILE__, __LINE__); \
+ _mtx_lock_spin((mp), (opts), _mtx_crit, (file), \
+ (line)); \
} else \
- (mp)->mtx_saveintr = _mtx_intr; \
+ (mp)->mtx_savecrit = _mtx_crit; \
} while (0)
#endif
@@ -200,9 +206,9 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
* easy.
*/
#ifndef _rel_sleep_lock
-#define _rel_sleep_lock(mp, tid, opts) do { \
+#define _rel_sleep_lock(mp, tid, opts, file, line) do { \
if (!_release_lock((mp), (tid))) \
- _mtx_unlock_sleep((mp), (opts), __FILE__, __LINE__); \
+ _mtx_unlock_sleep((mp), (opts), (file), (line)); \
} while (0)
#endif
@@ -214,12 +220,12 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
*/
#ifndef _rel_spin_lock
#define _rel_spin_lock(mp) do { \
- u_int _mtx_intr = (mp)->mtx_saveintr; \
+ critical_t _mtx_crit = (mp)->mtx_savecrit; \
if (mtx_recursed((mp))) \
(mp)->mtx_recurse--; \
else { \
_release_lock_quick((mp)); \
- restore_intr(_mtx_intr); \
+ critical_exit(_mtx_crit); \
} \
} while (0)
#endif
@@ -259,52 +265,68 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
#define mtx_unlock(m) mtx_unlock_flags((m), 0)
#define mtx_unlock_spin(m) mtx_unlock_spin_flags((m), 0)
-#define mtx_lock_flags(m, opts) do { \
+#ifdef KLD_MODULE
+#define mtx_lock_flags(m, opts) \
+ _mtx_lock_flags((m), (opts), __FILE__, __LINE__)
+#define mtx_unlock_flags(m, opts) \
+ _mtx_unlock_flags((m), (opts), __FILE__, __LINE__)
+#define mtx_lock_spin_flags(m, opts) \
+ _mtx_lock_spin_flags((m), (opts), __FILE__, __LINE__)
+#define mtx_unlock_spin_flags(m, opts) \
+ _mtx_unlock_spin_flags((m), (opts), __FILE__, __LINE__)
+#else
+#define mtx_lock_flags(m, opts) \
+ __mtx_lock_flags((m), (opts), __FILE__, __LINE__)
+#define mtx_unlock_flags(m, opts) \
+ __mtx_unlock_flags((m), (opts), __FILE__, __LINE__)
+#define mtx_lock_spin_flags(m, opts) \
+ __mtx_lock_spin_flags((m), (opts), __FILE__, __LINE__)
+#define mtx_unlock_spin_flags(m, opts) \
+ __mtx_unlock_spin_flags((m), (opts), __FILE__, __LINE__)
+#endif
+
+#define __mtx_lock_flags(m, opts, file, line) do { \
MPASS(curproc != NULL); \
KASSERT(((opts) & MTX_NOSWITCH) == 0, \
- ("MTX_NOSWITCH used at %s:%d", __FILE__, __LINE__)); \
- _get_sleep_lock((m), curproc, (opts)); \
+ ("MTX_NOSWITCH used at %s:%d", (file), (line))); \
+ _get_sleep_lock((m), curproc, (opts), (file), (line)); \
if (((opts) & MTX_QUIET) == 0) \
CTR5(KTR_LOCK, STR_mtx_lock_slp, \
(m)->mtx_description, (m), (m)->mtx_recurse, \
- __FILE__, __LINE__); \
- WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__, \
- __LINE__); \
+ (file), (line)); \
+ WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), (file), (line)); \
} while (0)
-#define mtx_lock_spin_flags(m, opts) do { \
+#define __mtx_lock_spin_flags(m, opts, file, line) do { \
MPASS(curproc != NULL); \
- _get_spin_lock((m), curproc, (opts)); \
+ _get_spin_lock((m), curproc, (opts), (file), (line)); \
if (((opts) & MTX_QUIET) == 0) \
CTR5(KTR_LOCK, STR_mtx_lock_spn, \
(m)->mtx_description, (m), (m)->mtx_recurse, \
- __FILE__, __LINE__); \
- WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__, \
- __LINE__); \
+ (file), (line)); \
+ WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), (file), (line)); \
} while (0)
-#define mtx_unlock_flags(m, opts) do { \
+#define __mtx_unlock_flags(m, opts, file, line) do { \
MPASS(curproc != NULL); \
mtx_assert((m), MA_OWNED); \
- WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__, \
- __LINE__); \
- _rel_sleep_lock((m), curproc, (opts)); \
+ WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), (file), (line)); \
+ _rel_sleep_lock((m), curproc, (opts), (file), (line)); \
if (((opts) & MTX_QUIET) == 0) \
CTR5(KTR_LOCK, STR_mtx_unlock_slp, \
(m)->mtx_description, (m), (m)->mtx_recurse, \
- __FILE__, __LINE__); \
+ (file), (line)); \
} while (0)
-#define mtx_unlock_spin_flags(m, opts) do { \
+#define __mtx_unlock_spin_flags(m, opts, file, line) do { \
MPASS(curproc != NULL); \
mtx_assert((m), MA_OWNED); \
- WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__, \
- __LINE__); \
+ WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), (file), (line)); \
_rel_spin_lock((m)); \
if (((opts) & MTX_QUIET) == 0) \
CTR5(KTR_LOCK, STR_mtx_unlock_spn, \
(m)->mtx_description, (m), (m)->mtx_recurse, \
- __FILE__, __LINE__); \
+ (file), (line)); \
} while (0)
#define mtx_trylock_flags(m, opts) \
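
The KLD_MODULE block in the sys/sys/mutex.h hunk above is the piece that makes
WITNESS and KTR work for modules: a module build expands
mtx_{un,}lock_{spin,}_flags() into calls to the exported _mtx_* functions, so a
module automatically picks up whatever debugging support the running kernel was
compiled with, while a kernel build keeps inlining the __mtx_* bodies.  A
reduced sketch of that shape, using placeholder names (my_lock_flags and
friends) rather than the real macro bodies:

/*
 * Reduced illustration of the KLD_MODULE mapping: the kernel inlines the
 * fast path, while modules call through a function that wraps the same
 * inline body once (as the kern_mutex.c hunk above generates).  "my_"
 * names are placeholders, not FreeBSD identifiers.
 */
struct my_mtx;

void	_my_lock_flags(struct my_mtx *m, int opts, const char *file, int line);

#define	__my_lock_flags(m, opts, file, line) do {			\
	/* inline fast path: atomic acquire, KTR logging, WITNESS entry */ \
	(void)(m); (void)(opts); (void)(file); (void)(line);		\
} while (0)

#ifdef KLD_MODULE
/* Modules always take the function call, so they match the kernel's
 * WITNESS/KTR configuration instead of baking in their own. */
#define	my_lock_flags(m, opts)						\
	_my_lock_flags((m), (opts), __FILE__, __LINE__)
#else
/* The kernel proper inlines the common case. */
#define	my_lock_flags(m, opts)						\
	__my_lock_flags((m), (opts), __FILE__, __LINE__)
#endif

/* The exported function is just the inline body wrapped once. */
void
_my_lock_flags(struct my_mtx *m, int opts, const char *file, int line)
{

	__my_lock_flags(m, opts, file, line);
}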