path: root/sys/i386/include
author     bmilekic <bmilekic@FreeBSD.org>  2001-02-09 06:11:45 +0000
committer  bmilekic <bmilekic@FreeBSD.org>  2001-02-09 06:11:45 +0000
commit     f364d4ac3621ae2689a3cc1b82c73eb491475a24 (patch)
tree       84444d0341ce519800ed7913d826f5f38c622d6d /sys/i386/include
parent     363bdddf694863339f6629340cfb324771b8ffe7 (diff)
download   FreeBSD-src-f364d4ac3621ae2689a3cc1b82c73eb491475a24.zip
           FreeBSD-src-f364d4ac3621ae2689a3cc1b82c73eb491475a24.tar.gz
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:

  mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
  mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)

Similarly, for releasing a lock, we now have mtx_unlock(lock) for MTX_DEF
and mtx_unlock_spin(lock) for MTX_SPIN. We change the caller interface for
the two different types of locks because the semantics are entirely
different for each case; this makes the distinction explicitly clear and,
at the same time, rids us of the extra `type' argument. The enter->lock and
exit->unlock change has been made with the idea that we're "locking data,"
not "entering locked code," in mind.

Further, remove all additional "flags" previously passed to the lock
acquire/release routines, with the exception of two: MTX_QUIET and
MTX_NOSWITCH. The functionality of these flags is preserved and they can be
passed to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.

Re-inline some lock acquire/release code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code fits
into a cache line. In the spin lock case, we inline recursion and actually
only perform a function call if we need to spin. This change has been made
with the idea that we generally tend to avoid spin locks, and that the spin
locks we do have and use heavily (i.e. sched_lock) do recurse; therefore,
in an effort to reduce function call overhead for some architectures (such
as alpha), we inline recursion for this case.

Create a new malloc type for the witness code and retire from using the
M_DEV type. The new type is called M_WITNESS and is only declared if
WITNESS is enabled.

Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN and
MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently need those.

Finally, caught up to the interface changes in all sys code.

Contributors: jake, jhb, jasone (in no particular order)
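As a caller-side illustration of the interface change described above, a
minimal sketch (not part of this commit; `foo_mtx' is a hypothetical
MTX_DEF-initialized mutex):

    /* Old interface: the lock type is repeated at every call site. */
    mtx_enter(&foo_mtx, MTX_DEF);
    /* ... critical section ... */
    mtx_exit(&foo_mtx, MTX_DEF);

    /* New interface: the operation name encodes the lock type. */
    mtx_lock(&foo_mtx);
    /* ... critical section ... */
    mtx_unlock(&foo_mtx);

    /* Flags such as MTX_QUIET or MTX_NOSWITCH now go through wrappers: */
    mtx_lock_flags(&foo_mtx, MTX_QUIET);
    /* ... critical section ... */
    mtx_unlock_flags(&foo_mtx, MTX_QUIET);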
Diffstat (limited to 'sys/i386/include')
-rw-r--r--  sys/i386/include/cpu.h      |  4
-rw-r--r--  sys/i386/include/lock.h     |  8
-rw-r--r--  sys/i386/include/mptable.h  | 20
-rw-r--r--  sys/i386/include/mutex.h    | 89
-rw-r--r--  sys/i386/include/profile.h  |  4
5 files changed, 84 insertions, 41 deletions
diff --git a/sys/i386/include/cpu.h b/sys/i386/include/cpu.h
index a1d47f0..0b99ec6 100644
--- a/sys/i386/include/cpu.h
+++ b/sys/i386/include/cpu.h
@@ -92,9 +92,9 @@
* counter in the proc table and flag isn't really necessary.
*/
#define need_proftick(p) do { \
- mtx_enter(&sched_lock, MTX_SPIN); \
+ mtx_lock_spin(&sched_lock); \
(p)->p_sflag |= PS_OWEUPC; \
- mtx_exit(&sched_lock, MTX_SPIN); \
+ mtx_unlock_spin(&sched_lock); \
aston(); \
} while (0)
diff --git a/sys/i386/include/lock.h b/sys/i386/include/lock.h
index 414186c..35d1b3d 100644
--- a/sys/i386/include/lock.h
+++ b/sys/i386/include/lock.h
@@ -39,8 +39,8 @@
/*
* Protects the IO APIC and apic_imen as a critical region.
*/
-#define IMASK_LOCK MTX_ENTER(_imen_mtx, MTX_SPIN)
-#define IMASK_UNLOCK MTX_EXIT(_imen_mtx, MTX_SPIN)
+#define IMASK_LOCK MTX_LOCK_SPIN(_imen_mtx, 0)
+#define IMASK_UNLOCK MTX_UNLOCK_SPIN(_imen_mtx)
#else /* SMP */
@@ -62,8 +62,8 @@
* XXX should rc (RISCom/8) use this?
*/
#ifdef USE_COMLOCK
-#define COM_LOCK() mtx_enter(&com_mtx, MTX_SPIN)
-#define COM_UNLOCK() mtx_exit(&com_mtx, MTX_SPIN)
+#define COM_LOCK() mtx_lock_spin(&com_mtx)
+#define COM_UNLOCK() mtx_unlock_spin(&com_mtx)
#else
#define COM_LOCK()
#define COM_UNLOCK()
diff --git a/sys/i386/include/mptable.h b/sys/i386/include/mptable.h
index 9d53cd7..2802750 100644
--- a/sys/i386/include/mptable.h
+++ b/sys/i386/include/mptable.h
@@ -2268,7 +2268,7 @@ ap_init(void)
PCPU_SET(curproc, PCPU_GET(idleproc));
/* lock against other AP's that are waking up */
- mtx_enter(&ap_boot_mtx, MTX_SPIN);
+ mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2317,7 +2317,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- mtx_exit(&ap_boot_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2328,7 +2328,7 @@ ap_init(void)
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
@@ -2662,14 +2662,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -2687,9 +2687,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -2867,7 +2867,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2886,7 +2886,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
void
diff --git a/sys/i386/include/mutex.h b/sys/i386/include/mutex.h
index 2f16de3..c4fe210 100644
--- a/sys/i386/include/mutex.h
+++ b/sys/i386/include/mutex.h
@@ -43,22 +43,10 @@ extern struct mtx clock_lock;
/*
* Debugging
*/
-#ifdef MUTEX_DEBUG
-
-#ifdef _KERN_MUTEX_C_
-char STR_IEN[] = "fl & PSL_I";
-char STR_IDIS[] = "!(fl & PSL_I)";
-char STR_SIEN[] = "mpp->mtx_saveintr & PSL_I";
-#else /* _KERN_MUTEX_C_ */
-extern char STR_IEN[];
-extern char STR_IDIS[];
-extern char STR_SIEN[];
-#endif /* _KERN_MUTEX_C_ */
-#endif /* MUTEX_DEBUG */
-
-#define ASS_IEN MPASS2(read_eflags() & PSL_I, STR_IEN)
-#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, STR_IDIS)
-#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, STR_SIEN)
+#define ASS_IEN MPASS2(read_eflags() & PSL_I, "fl & PSL_I")
+#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, "!(fl & PSL_I)")
+#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, \
+ "mpp->mtx_saveintr & PSL_I")
#define mtx_legal2block() (read_eflags() & PSL_I)
@@ -66,9 +54,6 @@ extern char STR_SIEN[];
* Assembly macros (for internal use only)
*------------------------------------------------------------------------------
*/
-
-#ifdef _KERN_MUTEX_C_
-
#define _V(x) __STRING(x)
#if 0
@@ -252,22 +237,80 @@ extern char STR_SIEN[];
#undef _V
-#endif /* _KERN_MUTEX_C_ */
-
#endif /* _KERNEL */
#else /* !LOCORE */
/*
* Simple assembly macros to get and release mutexes.
+ *
+ * Note: All of these macros accept a "flags" argument and are analogous
+ * to the mtx_lock_flags and mtx_unlock_flags general macros. If one
+ * desires to not pass a flag, the value 0 may be passed as second
+ * argument.
+ *
+ * XXX: We only have MTX_LOCK_SPIN and MTX_UNLOCK_SPIN for now, since that's
+ * all we use right now. We should add MTX_LOCK and MTX_UNLOCK (for sleep
+ * locks) in the near future, however.
*/
+#define MTX_LOCK_SPIN(lck, flags) \
+ pushl %eax ; \
+ pushl %ecx ; \
+ pushl %ebx ; \
+ movl $(MTX_UNOWNED) , %eax ; \
+ movl PCPU(CURPROC), %ebx ; \
+ pushfl ; \
+ popl %ecx ; \
+ cli ; \
+ MPLOCKED cmpxchgl %ebx, lck+MTX_LOCK ; \
+ jz 2f ; \
+ cmpl lck+MTX_LOCK, %ebx ; \
+ je 3f ; \
+ pushl $0 ; \
+ pushl $0 ; \
+ pushl %ecx ; \
+ pushl $flags ; \
+ pushl $lck ; \
+ call _mtx_lock_spin ; \
+ addl $0x14, %esp ; \
+ jmp 1f ; \
+3: movl lck+MTX_RECURSECNT, %ebx ; \
+ incl %ebx ; \
+ movl %ebx, lck+MTX_RECURSECNT ; \
+ jmp 1f ; \
+2: movl %ecx, lck+MTX_SAVEINTR ; \
+1: popl %ebx ; \
+ popl %ecx ; \
+ popl %eax
+
+#define MTX_UNLOCK_SPIN(lck) \
+ pushl %edx ; \
+ pushl %eax ; \
+ movl lck+MTX_SAVEINTR, %edx ; \
+ movl lck+MTX_RECURSECNT, %eax ; \
+ testl %eax, %eax ; \
+ jne 2f ; \
+ movl $(MTX_UNOWNED), %eax ; \
+ xchgl %eax, lck+MTX_LOCK ; \
+ pushl %edx ; \
+ popfl ; \
+ jmp 1f ; \
+2: decl %eax ; \
+ movl %eax, lck+MTX_RECURSECNT ; \
+1: popl %eax ; \
+ popl %edx
+/*
+ * XXX: These two are broken right now and need to be made to work for
+ * XXX: sleep locks, as the above two work for spin locks. We're not in
+ * XXX: too much of a rush to do these as we do not use them right now.
+ */
#define MTX_ENTER(lck, type) \
pushl $0 ; /* dummy __LINE__ */ \
pushl $0 ; /* dummy __FILE__ */ \
pushl $type ; \
pushl $lck ; \
- call _mtx_enter ; \
+ call _mtx_lock_XXX ; \
addl $16,%esp
#define MTX_EXIT(lck, type) \
@@ -275,7 +318,7 @@ extern char STR_SIEN[];
pushl $0 ; /* dummy __FILE__ */ \
pushl $type ; \
pushl $lck ; \
- call _mtx_exit ; \
+ call _mtx_unlock_XXX ; \
addl $16,%esp
#endif /* !LOCORE */
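To make the i386 asm macros above easier to follow, here is a rough C
rendering of what MTX_LOCK_SPIN and MTX_UNLOCK_SPIN do (an illustrative
sketch, not committed code; the field names mtx_lock, mtx_recurse, and
mtx_saveintr are assumed from the MTX_LOCK, MTX_RECURSECNT, and
MTX_SAVEINTR asm offsets, and the _mtx_lock_spin() signature is inferred
from the argument pushes in the macro):

    static __inline void
    mtx_lock_spin_sketch(struct mtx *m, int flags)
    {
            u_int saveintr = read_eflags();  /* pushfl ; popl %ecx */

            disable_intr();                  /* cli */
            if (atomic_cmpset_int(&m->mtx_lock, MTX_UNOWNED,
                (u_int)PCPU_GET(curproc)))
                    /* Fast path: lock was unowned and is now ours;
                     * remember the pre-cli interrupt state. */
                    m->mtx_saveintr = saveintr;
            else if (m->mtx_lock == (u_int)PCPU_GET(curproc))
                    /* We already own it: recursion is inlined. */
                    m->mtx_recurse++;
            else
                    /* Contested: only now pay for a function call
                     * and spin inside _mtx_lock_spin(). */
                    _mtx_lock_spin(m, flags, saveintr, NULL, 0);
    }

    static __inline void
    mtx_unlock_spin_sketch(struct mtx *m)
    {
            u_int saveintr = m->mtx_saveintr;

            if (m->mtx_recurse != 0)
                    /* Recursed: drop one level, keep the lock. */
                    m->mtx_recurse--;
            else {
                    /* Release (the asm uses xchgl as an atomic store)
                     * and restore the saved interrupt state (popfl). */
                    atomic_store_rel_int(&m->mtx_lock, MTX_UNOWNED);
                    write_eflags(saveintr);
            }
    }

Because the macros take a mandatory "flags" argument at the assembly
level, callers in this diff that need no flag, such as IMASK_LOCK in
lock.h, pass an explicit 0.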
diff --git a/sys/i386/include/profile.h b/sys/i386/include/profile.h
index 28db117..5f2a780 100644
--- a/sys/i386/include/profile.h
+++ b/sys/i386/include/profile.h
@@ -66,8 +66,8 @@
#ifdef SMP
#define MCOUNT_ENTER(s) { s = read_eflags(); \
__asm __volatile("cli" : : : "memory"); \
- mtx_enter(&mcount_mtx, MTX_DEF); }
-#define MCOUNT_EXIT(s) { mtx_exit(&mcount_mtx, MTX_DEF); write_eflags(s); }
+ mtx_lock(&mcount_mtx); }
+#define MCOUNT_EXIT(s) { mtx_unlock(&mcount_mtx); write_eflags(s); }
#else
#define MCOUNT_ENTER(s) { s = read_eflags(); disable_intr(); }
#define MCOUNT_EXIT(s) (write_eflags(s))