author     bmilekic <bmilekic@FreeBSD.org>  2001-02-09 06:11:45 +0000
committer  bmilekic <bmilekic@FreeBSD.org>  2001-02-09 06:11:45 +0000
commit     f364d4ac3621ae2689a3cc1b82c73eb491475a24 (patch)
tree       84444d0341ce519800ed7913d826f5f38c622d6d /sys/powerpc
parent     363bdddf694863339f6629340cfb324771b8ffe7 (diff)
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:

    mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
    mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)

Similarly, for releasing a lock, we now have mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN. We change the caller interface for the two different types of locks because the semantics are entirely different for each case; this makes the distinction explicitly clear and, at the same time, rids us of the extra `type' argument. The enter->lock and exit->unlock change has been made with the idea that we're "locking data" and not "entering locked code" in mind.

Further, remove all additional "flags" previously passed to the lock acquire/release routines, with the exception of two: MTX_QUIET and MTX_NOSWITCH. The functionality of these flags is preserved and they can be passed to the lock/unlock routines by calling the corresponding wrappers:

    mtx_{lock, unlock}_flags(lock, flag(s))
    mtx_{lock, unlock}_spin_flags(lock, flag(s))

for MTX_DEF and MTX_SPIN locks, respectively.

Re-inline some lock acquire/release code. In the sleep lock case, we only inline the _obtain_lock()s, in order to ensure that the inlined code fits into a cache line. In the spin lock case, we inline recursion and only perform a function call if we actually need to spin. This change has been made with the idea that we generally tend to avoid spin locks, and that the spin locks we do have and use heavily (i.e. sched_lock) do recurse; therefore, in an effort to reduce function call overhead on some architectures (such as alpha), we inline recursion for this case.

Create a new malloc type for the witness code and retire from using the M_DEV type. The new type is called M_WITNESS and is only declared if WITNESS is enabled.

Begin cleaning up some machdep/mutex.h code - specifically, update the "optimized" inlined code in alpha/mutex.h and write MTX_LOCK_SPIN and MTX_UNLOCK_SPIN asm macros for i386/mutex.h, as we presently need those.

Finally, catch up to the interface changes in all sys code.

Contributors: jake, jhb, jasone (in no particular order)
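To make the renamed interface concrete, here is a minimal, hypothetical C sketch of caller code written against the new API. The mutexes (example_mtx, example_spin_mtx) and the function are invented for illustration and are assumed to have been initialized elsewhere with MTX_DEF and MTX_SPIN respectively; only the mtx_* calls follow the mapping described above and the usage visible in the diffs below.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Hypothetical locks, assumed initialized elsewhere via mtx_init(). */
static struct mtx example_mtx;		/* MTX_DEF (sleep) lock */
static struct mtx example_spin_mtx;	/* MTX_SPIN (spin) lock */

static void
example_locking(void)
{
	/*
	 * Sleep lock: was mtx_enter(&example_mtx, MTX_DEF) and
	 * mtx_exit(&example_mtx, MTX_DEF).
	 */
	mtx_lock(&example_mtx);
	/* ... access data protected by example_mtx ... */
	mtx_unlock(&example_mtx);

	/*
	 * Spin lock: was mtx_enter(&example_spin_mtx, MTX_SPIN) and
	 * mtx_exit(&example_spin_mtx, MTX_SPIN).
	 */
	mtx_lock_spin(&example_spin_mtx);
	/* ... access data protected by example_spin_mtx ... */
	mtx_unlock_spin(&example_spin_mtx);

	/*
	 * Extra flags now go through the _flags wrappers: was
	 * mtx_exit(&example_mtx, MTX_DEF | MTX_NOSWITCH).
	 */
	mtx_lock(&example_mtx);
	mtx_unlock_flags(&example_mtx, MTX_NOSWITCH);

	/* Try-lock: was mtx_try_enter(&example_mtx, MTX_DEF). */
	if (mtx_trylock(&example_mtx)) {
		/* ... */
		mtx_unlock(&example_mtx);
	}
}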
Diffstat (limited to 'sys/powerpc')
-rw-r--r--sys/powerpc/aim/vm_machdep.c8
-rw-r--r--sys/powerpc/include/mutex.h47
-rw-r--r--sys/powerpc/powerpc/mp_machdep.c8
-rw-r--r--sys/powerpc/powerpc/procfs_machdep.c24
-rw-r--r--sys/powerpc/powerpc/vm_machdep.c8
5 files changed, 40 insertions, 55 deletions
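As a rough model of the spin-lock inlining described in the commit message (and implemented by the _get_spin_lock() macro in the sys/powerpc/include/mutex.h diff below), the acquire path behaves roughly as sketched here. This is not the real macro: model_mtx, raise_ipl(), try_acquire(), current_thread() and contested_spin() are hypothetical stand-ins for struct mtx, the alpha PAL interrupt-level swap, _obtain_lock(), curproc and _mtx_lock_spin(), declared only so the sketch is self-contained.

#include <stdint.h>

/* Hypothetical stand-in for struct mtx, for this sketch only. */
struct model_mtx {
	uintptr_t	mtx_lock;	/* owning thread, or unowned marker */
	unsigned int	mtx_recurse;	/* recursion count */
	unsigned int	mtx_saveintr;	/* saved interrupt level */
};

unsigned int	raise_ipl(void);				/* hypothetical */
uintptr_t	current_thread(void);				/* hypothetical */
int		try_acquire(struct model_mtx *, uintptr_t);	/* hypothetical */
void		contested_spin(struct model_mtx *, unsigned int); /* hypothetical */

static void
model_get_spin_lock(struct model_mtx *mp)
{
	unsigned int ipl = raise_ipl();	/* block interrupts before acquiring */

	if (try_acquire(mp, current_thread())) {
		/*
		 * Uncontested acquire: remember the saved interrupt level
		 * (the real code also issues a memory barrier here).
		 */
		mp->mtx_saveintr = ipl;
	} else if (mp->mtx_lock == current_thread()) {
		/* We already own the lock: recursion is handled inline. */
		mp->mtx_recurse++;
	} else {
		/* Only a genuinely contested lock pays for a function call. */
		contested_spin(mp, ipl);
	}
}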
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index 6d96337..851b1878 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -253,8 +253,8 @@ cpu_exit(p)
{
alpha_fpstate_drop(p);
- mtx_enter(&sched_lock, MTX_SPIN);
- mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH);
+ mtx_lock_spin(&sched_lock);
+ mtx_unlock_flags(&Giant, MTX_NOSWITCH);
mtx_assert(&Giant, MA_NOTOWNED);
/*
@@ -437,7 +437,7 @@ vm_page_zero_idle()
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
return(0);
- if (mtx_try_enter(&Giant, MTX_DEF)) {
+ if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -466,7 +466,7 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
return (1);
}
return (0);
diff --git a/sys/powerpc/include/mutex.h b/sys/powerpc/include/mutex.h
index 40717b0..564ad19 100644
--- a/sys/powerpc/include/mutex.h
+++ b/sys/powerpc/include/mutex.h
@@ -39,26 +39,12 @@
/*
* Debugging
*/
-#ifdef MUTEX_DEBUG
-
-#ifdef _KERN_MUTEX_C_
-char STR_IEN[] = "ps & IPL == IPL_0";
-char STR_IDIS[] = "ps & IPL == IPL_HIGH";
-char STR_SIEN[] = "mpp->mtx_saveintr == IPL_0";
-#else /* _KERN_MUTEX_C_ */
-extern char STR_IEN[];
-extern char STR_IDIS[];
-extern char STR_SIEN[];
-#endif /* _KERN_MUTEX_C_ */
-
-#endif /* MUTEX_DEBUG */
-
#define ASS_IEN MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
- == ALPHA_PSL_IPL_0, STR_IEN)
+ == ALPHA_PSL_IPL_0, "ps & IPL == IPL_0")
#define ASS_IDIS MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
- == ALPHA_PSL_IPL_HIGH, STR_IDIS)
+ == ALPHA_PSL_IPL_HIGH, "ps & IPL == IPL_HIGH")
#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr \
- == ALPHA_PSL_IPL_0, STR_SIEN)
+ == ALPHA_PSL_IPL_0, "mpp->mtx_saveintr == IPL_0")
#define mtx_legal2block() \
((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) == ALPHA_PSL_IPL_0)
@@ -68,34 +54,33 @@ extern char STR_SIEN[];
*--------------------------------------------------------------------------
*/
-#ifdef _KERN_MUTEX_C_
-
-#define _V(x) __STRING(x)
-
/*
- * Get a spin lock, handle recusion inline (as the less common case)
+ * Get a spin lock, handle recusion inline.
*/
-
-#define _getlock_spin_block(mp, tid, type) do { \
+#define _get_spin_lock(mp, tid, opts) do { \
u_int _ipl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _ipl); \
- else { \
+ if (!_obtain_lock((mp), (tid))) { \
+ if ((mp)->mtx_lock == (uintptr_t)(tid)) \
+ (mp)->mtx_recurse++; \
+ else \
+ _mtx_lock_spin((mp), (opts), _ipl, __FILE__, \
+ __LINE__); \
+ } else { \
alpha_mb(); \
(mp)->mtx_saveintr = _ipl; \
} \
} while (0)
-#undef _V
-
-#endif /* _KERN_MUTEX_C_ */
-
#endif /* _KERNEL */
#else /* !LOCORE */
/*
* Simple assembly macros to get and release non-recursive spin locks
+ *
+ * XXX: These are presently unused and cannot be used right now. Need to be
+ * re-written (they are wrong). If you plan to use this and still see
+ * this message, know not to unless you fix them first! :-)
*/
#define MTX_ENTER(lck) \
ldiq a0, ALPHA_PSL_IPL_HIGH; \
diff --git a/sys/powerpc/powerpc/mp_machdep.c b/sys/powerpc/powerpc/mp_machdep.c
index 20e16b9..6a46c28 100644
--- a/sys/powerpc/powerpc/mp_machdep.c
+++ b/sys/powerpc/powerpc/mp_machdep.c
@@ -150,7 +150,7 @@ void
smp_init_secondary(void)
{
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("smp_init_secondary: called\n");
CTR0(KTR_SMP, "smp_init_secondary");
@@ -163,7 +163,7 @@ smp_init_secondary(void)
mp_ncpus = PCPU_GET(cpuno) + 1;
spl0();
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
extern void smp_init_secondary_glue(void);
@@ -379,7 +379,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -393,7 +393,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
static u_int64_t
diff --git a/sys/powerpc/powerpc/procfs_machdep.c b/sys/powerpc/powerpc/procfs_machdep.c
index 229d2f9..c0766ca 100644
--- a/sys/powerpc/powerpc/procfs_machdep.c
+++ b/sys/powerpc/powerpc/procfs_machdep.c
@@ -86,12 +86,12 @@ procfs_read_regs(p, regs)
struct reg *regs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (fill_regs(p, regs));
}
@@ -101,12 +101,12 @@ procfs_write_regs(p, regs)
struct reg *regs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (set_regs(p, regs));
}
@@ -121,12 +121,12 @@ procfs_read_fpregs(p, fpregs)
struct fpreg *fpregs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (fill_fpregs(p, fpregs));
}
@@ -136,12 +136,12 @@ procfs_write_fpregs(p, fpregs)
struct fpreg *fpregs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (set_fpregs(p, fpregs));
}
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index 6d96337..851b1878 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -253,8 +253,8 @@ cpu_exit(p)
{
alpha_fpstate_drop(p);
- mtx_enter(&sched_lock, MTX_SPIN);
- mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH);
+ mtx_lock_spin(&sched_lock);
+ mtx_unlock_flags(&Giant, MTX_NOSWITCH);
mtx_assert(&Giant, MA_NOTOWNED);
/*
@@ -437,7 +437,7 @@ vm_page_zero_idle()
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
return(0);
- if (mtx_try_enter(&Giant, MTX_DEF)) {
+ if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -466,7 +466,7 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
return (1);
}
return (0);