path: root/include/linux/spinlock_api_smp.h
author     Thomas Gleixner <tglx@linutronix.de>  2009-11-09 15:21:34 +0000
committer  Thomas Gleixner <tglx@linutronix.de>  2009-11-13 20:53:28 +0100
commit     6beb000923882f6204ea2cfcd932e568e900803f
tree       1849bb1e7493bbf5e538d74cec18c6d6f4b16ced /include/linux/spinlock_api_smp.h
parent     156171c71a0dc4bce12b4408bb1591f8fe32dc1a
locking: Make inlining decision Kconfig based
commit 892a7c67 (locking: Allow arch-inlined spinlocks) implements the selection of which lock functions are inlined based on defines in arch/.../spinlock.h:

    #define __always_inline__LOCK_FUNCTION

Despite the name __always_inline__*, the lock functions can still be built out of line depending on config options. Also, if the arch does not set some of the inline defines, the generic code might set them, again depending on config options. This makes it unnecessarily hard to figure out when and which lock functions are inlined. Aside from that, it makes it much harder and messier for -rt to manipulate the lock functions.

Convert the inlining decision to CONFIG switches. Each lock function is inlined depending on CONFIG_INLINE_*. The configs implement the existing dependencies. The architecture code can select ARCH_INLINE_* to signal that it wants the corresponding lock function inlined. ARCH_INLINE_* is necessary because Kconfig ignores "depends on" restrictions when a config element is selected.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
LKML-Reference: <20091109151428.504477141@linutronix.de>
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
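For orientation, a minimal sketch of the resulting Kconfig pattern, shown here for one lock function (modeled on the CONFIG_INLINE_* / ARCH_INLINE_* scheme described above; the exact dependency expression is an assumption reconstructed from the preprocessor conditions removed in the diff below):

    # Opt-in knob with no prompt and no dependencies; an architecture
    # selects this from its own Kconfig to request inlining.
    config ARCH_INLINE_SPIN_LOCK
    	bool

    # Inline _spin_lock() only when the architecture asked for it and
    # neither lock debugging nor generic lockbreak is configured.
    config INLINE_SPIN_LOCK
    	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK

One such pair exists per lock function; the unlock variants would additionally fold in the !PREEMPT condition, mirroring the old __always_inline__* logic.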
Diffstat (limited to 'include/linux/spinlock_api_smp.h')
-rw-r--r--  include/linux/spinlock_api_smp.h  |  75
1 file changed, 28 insertions(+), 47 deletions(-)
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 7a7e18f..8264a7f 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,137 +60,118 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
-/*
- * We inline the unlock functions in the nondebug case:
- */
-#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
-#define __always_inline__spin_unlock
-#define __always_inline__read_unlock
-#define __always_inline__write_unlock
-#define __always_inline__spin_unlock_irq
-#define __always_inline__read_unlock_irq
-#define __always_inline__write_unlock_irq
-#endif
-
-#ifndef CONFIG_DEBUG_SPINLOCK
-#ifndef CONFIG_GENERIC_LOCKBREAK
-
-#ifdef __always_inline__spin_lock
+#ifdef CONFIG_INLINE_SPIN_LOCK
#define _spin_lock(lock) __spin_lock(lock)
#endif
-#ifdef __always_inline__read_lock
+#ifdef CONFIG_INLINE_READ_LOCK
#define _read_lock(lock) __read_lock(lock)
#endif
-#ifdef __always_inline__write_lock
+#ifdef CONFIG_INLINE_WRITE_LOCK
#define _write_lock(lock) __write_lock(lock)
#endif
-#ifdef __always_inline__spin_lock_bh
+#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif
-#ifdef __always_inline__read_lock_bh
+#ifdef CONFIG_INLINE_READ_LOCK_BH
#define _read_lock_bh(lock) __read_lock_bh(lock)
#endif
-#ifdef __always_inline__write_lock_bh
+#ifdef CONFIG_INLINE_WRITE_LOCK_BH
#define _write_lock_bh(lock) __write_lock_bh(lock)
#endif
-#ifdef __always_inline__spin_lock_irq
+#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif
-#ifdef __always_inline__read_lock_irq
+#ifdef CONFIG_INLINE_READ_LOCK_IRQ
#define _read_lock_irq(lock) __read_lock_irq(lock)
#endif
-#ifdef __always_inline__write_lock_irq
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
#define _write_lock_irq(lock) __write_lock_irq(lock)
#endif
-#ifdef __always_inline__spin_lock_irqsave
+#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif
-#ifdef __always_inline__read_lock_irqsave
+#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
#endif
-#ifdef __always_inline__write_lock_irqsave
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
#endif
-#endif /* !CONFIG_GENERIC_LOCKBREAK */
-
-#ifdef __always_inline__spin_trylock
+#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _spin_trylock(lock) __spin_trylock(lock)
#endif
-#ifdef __always_inline__read_trylock
+#ifdef CONFIG_INLINE_READ_TRYLOCK
#define _read_trylock(lock) __read_trylock(lock)
#endif
-#ifdef __always_inline__write_trylock
+#ifdef CONFIG_INLINE_WRITE_TRYLOCK
#define _write_trylock(lock) __write_trylock(lock)
#endif
-#ifdef __always_inline__spin_trylock_bh
+#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif
-#ifdef __always_inline__spin_unlock
+#ifdef CONFIG_INLINE_SPIN_UNLOCK
#define _spin_unlock(lock) __spin_unlock(lock)
#endif
-#ifdef __always_inline__read_unlock
+#ifdef CONFIG_INLINE_READ_UNLOCK
#define _read_unlock(lock) __read_unlock(lock)
#endif
-#ifdef __always_inline__write_unlock
+#ifdef CONFIG_INLINE_WRITE_UNLOCK
#define _write_unlock(lock) __write_unlock(lock)
#endif
-#ifdef __always_inline__spin_unlock_bh
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif
-#ifdef __always_inline__read_unlock_bh
+#ifdef CONFIG_INLINE_READ_UNLOCK_BH
#define _read_unlock_bh(lock) __read_unlock_bh(lock)
#endif
-#ifdef __always_inline__write_unlock_bh
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
#define _write_unlock_bh(lock) __write_unlock_bh(lock)
#endif
-#ifdef __always_inline__spin_unlock_irq
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif
-#ifdef __always_inline__read_unlock_irq
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
#define _read_unlock_irq(lock) __read_unlock_irq(lock)
#endif
-#ifdef __always_inline__write_unlock_irq
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
#define _write_unlock_irq(lock) __write_unlock_irq(lock)
#endif
-#ifdef __always_inline__spin_unlock_irqrestore
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif
-#ifdef __always_inline__read_unlock_irqrestore
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
#endif
-#ifdef __always_inline__write_unlock_irqrestore
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
#endif
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
static inline int __spin_trylock(spinlock_t *lock)
{
preempt_disable();
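
With this change applied, an architecture opts in by selecting the corresponding ARCH_INLINE_* symbols from its arch Kconfig; a hypothetical fragment (s390 was the original user of arch-inlined spinlocks, per the Acked-by above, but the symbol list here is illustrative only):

    config S390
    	def_bool y
    	select ARCH_INLINE_SPIN_LOCK
    	select ARCH_INLINE_SPIN_UNLOCK

Because ARCH_INLINE_* symbols are select targets, they deliberately carry no dependencies of their own; as the changelog notes, Kconfig ignores "depends on" restrictions of a selected symbol, so the actual inlining conditions live in the CONFIG_INLINE_* definitions instead.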