author     Ingo Molnar <mingo@elte.hu>              2005-09-10 00:25:56 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-09-10 10:06:21 -0700
commit     fb1c8f93d869b34cacb8b8932e2b83d96a19d720 (patch)
tree       a006d078aa02e421a7dc4793c335308204859d36 /arch/sparc64/lib
parent     4327edf6b8a7ac7dce144313947995538842d8fd (diff)
[PATCH] spinlock consolidation
This patch (written by me and also containing many suggestions of Arjan van
de Ven) does a major cleanup of the spinlock code. It does the following
things:

 - consolidates and enhances the spinlock/rwlock debugging code

 - simplifies the asm/spinlock.h files

 - encapsulates the raw spinlock type and moves generic spinlock features
   (such as ->break_lock) into the generic code

 - cleans up the spinlock code hierarchy to get rid of the spaghetti

Most notably, there is now only a single variant of the debugging code,
located in lib/spinlock_debug.c. (Previously we had one SMP debugging
variant per architecture, plus a separate generic one for UP builds.)

Also, I've enhanced the rwlock debugging facility; it will now track
write-owners. There is new spinlock-owner/CPU-tracking on SMP builds too.
All locks have lockup detection now, which works for both soft and hard
spin/rwlock lockups.

The arch-level include files now contain only the minimally necessary
subset of the spinlock code - all the rest that can be generalized now
lives in the generic headers:

 include/asm-i386/spinlock_types.h    |  16
 include/asm-x86_64/spinlock_types.h  |  16

I have also split up the various spinlock variants into separate files,
making it easier to see which does what. The new layout is:

   SMP                          |  UP
   -----------------------------|-----------------------------------
   asm/spinlock_types_smp.h     |  linux/spinlock_types_up.h
   linux/spinlock_types.h       |  linux/spinlock_types.h
   asm/spinlock_smp.h           |  linux/spinlock_up.h
   linux/spinlock_api_smp.h     |  linux/spinlock_api_up.h
   linux/spinlock.h             |  linux/spinlock.h

/*
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the __raw_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

All SMP and UP architectures are converted by this patch.

arm, i386, ia64, ppc, ppc64, s390/s390x and x64 were build-tested via
crosscompilers. m32r, mips, sh and sparc have not been tested yet, but
should be mostly fine.

From: Grant Grundler <grundler@parisc-linux.org>

  Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
  Builds 32-bit SMP kernel (not booted or tested). I did not try to build
  non-SMP kernels. That should be trivial to fix up later if necessary.

  I converted the bit ops atomic_hash lock to raw_spinlock_t. Doing so
  avoids some ugly nesting of linux/*.h and asm/*.h files. Those particular
  locks are well tested and contained entirely inside arch-specific code.
  I do NOT expect any new issues to arise with them.

  If someone does ever need to use debug/metrics with them, then they will
  need to unravel this hairball between spinlocks, atomic ops, and bit ops
  that exists only because parisc has exactly one atomic instruction: LDCW
  (load and clear word).

From: "Luck, Tony" <tony.luck@intel.com>

  ia64 fix

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjanv@infradead.org>
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se>
Signed-off-by: Benoit Boissinot <benoit.boissinot@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
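[Editorial example] To make the layering described above concrete, here is a
small standalone C sketch. It is illustrative only - not the kernel's actual
spinlock_types.h - and the field names (raw_lock, break_lock, owner_cpu,
owner) follow the description in the commit message rather than any specific
kernel version. It shows how a generic spinlock type can wrap the
architecture's raw lock word while the debug/owner-tracking state lives in
generic code:

/* Illustrative sketch only -- not the kernel's real headers. It mirrors the
 * layering described in the commit message: the architecture supplies only
 * a raw lock type, while generic code wraps it and adds ->break_lock and
 * the debug/owner-tracking fields.
 */
#include <stdio.h>

/* role of asm/spinlock_types.h: the arch-specific raw type */
typedef struct {
	volatile unsigned char lock;	/* 0 = unlocked, non-zero = locked */
} raw_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }

/* role of linux/spinlock_types.h: the generic type built on top of it */
typedef struct {
	raw_spinlock_t	raw_lock;	/* arch-provided part */
	unsigned int	break_lock;	/* generic: set while others spin on the lock */
	unsigned int	owner_cpu;	/* generic debug: CPU holding the lock */
	void		*owner;		/* generic debug: task holding the lock */
} spinlock_t;

#define SPIN_LOCK_UNLOCKED \
	(spinlock_t) { __RAW_SPIN_LOCK_UNLOCKED, 0, ~0U, NULL }

int main(void)
{
	spinlock_t lock = SPIN_LOCK_UNLOCKED;

	printf("raw word=%u break_lock=%u owner_cpu=%#x\n",
	       (unsigned)lock.raw_lock.lock, lock.break_lock, lock.owner_cpu);
	return 0;
}

Under this split, an architecture only has to supply the raw type and the
__raw_spin_*() operations on it; everything above that line is shared by all
architectures.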
Diffstat (limited to 'arch/sparc64/lib')
-rw-r--r--  arch/sparc64/lib/Makefile     |   1
-rw-r--r--  arch/sparc64/lib/debuglocks.c | 366
2 files changed, 0 insertions, 367 deletions
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 40dbeec..d968aeb 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,7 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
copy_in_user.o user_fixup.o memmove.o \
mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
-lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
obj-y += iomap.o
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
deleted file mode 100644
index f5f0b55..0000000
--- a/arch/sparc64/lib/debuglocks.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/* $Id: debuglocks.c,v 1.9 2001/11/17 00:10:48 davem Exp $
- * debuglocks.c: Debugging versions of SMP locking primitives.
- *
- * Copyright (C) 1998 David S. Miller (davem@redhat.com)
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_SMP
-
-static inline void show (char *str, spinlock_t *lock, unsigned long caller)
-{
- int cpu = smp_processor_id();
-
- printk("%s(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%x)\n",
- str, lock, cpu, (unsigned int) caller,
- lock->owner_pc, lock->owner_cpu);
-}
-
-static inline void show_read (char *str, rwlock_t *lock, unsigned long caller)
-{
- int cpu = smp_processor_id();
-
- printk("%s(%p) CPU#%d stuck at %08x, writer PC(%08x):CPU(%x)\n",
- str, lock, cpu, (unsigned int) caller,
- lock->writer_pc, lock->writer_cpu);
-}
-
-static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
-{
- int cpu = smp_processor_id();
- int i;
-
- printk("%s(%p) CPU#%d stuck at %08x\n",
- str, lock, cpu, (unsigned int) caller);
- printk("Writer: PC(%08x):CPU(%x)\n",
- lock->writer_pc, lock->writer_cpu);
- printk("Readers:");
- for (i = 0; i < NR_CPUS; i++)
- if (lock->reader_pc[i])
- printk(" %d[%08x]", i, lock->reader_pc[i]);
- printk("\n");
-}
-
-#undef INIT_STUCK
-#define INIT_STUCK 100000000
-
-void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller)
-{
- unsigned long val;
- int stuck = INIT_STUCK;
- int cpu = get_cpu();
- int shown = 0;
-
-again:
- __asm__ __volatile__("ldstub [%1], %0"
- : "=r" (val)
- : "r" (&(lock->lock))
- : "memory");
- membar_storeload_storestore();
- if (val) {
- while (lock->lock) {
- if (!--stuck) {
- if (shown++ <= 2)
- show(str, lock, caller);
- stuck = INIT_STUCK;
- }
- rmb();
- }
- goto again;
- }
- lock->owner_pc = ((unsigned int)caller);
- lock->owner_cpu = cpu;
- current->thread.smp_lock_count++;
- current->thread.smp_lock_pc = ((unsigned int)caller);
-
- put_cpu();
-}
-
-int _do_spin_trylock(spinlock_t *lock, unsigned long caller)
-{
- unsigned long val;
- int cpu = get_cpu();
-
- __asm__ __volatile__("ldstub [%1], %0"
- : "=r" (val)
- : "r" (&(lock->lock))
- : "memory");
- membar_storeload_storestore();
- if (!val) {
- lock->owner_pc = ((unsigned int)caller);
- lock->owner_cpu = cpu;
- current->thread.smp_lock_count++;
- current->thread.smp_lock_pc = ((unsigned int)caller);
- }
-
- put_cpu();
-
- return val == 0;
-}
-
-void _do_spin_unlock(spinlock_t *lock)
-{
- lock->owner_pc = 0;
- lock->owner_cpu = NO_PROC_ID;
- membar_storestore_loadstore();
- lock->lock = 0;
- current->thread.smp_lock_count--;
-}
-
-/* Keep INIT_STUCK the same... */
-
-void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller)
-{
- unsigned long val;
- int stuck = INIT_STUCK;
- int cpu = get_cpu();
- int shown = 0;
-
-wlock_again:
- /* Wait for any writer to go away. */
- while (((long)(rw->lock)) < 0) {
- if (!--stuck) {
- if (shown++ <= 2)
- show_read(str, rw, caller);
- stuck = INIT_STUCK;
- }
- rmb();
- }
- /* Try once to increment the counter. */
- __asm__ __volatile__(
-" ldx [%0], %%g1\n"
-" brlz,a,pn %%g1, 2f\n"
-" mov 1, %0\n"
-" add %%g1, 1, %%g7\n"
-" casx [%0], %%g1, %%g7\n"
-" sub %%g1, %%g7, %0\n"
-"2:" : "=r" (val)
- : "0" (&(rw->lock))
- : "g1", "g7", "memory");
- membar_storeload_storestore();
- if (val)
- goto wlock_again;
- rw->reader_pc[cpu] = ((unsigned int)caller);
- current->thread.smp_lock_count++;
- current->thread.smp_lock_pc = ((unsigned int)caller);
-
- put_cpu();
-}
-
-void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller)
-{
- unsigned long val;
- int stuck = INIT_STUCK;
- int cpu = get_cpu();
- int shown = 0;
-
- /* Drop our identity _first_. */
- rw->reader_pc[cpu] = 0;
- current->thread.smp_lock_count--;
-runlock_again:
- /* Spin trying to decrement the counter using casx. */
- __asm__ __volatile__(
-" membar #StoreLoad | #LoadLoad\n"
-" ldx [%0], %%g1\n"
-" sub %%g1, 1, %%g7\n"
-" casx [%0], %%g1, %%g7\n"
-" membar #StoreLoad | #StoreStore\n"
-" sub %%g1, %%g7, %0\n"
- : "=r" (val)
- : "0" (&(rw->lock))
- : "g1", "g7", "memory");
- if (val) {
- if (!--stuck) {
- if (shown++ <= 2)
- show_read(str, rw, caller);
- stuck = INIT_STUCK;
- }
- goto runlock_again;
- }
-
- put_cpu();
-}
-
-void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller)
-{
- unsigned long val;
- int stuck = INIT_STUCK;
- int cpu = get_cpu();
- int shown = 0;
-
-wlock_again:
- /* Spin while there is another writer. */
- while (((long)rw->lock) < 0) {
- if (!--stuck) {
- if (shown++ <= 2)
- show_write(str, rw, caller);
- stuck = INIT_STUCK;
- }
- rmb();
- }
-
- /* Try to acquire the write bit. */
- __asm__ __volatile__(
-" mov 1, %%g3\n"
-" sllx %%g3, 63, %%g3\n"
-" ldx [%0], %%g1\n"
-" brlz,pn %%g1, 1f\n"
-" or %%g1, %%g3, %%g7\n"
-" casx [%0], %%g1, %%g7\n"
-" membar #StoreLoad | #StoreStore\n"
-" ba,pt %%xcc, 2f\n"
-" sub %%g1, %%g7, %0\n"
-"1: mov 1, %0\n"
-"2:" : "=r" (val)
- : "0" (&(rw->lock))
- : "g3", "g1", "g7", "memory");
- if (val) {
- /* We couldn't get the write bit. */
- if (!--stuck) {
- if (shown++ <= 2)
- show_write(str, rw, caller);
- stuck = INIT_STUCK;
- }
- goto wlock_again;
- }
- if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
- /* Readers still around, drop the write
- * lock, spin, and try again.
- */
- if (!--stuck) {
- if (shown++ <= 2)
- show_write(str, rw, caller);
- stuck = INIT_STUCK;
- }
- __asm__ __volatile__(
-" mov 1, %%g3\n"
-" sllx %%g3, 63, %%g3\n"
-"1: ldx [%0], %%g1\n"
-" andn %%g1, %%g3, %%g7\n"
-" casx [%0], %%g1, %%g7\n"
-" cmp %%g1, %%g7\n"
-" membar #StoreLoad | #StoreStore\n"
-" bne,pn %%xcc, 1b\n"
-" nop"
- : /* no outputs */
- : "r" (&(rw->lock))
- : "g3", "g1", "g7", "cc", "memory");
- while(rw->lock != 0) {
- if (!--stuck) {
- if (shown++ <= 2)
- show_write(str, rw, caller);
- stuck = INIT_STUCK;
- }
- rmb();
- }
- goto wlock_again;
- }
-
- /* We have it, say who we are. */
- rw->writer_pc = ((unsigned int)caller);
- rw->writer_cpu = cpu;
- current->thread.smp_lock_count++;
- current->thread.smp_lock_pc = ((unsigned int)caller);
-
- put_cpu();
-}
-
-void _do_write_unlock(rwlock_t *rw, unsigned long caller)
-{
- unsigned long val;
- int stuck = INIT_STUCK;
- int shown = 0;
-
- /* Drop our identity _first_ */
- rw->writer_pc = 0;
- rw->writer_cpu = NO_PROC_ID;
- current->thread.smp_lock_count--;
-wlock_again:
- __asm__ __volatile__(
-" membar #StoreLoad | #LoadLoad\n"
-" mov 1, %%g3\n"
-" sllx %%g3, 63, %%g3\n"
-" ldx [%0], %%g1\n"
-" andn %%g1, %%g3, %%g7\n"
-" casx [%0], %%g1, %%g7\n"
-" membar #StoreLoad | #StoreStore\n"
-" sub %%g1, %%g7, %0\n"
- : "=r" (val)
- : "0" (&(rw->lock))
- : "g3", "g1", "g7", "memory");
- if (val) {
- if (!--stuck) {
- if (shown++ <= 2)
- show_write("write_unlock", rw, caller);
- stuck = INIT_STUCK;
- }
- goto wlock_again;
- }
-}
-
-int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller)
-{
- unsigned long val;
- int cpu = get_cpu();
-
- /* Try to acquire the write bit. */
- __asm__ __volatile__(
-" mov 1, %%g3\n"
-" sllx %%g3, 63, %%g3\n"
-" ldx [%0], %%g1\n"
-" brlz,pn %%g1, 1f\n"
-" or %%g1, %%g3, %%g7\n"
-" casx [%0], %%g1, %%g7\n"
-" membar #StoreLoad | #StoreStore\n"
-" ba,pt %%xcc, 2f\n"
-" sub %%g1, %%g7, %0\n"
-"1: mov 1, %0\n"
-"2:" : "=r" (val)
- : "0" (&(rw->lock))
- : "g3", "g1", "g7", "memory");
-
- if (val) {
- put_cpu();
- return 0;
- }
-
- if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
- /* Readers still around, drop the write
- * lock, return failure.
- */
- __asm__ __volatile__(
-" mov 1, %%g3\n"
-" sllx %%g3, 63, %%g3\n"
-"1: ldx [%0], %%g1\n"
-" andn %%g1, %%g3, %%g7\n"
-" casx [%0], %%g1, %%g7\n"
-" cmp %%g1, %%g7\n"
-" membar #StoreLoad | #StoreStore\n"
-" bne,pn %%xcc, 1b\n"
-" nop"
- : /* no outputs */
- : "r" (&(rw->lock))
- : "g3", "g1", "g7", "cc", "memory");
-
- put_cpu();
-
- return 0;
- }
-
- /* We have it, say who we are. */
- rw->writer_pc = ((unsigned int)caller);
- rw->writer_cpu = cpu;
- current->thread.smp_lock_count++;
- current->thread.smp_lock_pc = ((unsigned int)caller);
-
- put_cpu();
-
- return 1;
-}
-
-#endif /* CONFIG_SMP */
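
[Editorial example] The file removed above detects lockups by spinning with a
countdown (INIT_STUCK) and, when the countdown expires, printing the stuck
lock together with its last recorded owner - the same idea that the patch
centralizes in the generic lib/spinlock_debug.c. Below is a hedged,
user-space C sketch of that pattern; the names and threshold are illustrative,
and C11's atomic_flag stands in for sparc64's ldstub test-and-set instruction:

/* Illustrative user-space sketch of the "stuck counter" lockup-detection
 * pattern from the removed debuglocks.c; atomic_flag replaces the sparc64
 * ldstub instruction and printk is replaced by fprintf.
 */
#include <stdatomic.h>
#include <stdio.h>

#define INIT_STUCK 100000000	/* report after this many failed spins */

struct debug_spinlock {
	atomic_flag  lock;	/* test-and-set word */
	unsigned int owner_pc;	/* caller address of the current holder */
	int          owner_cpu;	/* CPU of the current holder */
};

static void debug_spin_lock(struct debug_spinlock *l, unsigned int caller, int cpu)
{
	long stuck = INIT_STUCK;

	/* atomic test-and-set, like ldstub: returns the previous value */
	while (atomic_flag_test_and_set_explicit(&l->lock, memory_order_acquire)) {
		if (!--stuck) {
			/* lock appears stuck: report the last known owner */
			fprintf(stderr,
				"spinlock(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%d)\n",
				(void *)l, cpu, caller, l->owner_pc, l->owner_cpu);
			stuck = INIT_STUCK;
		}
	}
	l->owner_pc = caller;
	l->owner_cpu = cpu;
}

static void debug_spin_unlock(struct debug_spinlock *l)
{
	/* drop our identity first, then release the lock word */
	l->owner_pc = 0;
	l->owner_cpu = -1;
	atomic_flag_clear_explicit(&l->lock, memory_order_release);
}

int main(void)
{
	struct debug_spinlock l = { ATOMIC_FLAG_INIT, 0, -1 };

	debug_spin_lock(&l, 0x1234, 0);
	debug_spin_unlock(&l);
	return 0;
}

After this patch, the kernel provides this report-and-keep-spinning behaviour
once, in the generic debugging code, instead of once per architecture.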