author | Ingo Molnar <mingo@elte.hu> | 2005-09-10 00:25:56 -0700
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-09-10 10:06:21 -0700
commit | fb1c8f93d869b34cacb8b8932e2b83d96a19d720 (patch)
tree | a006d078aa02e421a7dc4793c335308204859d36 /arch
parent | 4327edf6b8a7ac7dce144313947995538842d8fd (diff)
[PATCH] spinlock consolidation
This patch (written by me, and also containing many suggestions from Arjan van
de Ven) does a major cleanup of the spinlock code. It does the following
things:
- consolidates and enhances the spinlock/rwlock debugging code
- simplifies the asm/spinlock.h files
- encapsulates the raw spinlock type and moves generic spinlock
features (such as ->break_lock) into the generic code (see the sketch after this list).
- cleans up the spinlock code hierarchy to get rid of the spaghetti.
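To illustrate the encapsulation item above, here is a minimal sketch of the resulting two-level lock type. It follows the field names used elsewhere in this message (raw_lock, break_lock), but the exact layout and debug fields are abbreviated and should be read as illustrative, not authoritative.

```c
/*
 * Sketch only: simplified view of the layering this patch introduces.
 * The arch supplies the raw type (asm/spinlock_types.h), while all
 * arch-independent features live in the generic wrapper
 * (linux/spinlock_types.h).
 */
typedef struct {
	volatile unsigned int slock;	/* arch-specific lock word */
} raw_spinlock_t;

typedef struct {
	raw_spinlock_t raw_lock;	/* the only part the arch sees */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
	unsigned int break_lock;	/* generic: lock-break support */
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;	/* generic: debugging/owner info */
	void *owner;
#endif
} spinlock_t;
```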
Most notably there's now only a single variant of the debugging code,
located in lib/spinlock_debug.c. (previously we had one SMP debugging
variant per architecture, plus a separate generic one for UP builds)
Also, I've enhanced the rwlock debugging facility; it now tracks
write-owners. There is new spinlock-owner/CPU tracking on SMP builds too.
All locks have lockup detection now, which will work for both soft and hard
spin/rwlock lockups.
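Since the message mentions lockup detection and owner/CPU tracking, here is a hedged sketch of how such a debug slow path can work. It is modeled on the consolidated lib/spinlock_debug.c approach, but the loop bound, message text and function name are illustrative rather than the exact implementation.

```c
/*
 * Illustrative sketch of a debug-spinlock slow path: spin for a bounded
 * time, then report a suspected lockup with owner information, and keep
 * trying so the system stays observable.
 */
static void __spin_lock_debug(spinlock_t *lock)
{
	int print_once = 1;
	u64 i;

	for (;;) {
		/* spin for a bounded number of iterations ... */
		for (i = 0; i < loops_per_jiffy * HZ; i++) {
			cpu_relax();
			if (__raw_spin_trylock(&lock->raw_lock))
				return;
		}
		/* ... then report a suspected soft/hard lockup once */
		if (print_once) {
			print_once = 0;
			printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
			       smp_processor_id(), current->comm,
			       current->pid, lock);
			dump_stack();
		}
	}
}
```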
The arch-level include files now contain only the minimally necessary
subset of the spinlock code; everything that can be generalized now
lives in the generic headers (a sketch of one such minimal header follows the list below):
include/asm-i386/spinlock_types.h | 16
include/asm-x86_64/spinlock_types.h | 16
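For context, this is roughly what one of those 16-line arch headers now contains. It is patterned on the i386 variant added by this patch; treat the exact values and field names as illustrative.

```c
/*
 * Sketch of a minimal asm/spinlock_types.h after this patch: the arch
 * only provides the raw lock layouts and their unlocked initializers.
 */
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H

typedef struct {
	volatile unsigned int slock;
} raw_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }

typedef struct {
	volatile unsigned int lock;
} raw_rwlock_t;

#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }

#endif
```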
I have also split up the various spinlock variants into separate files,
making it easier to see which does what. The new layout is:
SMP                          | UP
-----------------------------|-----------------------------------
asm/spinlock_types.h         | linux/spinlock_types_up.h
linux/spinlock_types.h       | linux/spinlock_types.h
asm/spinlock.h               | linux/spinlock_up.h
linux/spinlock_api_smp.h     | linux/spinlock_api_up.h
linux/spinlock.h             | linux/spinlock.h
/*
* here's the role of the various spinlock/rwlock related include files:
*
* on SMP builds:
*
* asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
*
* linux/spinlock_api_smp.h:
* contains the prototypes for the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*
* on UP builds:
*
* linux/spinlock_types_up.h:
* contains the generic, simplified UP spinlock type.
* (which is an empty structure on non-debug builds)
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* linux/spinlock_up.h:
* contains the __raw_spin_*()/etc. version of UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
* (included on UP-non-debug builds:)
*
* linux/spinlock_api_up.h:
* builds the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*/
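To make the layering above concrete, here is a hedged sketch of how a spin_lock() call resolves on an SMP build. The names follow the convention described in the comment, but the bodies are simplified; the real out-of-line functions also carry debugging hooks and live in kernel/spinlock.c.

```c
/*
 * Simplified sketch of the SMP call chain these headers set up.
 */

/* linux/spinlock.h: the API the rest of the kernel uses */
#define spin_lock(lock)		_spin_lock(lock)

/* linux/spinlock_api_smp.h: prototype of the out-of-line variant */
void __lockfunc _spin_lock(spinlock_t *lock);

/* kernel/spinlock.c (sketch): generic wrapper adding preemption handling */
void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	__raw_spin_lock(&lock->raw_lock);	/* arch code from asm/spinlock.h */
}
```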
All SMP and UP architectures are converted by this patch.
arm, i386, ia64, ppc, ppc64, s390/s390x and x64 were build-tested via
cross-compilers. m32r, mips, sh and sparc have not been tested yet, but should
be mostly fine.
From: Grant Grundler <grundler@parisc-linux.org>
Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
Builds 32-bit SMP kernel (not booted or tested). I did not try to build
non-SMP kernels. That should be trivial to fix up later if necessary.
I converted the bit-ops atomic_hash lock to raw_spinlock_t. Doing so avoids
some ugly nesting of linux/*.h and asm/*.h files. Those particular locks
are well tested and contained entirely inside arch specific code. I do NOT
expect any new issues to arise with them.
If someone ever does need debugging or metrics with them, they will need to
unravel this hairball between spinlocks, atomic ops, and bit ops, which
exists only because parisc has exactly one atomic instruction: LDCW
(load and clear word).
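For background on that hairball, here is a hedged sketch of the hashed-lock scheme the parisc bit ops rely on. ATOMIC_HASH_SIZE, the hash shift and the sketch_set_bit() helper are illustrative stand-ins, not the exact arch code.

```c
/*
 * Sketch of parisc's hashed spinlocks for atomic/bit ops: the only
 * atomic primitive is LDCW, so every atomic or bit operation takes a
 * small spinlock chosen by hashing the data address.  After this patch
 * the array holds raw_spinlock_t, which keeps the generic spinlock
 * debugging layer out of the asm/ headers.
 */
#define ATOMIC_HASH_SIZE	4	/* illustrative size */
#define ATOMIC_HASH(a) \
	(&__atomic_hash[(((unsigned long)(a)) >> 8) & (ATOMIC_HASH_SIZE - 1)])

raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE - 1)] = __RAW_SPIN_LOCK_UNLOCKED
};

/* hypothetical helper showing how a locked bit op would use the hash */
static inline void sketch_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & (BITS_PER_LONG - 1));
	unsigned long flags;

	addr += nr / BITS_PER_LONG;		/* word that holds the bit */
	local_irq_save(flags);
	__raw_spin_lock(ATOMIC_HASH(addr));
	*addr |= mask;
	__raw_spin_unlock(ATOMIC_HASH(addr));
	local_irq_restore(flags);
}
```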
From: "Luck, Tony" <tony.luck@intel.com>
ia64 fix
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjanv@infradead.org>
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se>
Signed-off-by: Benoit Boissinot <benoit.boissinot@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/alpha/kernel/alpha_ksyms.c | 9
-rw-r--r-- | arch/alpha/kernel/smp.c | 172
-rw-r--r-- | arch/ia64/kernel/mca.c | 11
-rw-r--r-- | arch/m32r/kernel/smp.c | 48
-rw-r--r-- | arch/mips/lib/dec_and_lock.c | 8
-rw-r--r-- | arch/parisc/lib/Makefile | 2
-rw-r--r-- | arch/parisc/lib/bitops.c | 4
-rw-r--r-- | arch/parisc/lib/debuglocks.c | 277
-rw-r--r-- | arch/ppc/lib/Makefile | 1
-rw-r--r-- | arch/ppc/lib/dec_and_lock.c | 8
-rw-r--r-- | arch/ppc64/lib/dec_and_lock.c | 8
-rw-r--r-- | arch/ppc64/lib/locks.c | 14
-rw-r--r-- | arch/s390/lib/spinlock.c | 12
-rw-r--r-- | arch/sparc/kernel/sparc_ksyms.c | 10
-rw-r--r-- | arch/sparc/lib/Makefile | 2
-rw-r--r-- | arch/sparc/lib/debuglocks.c | 202
-rw-r--r-- | arch/sparc64/kernel/process.c | 5
-rw-r--r-- | arch/sparc64/kernel/sparc64_ksyms.c | 5
-rw-r--r-- | arch/sparc64/lib/Makefile | 1
-rw-r--r-- | arch/sparc64/lib/debuglocks.c | 366
20 files changed, 28 insertions, 1137 deletions
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c index fc5ef90..24ae9a3 100644 --- a/arch/alpha/kernel/alpha_ksyms.c +++ b/arch/alpha/kernel/alpha_ksyms.c @@ -185,15 +185,6 @@ EXPORT_SYMBOL(smp_num_cpus); EXPORT_SYMBOL(smp_call_function); EXPORT_SYMBOL(smp_call_function_on_cpu); EXPORT_SYMBOL(_atomic_dec_and_lock); -#ifdef CONFIG_DEBUG_SPINLOCK -EXPORT_SYMBOL(_raw_spin_unlock); -EXPORT_SYMBOL(debug_spin_lock); -EXPORT_SYMBOL(debug_spin_trylock); -#endif -#ifdef CONFIG_DEBUG_RWLOCK -EXPORT_SYMBOL(_raw_write_lock); -EXPORT_SYMBOL(_raw_read_lock); -#endif EXPORT_SYMBOL(cpu_present_mask); #endif /* CONFIG_SMP */ diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index e211aa7..da0be34 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -989,175 +989,3 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page, preempt_enable(); } - -#ifdef CONFIG_DEBUG_SPINLOCK -void -_raw_spin_unlock(spinlock_t * lock) -{ - mb(); - lock->lock = 0; - - lock->on_cpu = -1; - lock->previous = NULL; - lock->task = NULL; - lock->base_file = "none"; - lock->line_no = 0; -} - -void -debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no) -{ - long tmp; - long stuck; - void *inline_pc = __builtin_return_address(0); - unsigned long started = jiffies; - int printed = 0; - int cpu = smp_processor_id(); - - stuck = 1L << 30; - try_again: - - /* Use sub-sections to put the actual loop at the end - of this object file's text section so as to perfect - branch prediction. */ - __asm__ __volatile__( - "1: ldl_l %0,%1\n" - " subq %2,1,%2\n" - " blbs %0,2f\n" - " or %0,1,%0\n" - " stl_c %0,%1\n" - " beq %0,3f\n" - "4: mb\n" - ".subsection 2\n" - "2: ldl %0,%1\n" - " subq %2,1,%2\n" - "3: blt %2,4b\n" - " blbs %0,2b\n" - " br 1b\n" - ".previous" - : "=r" (tmp), "=m" (lock->lock), "=r" (stuck) - : "m" (lock->lock), "2" (stuck) : "memory"); - - if (stuck < 0) { - printk(KERN_WARNING - "%s:%d spinlock stuck in %s at %p(%d)" - " owner %s at %p(%d) %s:%d\n", - base_file, line_no, - current->comm, inline_pc, cpu, - lock->task->comm, lock->previous, - lock->on_cpu, lock->base_file, lock->line_no); - stuck = 1L << 36; - printed = 1; - goto try_again; - } - - /* Exiting. Got the lock. 
*/ - lock->on_cpu = cpu; - lock->previous = inline_pc; - lock->task = current; - lock->base_file = base_file; - lock->line_no = line_no; - - if (printed) { - printk(KERN_WARNING - "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n", - base_file, line_no, current->comm, inline_pc, - cpu, jiffies - started); - } -} - -int -debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no) -{ - int ret; - if ((ret = !test_and_set_bit(0, lock))) { - lock->on_cpu = smp_processor_id(); - lock->previous = __builtin_return_address(0); - lock->task = current; - } else { - lock->base_file = base_file; - lock->line_no = line_no; - } - return ret; -} -#endif /* CONFIG_DEBUG_SPINLOCK */ - -#ifdef CONFIG_DEBUG_RWLOCK -void _raw_write_lock(rwlock_t * lock) -{ - long regx, regy; - int stuck_lock, stuck_reader; - void *inline_pc = __builtin_return_address(0); - - try_again: - - stuck_lock = 1<<30; - stuck_reader = 1<<30; - - __asm__ __volatile__( - "1: ldl_l %1,%0\n" - " blbs %1,6f\n" - " blt %1,8f\n" - " mov 1,%1\n" - " stl_c %1,%0\n" - " beq %1,6f\n" - "4: mb\n" - ".subsection 2\n" - "6: blt %3,4b # debug\n" - " subl %3,1,%3 # debug\n" - " ldl %1,%0\n" - " blbs %1,6b\n" - "8: blt %4,4b # debug\n" - " subl %4,1,%4 # debug\n" - " ldl %1,%0\n" - " blt %1,8b\n" - " br 1b\n" - ".previous" - : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy), - "=&r" (stuck_lock), "=&r" (stuck_reader) - : "m" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory"); - - if (stuck_lock < 0) { - printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc); - goto try_again; - } - if (stuck_reader < 0) { - printk(KERN_WARNING "write_lock stuck on readers at %p\n", - inline_pc); - goto try_again; - } -} - -void _raw_read_lock(rwlock_t * lock) -{ - long regx; - int stuck_lock; - void *inline_pc = __builtin_return_address(0); - - try_again: - - stuck_lock = 1<<30; - - __asm__ __volatile__( - "1: ldl_l %1,%0;" - " blbs %1,6f;" - " subl %1,2,%1;" - " stl_c %1,%0;" - " beq %1,6f;" - "4: mb\n" - ".subsection 2\n" - "6: ldl %1,%0;" - " blt %2,4b # debug\n" - " subl %2,1,%2 # debug\n" - " blbs %1,6b;" - " br 1b\n" - ".previous" - : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock) - : "m" (*(volatile int *)lock), "2" (stuck_lock) : "memory"); - - if (stuck_lock < 0) { - printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc); - goto try_again; - } -} -#endif /* CONFIG_DEBUG_RWLOCK */ diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 4ebbf39..8d48420 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -491,12 +491,7 @@ init_handler_platform (pal_min_state_area_t *ms, unw_init_from_interruption(&info, current, pt, sw); ia64_do_show_stack(&info, NULL); -#ifdef CONFIG_SMP - /* read_trylock() would be handy... */ - if (!tasklist_lock.write_lock) - read_lock(&tasklist_lock); -#endif - { + if (read_trylock(&tasklist_lock)) { struct task_struct *g, *t; do_each_thread (g, t) { if (t == current) @@ -506,10 +501,6 @@ init_handler_platform (pal_min_state_area_t *ms, show_stack(t, NULL); } while_each_thread (g, t); } -#ifdef CONFIG_SMP - if (!tasklist_lock.write_lock) - read_unlock(&tasklist_lock); -#endif printk("\nINIT dump complete. 
Please reboot now.\n"); while (1); /* hang city if no debugger */ diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index 48b187f..a4576ac 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c @@ -892,7 +892,6 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num, int try) { spinlock_t *ipilock; - unsigned long flags = 0; volatile unsigned long *ipicr_addr; unsigned long ipicr_val; unsigned long my_physid_mask; @@ -916,50 +915,27 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num, * write IPICRi (send IPIi) * unlock ipi_lock[i] */ + spin_lock(ipilock); __asm__ __volatile__ ( - ";; LOCK ipi_lock[i] \n\t" + ";; CHECK IPICRi == 0 \n\t" ".fillinsn \n" "1: \n\t" - "mvfc %1, psw \n\t" - "clrpsw #0x40 -> nop \n\t" - DCACHE_CLEAR("r4", "r5", "%2") - "lock r4, @%2 \n\t" - "addi r4, #-1 \n\t" - "unlock r4, @%2 \n\t" - "mvtc %1, psw \n\t" - "bnez r4, 2f \n\t" - LOCK_SECTION_START(".balign 4 \n\t") - ".fillinsn \n" - "2: \n\t" - "ld r4, @%2 \n\t" - "blez r4, 2b \n\t" + "ld %0, @%1 \n\t" + "and %0, %4 \n\t" + "beqz %0, 2f \n\t" + "bnez %3, 3f \n\t" "bra 1b \n\t" - LOCK_SECTION_END - ";; CHECK IPICRi == 0 \n\t" - ".fillinsn \n" - "3: \n\t" - "ld %0, @%3 \n\t" - "and %0, %6 \n\t" - "beqz %0, 4f \n\t" - "bnez %5, 5f \n\t" - "bra 3b \n\t" ";; WRITE IPICRi (send IPIi) \n\t" ".fillinsn \n" - "4: \n\t" - "st %4, @%3 \n\t" - ";; UNLOCK ipi_lock[i] \n\t" + "2: \n\t" + "st %2, @%1 \n\t" ".fillinsn \n" - "5: \n\t" - "ldi r4, #1 \n\t" - "st r4, @%2 \n\t" + "3: \n\t" : "=&r"(ipicr_val) - : "r"(flags), "r"(&ipilock->slock), "r"(ipicr_addr), - "r"(mask), "r"(try), "r"(my_physid_mask) - : "memory", "r4" -#ifdef CONFIG_CHIP_M32700_TS1 - , "r5" -#endif /* CONFIG_CHIP_M32700_TS1 */ + : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask) + : "memory" ); + spin_unlock(ipilock); return ipicr_val; } diff --git a/arch/mips/lib/dec_and_lock.c b/arch/mips/lib/dec_and_lock.c index e44e957..fd82c84 100644 --- a/arch/mips/lib/dec_and_lock.c +++ b/arch/mips/lib/dec_and_lock.c @@ -20,14 +20,7 @@ * has a cmpxchg, and where atomic->value is an int holding * the value of the atomic (i.e. the high bits aren't used * for a lock or anything like that). - * - * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h - * if spinlocks are empty and thus atomic_dec_and_lock is defined - * to be atomic_dec_and_test - in that case we don't need it - * defined here as well. */ - -#ifndef ATOMIC_DEC_AND_LOCK int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) { int counter; @@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) } EXPORT_SYMBOL(_atomic_dec_and_lock); -#endif /* ATOMIC_DEC_AND_LOCK */ diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile index 7bf7056..5f2e690 100644 --- a/arch/parisc/lib/Makefile +++ b/arch/parisc/lib/Makefile @@ -5,5 +5,3 @@ lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o obj-y := iomap.o - -lib-$(CONFIG_SMP) += debuglocks.o diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index 2de182f..90f400b 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -13,8 +13,8 @@ #include <asm/atomic.h> #ifdef CONFIG_SMP -spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { - [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED +raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { + [0 ... 
(ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED }; #endif diff --git a/arch/parisc/lib/debuglocks.c b/arch/parisc/lib/debuglocks.c deleted file mode 100644 index 1b33fe6..0000000 --- a/arch/parisc/lib/debuglocks.c +++ /dev/null @@ -1,277 +0,0 @@ -/* - * Debugging versions of SMP locking primitives. - * - * Copyright (C) 2004 Thibaut VARENE <varenet@parisc-linux.org> - * - * Some code stollen from alpha & sparc64 ;) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * We use pdc_printf() throughout the file for all output messages, to avoid - * losing messages because of disabled interrupts. Since we're using these - * messages for debugging purposes, it makes sense not to send them to the - * linux console. - */ - - -#include <linux/config.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/spinlock.h> -#include <linux/hardirq.h> /* in_interrupt() */ -#include <asm/system.h> -#include <asm/hardirq.h> /* in_interrupt() */ -#include <asm/pdc.h> - -#undef INIT_STUCK -#define INIT_STUCK 1L << 30 - -#ifdef CONFIG_DEBUG_SPINLOCK - - -void _dbg_spin_lock(spinlock_t * lock, const char *base_file, int line_no) -{ - volatile unsigned int *a; - long stuck = INIT_STUCK; - void *inline_pc = __builtin_return_address(0); - unsigned long started = jiffies; - int printed = 0; - int cpu = smp_processor_id(); - -try_again: - - /* Do the actual locking */ - /* <T-Bone> ggg: we can't get stuck on the outter loop? - * <ggg> T-Bone: We can hit the outer loop - * alot if multiple CPUs are constantly racing for a lock - * and the backplane is NOT fair about which CPU sees - * the update first. But it won't hang since every failed - * attempt will drop us back into the inner loop and - * decrement `stuck'. - * <ggg> K-class and some of the others are NOT fair in the HW - * implementation so we could see false positives. - * But fixing the lock contention is easier than - * fixing the HW to be fair. - * <tausq> __ldcw() returns 1 if we get the lock; otherwise we - * spin until the value of the lock changes, or we time out. - */ - mb(); - a = __ldcw_align(lock); - while (stuck && (__ldcw(a) == 0)) - while ((*a == 0) && --stuck); - mb(); - - if (unlikely(stuck <= 0)) { - pdc_printf( - "%s:%d: spin_lock(%s/%p) stuck in %s at %p(%d)" - " owned by %s:%d in %s at %p(%d)\n", - base_file, line_no, lock->module, lock, - current->comm, inline_pc, cpu, - lock->bfile, lock->bline, lock->task->comm, - lock->previous, lock->oncpu); - stuck = INIT_STUCK; - printed = 1; - goto try_again; - } - - /* Exiting. Got the lock. 
*/ - lock->oncpu = cpu; - lock->previous = inline_pc; - lock->task = current; - lock->bfile = (char *)base_file; - lock->bline = line_no; - - if (unlikely(printed)) { - pdc_printf( - "%s:%d: spin_lock grabbed in %s at %p(%d) %ld ticks\n", - base_file, line_no, current->comm, inline_pc, - cpu, jiffies - started); - } -} - -void _dbg_spin_unlock(spinlock_t * lock, const char *base_file, int line_no) -{ - CHECK_LOCK(lock); - volatile unsigned int *a; - mb(); - a = __ldcw_align(lock); - if (unlikely((*a != 0) && lock->babble)) { - lock->babble--; - pdc_printf( - "%s:%d: spin_unlock(%s:%p) not locked\n", - base_file, line_no, lock->module, lock); - } - *a = 1; - mb(); -} - -int _dbg_spin_trylock(spinlock_t * lock, const char *base_file, int line_no) -{ - int ret; - volatile unsigned int *a; - mb(); - a = __ldcw_align(lock); - ret = (__ldcw(a) != 0); - mb(); - if (ret) { - lock->oncpu = smp_processor_id(); - lock->previous = __builtin_return_address(0); - lock->task = current; - } else { - lock->bfile = (char *)base_file; - lock->bline = line_no; - } - return ret; -} - -#endif /* CONFIG_DEBUG_SPINLOCK */ - -#ifdef CONFIG_DEBUG_RWLOCK - -/* Interrupts trouble detailed explanation, thx Grant: - * - * o writer (wants to modify data) attempts to acquire the rwlock - * o He gets the write lock. - * o Interupts are still enabled, we take an interrupt with the - * write still holding the lock. - * o interrupt handler tries to acquire the rwlock for read. - * o deadlock since the writer can't release it at this point. - * - * In general, any use of spinlocks that competes between "base" - * level and interrupt level code will risk deadlock. Interrupts - * need to be disabled in the base level routines to avoid it. - * Or more precisely, only the IRQ the base level routine - * is competing with for the lock. But it's more efficient/faster - * to just disable all interrupts on that CPU to guarantee - * once it gets the lock it can release it quickly too. - */ - -void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline) -{ - void *inline_pc = __builtin_return_address(0); - unsigned long started = jiffies; - long stuck = INIT_STUCK; - int printed = 0; - int cpu = smp_processor_id(); - - if(unlikely(in_interrupt())) { /* acquiring write lock in interrupt context, bad idea */ - pdc_printf("write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline); - BUG(); - } - - /* Note: if interrupts are disabled (which is most likely), the printk - will never show on the console. We might need a polling method to flush - the dmesg buffer anyhow. */ - -retry: - _raw_spin_lock(&rw->lock); - - if(rw->counter != 0) { - /* this basically never happens */ - _raw_spin_unlock(&rw->lock); - - stuck--; - if ((unlikely(stuck <= 0)) && (rw->counter < 0)) { - pdc_printf( - "%s:%d: write_lock stuck on writer" - " in %s at %p(%d) %ld ticks\n", - bfile, bline, current->comm, inline_pc, - cpu, jiffies - started); - stuck = INIT_STUCK; - printed = 1; - } - else if (unlikely(stuck <= 0)) { - pdc_printf( - "%s:%d: write_lock stuck on reader" - " in %s at %p(%d) %ld ticks\n", - bfile, bline, current->comm, inline_pc, - cpu, jiffies - started); - stuck = INIT_STUCK; - printed = 1; - } - - while(rw->counter != 0); - - goto retry; - } - - /* got it. 
now leave without unlocking */ - rw->counter = -1; /* remember we are locked */ - - if (unlikely(printed)) { - pdc_printf( - "%s:%d: write_lock grabbed in %s at %p(%d) %ld ticks\n", - bfile, bline, current->comm, inline_pc, - cpu, jiffies - started); - } -} - -int _dbg_write_trylock(rwlock_t *rw, const char *bfile, int bline) -{ -#if 0 - void *inline_pc = __builtin_return_address(0); - int cpu = smp_processor_id(); -#endif - - if(unlikely(in_interrupt())) { /* acquiring write lock in interrupt context, bad idea */ - pdc_printf("write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline); - BUG(); - } - - /* Note: if interrupts are disabled (which is most likely), the printk - will never show on the console. We might need a polling method to flush - the dmesg buffer anyhow. */ - - _raw_spin_lock(&rw->lock); - - if(rw->counter != 0) { - /* this basically never happens */ - _raw_spin_unlock(&rw->lock); - return 0; - } - - /* got it. now leave without unlocking */ - rw->counter = -1; /* remember we are locked */ -#if 0 - pdc_printf("%s:%d: try write_lock grabbed in %s at %p(%d)\n", - bfile, bline, current->comm, inline_pc, cpu); -#endif - return 1; -} - -void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline) -{ -#if 0 - void *inline_pc = __builtin_return_address(0); - unsigned long started = jiffies; - int cpu = smp_processor_id(); -#endif - unsigned long flags; - - local_irq_save(flags); - _raw_spin_lock(&rw->lock); - - rw->counter++; -#if 0 - pdc_printf( - "%s:%d: read_lock grabbed in %s at %p(%d) %ld ticks\n", - bfile, bline, current->comm, inline_pc, - cpu, jiffies - started); -#endif - _raw_spin_unlock(&rw->lock); - local_irq_restore(flags); -} - -#endif /* CONFIG_DEBUG_RWLOCK */ diff --git a/arch/ppc/lib/Makefile b/arch/ppc/lib/Makefile index 1c380e6..f1e1fb4 100644 --- a/arch/ppc/lib/Makefile +++ b/arch/ppc/lib/Makefile @@ -4,6 +4,5 @@ obj-y := checksum.o string.o strcase.o dec_and_lock.o div64.o -obj-$(CONFIG_SMP) += locks.o obj-$(CONFIG_8xx) += rheap.o obj-$(CONFIG_CPM2) += rheap.o diff --git a/arch/ppc/lib/dec_and_lock.c b/arch/ppc/lib/dec_and_lock.c index 4ee8880..b18f0d9 100644 --- a/arch/ppc/lib/dec_and_lock.c +++ b/arch/ppc/lib/dec_and_lock.c @@ -11,14 +11,7 @@ * has a cmpxchg, and where atomic->value is an int holding * the value of the atomic (i.e. the high bits aren't used * for a lock or anything like that). - * - * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h - * if spinlocks are empty and thus atomic_dec_and_lock is defined - * to be atomic_dec_and_test - in that case we don't need it - * defined here as well. */ - -#ifndef ATOMIC_DEC_AND_LOCK int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) { int counter; @@ -43,4 +36,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) } EXPORT_SYMBOL(_atomic_dec_and_lock); -#endif /* ATOMIC_DEC_AND_LOCK */ diff --git a/arch/ppc64/lib/dec_and_lock.c b/arch/ppc64/lib/dec_and_lock.c index 6e8d859..7b9d4da 100644 --- a/arch/ppc64/lib/dec_and_lock.c +++ b/arch/ppc64/lib/dec_and_lock.c @@ -20,14 +20,7 @@ * has a cmpxchg, and where atomic->value is an int holding * the value of the atomic (i.e. the high bits aren't used * for a lock or anything like that). - * - * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h - * if spinlocks are empty and thus atomic_dec_and_lock is defined - * to be atomic_dec_and_test - in that case we don't need it - * defined here as well. 
*/ - -#ifndef ATOMIC_DEC_AND_LOCK int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) { int counter; @@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) } EXPORT_SYMBOL(_atomic_dec_and_lock); -#endif /* ATOMIC_DEC_AND_LOCK */ diff --git a/arch/ppc64/lib/locks.c b/arch/ppc64/lib/locks.c index ef70ef9..033643a 100644 --- a/arch/ppc64/lib/locks.c +++ b/arch/ppc64/lib/locks.c @@ -23,12 +23,12 @@ /* waiting for a spinlock... */ #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) -void __spin_yield(spinlock_t *lock) +void __spin_yield(raw_spinlock_t *lock) { unsigned int lock_value, holder_cpu, yield_count; struct paca_struct *holder_paca; - lock_value = lock->lock; + lock_value = lock->slock; if (lock_value == 0) return; holder_cpu = lock_value & 0xffff; @@ -38,7 +38,7 @@ void __spin_yield(spinlock_t *lock) if ((yield_count & 1) == 0) return; /* virtual cpu is currently running */ rmb(); - if (lock->lock != lock_value) + if (lock->slock != lock_value) return; /* something has changed */ #ifdef CONFIG_PPC_ISERIES HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, @@ -54,7 +54,7 @@ void __spin_yield(spinlock_t *lock) * This turns out to be the same for read and write locks, since * we only know the holder if it is write-locked. */ -void __rw_yield(rwlock_t *rw) +void __rw_yield(raw_rwlock_t *rw) { int lock_value; unsigned int holder_cpu, yield_count; @@ -82,9 +82,9 @@ void __rw_yield(rwlock_t *rw) } #endif -void spin_unlock_wait(spinlock_t *lock) +void __raw_spin_unlock_wait(raw_spinlock_t *lock) { - while (lock->lock) { + while (lock->slock) { HMT_low(); if (SHARED_PROCESSOR) __spin_yield(lock); @@ -92,4 +92,4 @@ void spin_unlock_wait(spinlock_t *lock) HMT_medium(); } -EXPORT_SYMBOL(spin_unlock_wait); +EXPORT_SYMBOL(__raw_spin_unlock_wait); diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 888b559..2dc14e9 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -36,7 +36,7 @@ _diag44(void) } void -_raw_spin_lock_wait(spinlock_t *lp, unsigned int pc) +_raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc) { int count = spin_retry; @@ -53,7 +53,7 @@ _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc) EXPORT_SYMBOL(_raw_spin_lock_wait); int -_raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc) +_raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc) { int count = spin_retry; @@ -67,7 +67,7 @@ _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc) EXPORT_SYMBOL(_raw_spin_trylock_retry); void -_raw_read_lock_wait(rwlock_t *rw) +_raw_read_lock_wait(raw_rwlock_t *rw) { unsigned int old; int count = spin_retry; @@ -86,7 +86,7 @@ _raw_read_lock_wait(rwlock_t *rw) EXPORT_SYMBOL(_raw_read_lock_wait); int -_raw_read_trylock_retry(rwlock_t *rw) +_raw_read_trylock_retry(raw_rwlock_t *rw) { unsigned int old; int count = spin_retry; @@ -102,7 +102,7 @@ _raw_read_trylock_retry(rwlock_t *rw) EXPORT_SYMBOL(_raw_read_trylock_retry); void -_raw_write_lock_wait(rwlock_t *rw) +_raw_write_lock_wait(raw_rwlock_t *rw) { int count = spin_retry; @@ -119,7 +119,7 @@ _raw_write_lock_wait(rwlock_t *rw) EXPORT_SYMBOL(_raw_write_lock_wait); int -_raw_write_trylock_retry(rwlock_t *rw) +_raw_write_trylock_retry(raw_rwlock_t *rw) { int count = spin_retry; diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c index 5d974a2..f848093 100644 --- a/arch/sparc/kernel/sparc_ksyms.c +++ b/arch/sparc/kernel/sparc_ksyms.c @@ -114,17 +114,7 @@ DOT_ALIAS2(unsigned, urem, unsigned, unsigned) /* used by various drivers */ 
EXPORT_SYMBOL(sparc_cpu_model); EXPORT_SYMBOL(kernel_thread); -#ifdef CONFIG_DEBUG_SPINLOCK #ifdef CONFIG_SMP -EXPORT_SYMBOL(_do_spin_lock); -EXPORT_SYMBOL(_do_spin_unlock); -EXPORT_SYMBOL(_spin_trylock); -EXPORT_SYMBOL(_do_read_lock); -EXPORT_SYMBOL(_do_read_unlock); -EXPORT_SYMBOL(_do_write_lock); -EXPORT_SYMBOL(_do_write_unlock); -#endif -#else // XXX find what uses (or used) these. EXPORT_SYMBOL(___rw_read_enter); EXPORT_SYMBOL(___rw_read_exit); diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 2296ff9..fa50069 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -9,5 +9,3 @@ lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \ strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \ copy_user.o locks.o atomic.o atomic32.o bitops.o \ lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o - -lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o diff --git a/arch/sparc/lib/debuglocks.c b/arch/sparc/lib/debuglocks.c deleted file mode 100644 index fb18235..0000000 --- a/arch/sparc/lib/debuglocks.c +++ /dev/null @@ -1,202 +0,0 @@ -/* $Id: debuglocks.c,v 1.11 2001/09/20 00:35:31 davem Exp $ - * debuglocks.c: Debugging versions of SMP locking primitives. - * - * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) - * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au) - */ - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/threads.h> /* For NR_CPUS */ -#include <linux/spinlock.h> -#include <asm/psr.h> -#include <asm/system.h> - -#ifdef CONFIG_SMP - -/* Some notes on how these debugging routines work. When a lock is acquired - * an extra debugging member lock->owner_pc is set to the caller of the lock - * acquisition routine. Right before releasing a lock, the debugging program - * counter is cleared to zero. - * - * Furthermore, since PC's are 4 byte aligned on Sparc, we stuff the CPU - * number of the owner in the lowest two bits. 
- */ - -#define STORE_CALLER(A) __asm__ __volatile__("mov %%i7, %0" : "=r" (A)); - -static inline void show(char *str, spinlock_t *lock, unsigned long caller) -{ - int cpu = smp_processor_id(); - - printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n",str, - lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3); -} - -static inline void show_read(char *str, rwlock_t *lock, unsigned long caller) -{ - int cpu = smp_processor_id(); - - printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n", str, - lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3); -} - -static inline void show_write(char *str, rwlock_t *lock, unsigned long caller) -{ - int cpu = smp_processor_id(); - int i; - - printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)", str, - lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3); - - for(i = 0; i < NR_CPUS; i++) - printk(" reader[%d]=%08lx", i, lock->reader_pc[i]); - - printk("\n"); -} - -#undef INIT_STUCK -#define INIT_STUCK 100000000 - -void _do_spin_lock(spinlock_t *lock, char *str) -{ - unsigned long caller; - unsigned long val; - int cpu = smp_processor_id(); - int stuck = INIT_STUCK; - - STORE_CALLER(caller); - -again: - __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock))); - if(val) { - while(lock->lock) { - if (!--stuck) { - show(str, lock, caller); - stuck = INIT_STUCK; - } - barrier(); - } - goto again; - } - lock->owner_pc = (cpu & 3) | (caller & ~3); -} - -int _spin_trylock(spinlock_t *lock) -{ - unsigned long val; - unsigned long caller; - int cpu = smp_processor_id(); - - STORE_CALLER(caller); - - __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock))); - if(!val) { - /* We got it, record our identity for debugging. */ - lock->owner_pc = (cpu & 3) | (caller & ~3); - } - return val == 0; -} - -void _do_spin_unlock(spinlock_t *lock) -{ - lock->owner_pc = 0; - barrier(); - lock->lock = 0; -} - -void _do_read_lock(rwlock_t *rw, char *str) -{ - unsigned long caller; - unsigned long val; - int cpu = smp_processor_id(); - int stuck = INIT_STUCK; - - STORE_CALLER(caller); - -wlock_again: - __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock))); - if(val) { - while(rw->lock & 0xff) { - if (!--stuck) { - show_read(str, rw, caller); - stuck = INIT_STUCK; - } - barrier(); - } - goto wlock_again; - } - - rw->reader_pc[cpu] = caller; - barrier(); - rw->lock++; -} - -void _do_read_unlock(rwlock_t *rw, char *str) -{ - unsigned long caller; - unsigned long val; - int cpu = smp_processor_id(); - int stuck = INIT_STUCK; - - STORE_CALLER(caller); - -wlock_again: - __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock))); - if(val) { - while(rw->lock & 0xff) { - if (!--stuck) { - show_read(str, rw, caller); - stuck = INIT_STUCK; - } - barrier(); - } - goto wlock_again; - } - - rw->reader_pc[cpu] = 0; - barrier(); - rw->lock -= 0x1ff; -} - -void _do_write_lock(rwlock_t *rw, char *str) -{ - unsigned long caller; - unsigned long val; - int cpu = smp_processor_id(); - int stuck = INIT_STUCK; - - STORE_CALLER(caller); - -wlock_again: - __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock))); - if(val) { -wlock_wait: - while(rw->lock) { - if (!--stuck) { - show_write(str, rw, caller); - stuck = INIT_STUCK; - } - barrier(); - } - goto wlock_again; - } - - if (rw->lock & ~0xff) { - *(((unsigned char *)&rw->lock)+3) = 0; - barrier(); - goto wlock_wait; - } - - barrier(); - rw->owner_pc = (cpu & 3) | (caller & ~3); -} - -void 
_do_write_unlock(rwlock_t *rw) -{ - rw->owner_pc = 0; - barrier(); - rw->lock = 0; -} - -#endif /* SMP */ diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 6625543..7d10b03 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c @@ -607,11 +607,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, struct thread_info *t = p->thread_info; char *child_trap_frame; -#ifdef CONFIG_DEBUG_SPINLOCK - p->thread.smp_lock_count = 0; - p->thread.smp_lock_pc = 0; -#endif - /* Calculate offset to stack_frame & pt_regs */ child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ)); memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ)); diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 7d9a0f6..cbb5e59 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c @@ -115,17 +115,12 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data); /* used by various drivers */ #ifdef CONFIG_SMP -#ifndef CONFIG_DEBUG_SPINLOCK /* Out of line rw-locking implementation. */ EXPORT_SYMBOL(__read_lock); EXPORT_SYMBOL(__read_unlock); EXPORT_SYMBOL(__write_lock); EXPORT_SYMBOL(__write_unlock); EXPORT_SYMBOL(__write_trylock); -/* Out of line spin-locking implementation. */ -EXPORT_SYMBOL(_raw_spin_lock); -EXPORT_SYMBOL(_raw_spin_lock_flags); -#endif /* Hard IRQ locking */ EXPORT_SYMBOL(synchronize_irq); diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile index 40dbeec..d968aeb 100644 --- a/arch/sparc64/lib/Makefile +++ b/arch/sparc64/lib/Makefile @@ -14,7 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \ copy_in_user.o user_fixup.o memmove.o \ mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o -lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o obj-y += iomap.o diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c deleted file mode 100644 index f5f0b55..0000000 --- a/arch/sparc64/lib/debuglocks.c +++ /dev/null @@ -1,366 +0,0 @@ -/* $Id: debuglocks.c,v 1.9 2001/11/17 00:10:48 davem Exp $ - * debuglocks.c: Debugging versions of SMP locking primitives. - * - * Copyright (C) 1998 David S. 
Miller (davem@redhat.com) - */ - -#include <linux/config.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/spinlock.h> -#include <asm/system.h> - -#ifdef CONFIG_SMP - -static inline void show (char *str, spinlock_t *lock, unsigned long caller) -{ - int cpu = smp_processor_id(); - - printk("%s(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%x)\n", - str, lock, cpu, (unsigned int) caller, - lock->owner_pc, lock->owner_cpu); -} - -static inline void show_read (char *str, rwlock_t *lock, unsigned long caller) -{ - int cpu = smp_processor_id(); - - printk("%s(%p) CPU#%d stuck at %08x, writer PC(%08x):CPU(%x)\n", - str, lock, cpu, (unsigned int) caller, - lock->writer_pc, lock->writer_cpu); -} - -static inline void show_write (char *str, rwlock_t *lock, unsigned long caller) -{ - int cpu = smp_processor_id(); - int i; - - printk("%s(%p) CPU#%d stuck at %08x\n", - str, lock, cpu, (unsigned int) caller); - printk("Writer: PC(%08x):CPU(%x)\n", - lock->writer_pc, lock->writer_cpu); - printk("Readers:"); - for (i = 0; i < NR_CPUS; i++) - if (lock->reader_pc[i]) - printk(" %d[%08x]", i, lock->reader_pc[i]); - printk("\n"); -} - -#undef INIT_STUCK -#define INIT_STUCK 100000000 - -void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller) -{ - unsigned long val; - int stuck = INIT_STUCK; - int cpu = get_cpu(); - int shown = 0; - -again: - __asm__ __volatile__("ldstub [%1], %0" - : "=r" (val) - : "r" (&(lock->lock)) - : "memory"); - membar_storeload_storestore(); - if (val) { - while (lock->lock) { - if (!--stuck) { - if (shown++ <= 2) - show(str, lock, caller); - stuck = INIT_STUCK; - } - rmb(); - } - goto again; - } - lock->owner_pc = ((unsigned int)caller); - lock->owner_cpu = cpu; - current->thread.smp_lock_count++; - current->thread.smp_lock_pc = ((unsigned int)caller); - - put_cpu(); -} - -int _do_spin_trylock(spinlock_t *lock, unsigned long caller) -{ - unsigned long val; - int cpu = get_cpu(); - - __asm__ __volatile__("ldstub [%1], %0" - : "=r" (val) - : "r" (&(lock->lock)) - : "memory"); - membar_storeload_storestore(); - if (!val) { - lock->owner_pc = ((unsigned int)caller); - lock->owner_cpu = cpu; - current->thread.smp_lock_count++; - current->thread.smp_lock_pc = ((unsigned int)caller); - } - - put_cpu(); - - return val == 0; -} - -void _do_spin_unlock(spinlock_t *lock) -{ - lock->owner_pc = 0; - lock->owner_cpu = NO_PROC_ID; - membar_storestore_loadstore(); - lock->lock = 0; - current->thread.smp_lock_count--; -} - -/* Keep INIT_STUCK the same... */ - -void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller) -{ - unsigned long val; - int stuck = INIT_STUCK; - int cpu = get_cpu(); - int shown = 0; - -wlock_again: - /* Wait for any writer to go away. */ - while (((long)(rw->lock)) < 0) { - if (!--stuck) { - if (shown++ <= 2) - show_read(str, rw, caller); - stuck = INIT_STUCK; - } - rmb(); - } - /* Try once to increment the counter. 
*/ - __asm__ __volatile__( -" ldx [%0], %%g1\n" -" brlz,a,pn %%g1, 2f\n" -" mov 1, %0\n" -" add %%g1, 1, %%g7\n" -" casx [%0], %%g1, %%g7\n" -" sub %%g1, %%g7, %0\n" -"2:" : "=r" (val) - : "0" (&(rw->lock)) - : "g1", "g7", "memory"); - membar_storeload_storestore(); - if (val) - goto wlock_again; - rw->reader_pc[cpu] = ((unsigned int)caller); - current->thread.smp_lock_count++; - current->thread.smp_lock_pc = ((unsigned int)caller); - - put_cpu(); -} - -void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller) -{ - unsigned long val; - int stuck = INIT_STUCK; - int cpu = get_cpu(); - int shown = 0; - - /* Drop our identity _first_. */ - rw->reader_pc[cpu] = 0; - current->thread.smp_lock_count--; -runlock_again: - /* Spin trying to decrement the counter using casx. */ - __asm__ __volatile__( -" membar #StoreLoad | #LoadLoad\n" -" ldx [%0], %%g1\n" -" sub %%g1, 1, %%g7\n" -" casx [%0], %%g1, %%g7\n" -" membar #StoreLoad | #StoreStore\n" -" sub %%g1, %%g7, %0\n" - : "=r" (val) - : "0" (&(rw->lock)) - : "g1", "g7", "memory"); - if (val) { - if (!--stuck) { - if (shown++ <= 2) - show_read(str, rw, caller); - stuck = INIT_STUCK; - } - goto runlock_again; - } - - put_cpu(); -} - -void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller) -{ - unsigned long val; - int stuck = INIT_STUCK; - int cpu = get_cpu(); - int shown = 0; - -wlock_again: - /* Spin while there is another writer. */ - while (((long)rw->lock) < 0) { - if (!--stuck) { - if (shown++ <= 2) - show_write(str, rw, caller); - stuck = INIT_STUCK; - } - rmb(); - } - - /* Try to acuire the write bit. */ - __asm__ __volatile__( -" mov 1, %%g3\n" -" sllx %%g3, 63, %%g3\n" -" ldx [%0], %%g1\n" -" brlz,pn %%g1, 1f\n" -" or %%g1, %%g3, %%g7\n" -" casx [%0], %%g1, %%g7\n" -" membar #StoreLoad | #StoreStore\n" -" ba,pt %%xcc, 2f\n" -" sub %%g1, %%g7, %0\n" -"1: mov 1, %0\n" -"2:" : "=r" (val) - : "0" (&(rw->lock)) - : "g3", "g1", "g7", "memory"); - if (val) { - /* We couldn't get the write bit. */ - if (!--stuck) { - if (shown++ <= 2) - show_write(str, rw, caller); - stuck = INIT_STUCK; - } - goto wlock_again; - } - if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) { - /* Readers still around, drop the write - * lock, spin, and try again. - */ - if (!--stuck) { - if (shown++ <= 2) - show_write(str, rw, caller); - stuck = INIT_STUCK; - } - __asm__ __volatile__( -" mov 1, %%g3\n" -" sllx %%g3, 63, %%g3\n" -"1: ldx [%0], %%g1\n" -" andn %%g1, %%g3, %%g7\n" -" casx [%0], %%g1, %%g7\n" -" cmp %%g1, %%g7\n" -" membar #StoreLoad | #StoreStore\n" -" bne,pn %%xcc, 1b\n" -" nop" - : /* no outputs */ - : "r" (&(rw->lock)) - : "g3", "g1", "g7", "cc", "memory"); - while(rw->lock != 0) { - if (!--stuck) { - if (shown++ <= 2) - show_write(str, rw, caller); - stuck = INIT_STUCK; - } - rmb(); - } - goto wlock_again; - } - - /* We have it, say who we are. 
*/ - rw->writer_pc = ((unsigned int)caller); - rw->writer_cpu = cpu; - current->thread.smp_lock_count++; - current->thread.smp_lock_pc = ((unsigned int)caller); - - put_cpu(); -} - -void _do_write_unlock(rwlock_t *rw, unsigned long caller) -{ - unsigned long val; - int stuck = INIT_STUCK; - int shown = 0; - - /* Drop our identity _first_ */ - rw->writer_pc = 0; - rw->writer_cpu = NO_PROC_ID; - current->thread.smp_lock_count--; -wlock_again: - __asm__ __volatile__( -" membar #StoreLoad | #LoadLoad\n" -" mov 1, %%g3\n" -" sllx %%g3, 63, %%g3\n" -" ldx [%0], %%g1\n" -" andn %%g1, %%g3, %%g7\n" -" casx [%0], %%g1, %%g7\n" -" membar #StoreLoad | #StoreStore\n" -" sub %%g1, %%g7, %0\n" - : "=r" (val) - : "0" (&(rw->lock)) - : "g3", "g1", "g7", "memory"); - if (val) { - if (!--stuck) { - if (shown++ <= 2) - show_write("write_unlock", rw, caller); - stuck = INIT_STUCK; - } - goto wlock_again; - } -} - -int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller) -{ - unsigned long val; - int cpu = get_cpu(); - - /* Try to acuire the write bit. */ - __asm__ __volatile__( -" mov 1, %%g3\n" -" sllx %%g3, 63, %%g3\n" -" ldx [%0], %%g1\n" -" brlz,pn %%g1, 1f\n" -" or %%g1, %%g3, %%g7\n" -" casx [%0], %%g1, %%g7\n" -" membar #StoreLoad | #StoreStore\n" -" ba,pt %%xcc, 2f\n" -" sub %%g1, %%g7, %0\n" -"1: mov 1, %0\n" -"2:" : "=r" (val) - : "0" (&(rw->lock)) - : "g3", "g1", "g7", "memory"); - - if (val) { - put_cpu(); - return 0; - } - - if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) { - /* Readers still around, drop the write - * lock, return failure. - */ - __asm__ __volatile__( -" mov 1, %%g3\n" -" sllx %%g3, 63, %%g3\n" -"1: ldx [%0], %%g1\n" -" andn %%g1, %%g3, %%g7\n" -" casx [%0], %%g1, %%g7\n" -" cmp %%g1, %%g7\n" -" membar #StoreLoad | #StoreStore\n" -" bne,pn %%xcc, 1b\n" -" nop" - : /* no outputs */ - : "r" (&(rw->lock)) - : "g3", "g1", "g7", "cc", "memory"); - - put_cpu(); - - return 0; - } - - /* We have it, say who we are. */ - rw->writer_pc = ((unsigned int)caller); - rw->writer_cpu = cpu; - current->thread.smp_lock_count++; - current->thread.smp_lock_pc = ((unsigned int)caller); - - put_cpu(); - - return 1; -} - -#endif /* CONFIG_SMP */ |