author	Ingo Molnar <mingo@elte.hu>	2009-10-17 09:58:25 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-10-17 09:58:25 +0200
commit	bb3c3e807140816b5f5fd4840473ee52a916ad4f (patch)
tree	9e8a69d266a7df86ca16177eefffab4b4e910753 /arch/ia64
parent	595c36490deb49381dc51231a3d5e6b66786ed27 (diff)
parent	012abeea669ea49636cf952d13298bb68654146a (diff)
Merge commit 'v2.6.32-rc5' into perf/probes
Conflicts:
	kernel/trace/trace_event_profile.c

Merge reason: update to -rc5 and resolve conflict.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                       |   4
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c           |   4
-rw-r--r--  arch/ia64/include/asm/acpi.h            |   2
-rw-r--r--  arch/ia64/include/asm/smp.h             |   1
-rw-r--r--  arch/ia64/include/asm/spinlock.h        | 175
-rw-r--r--  arch/ia64/include/asm/spinlock_types.h  |   2
-rw-r--r--  arch/ia64/include/asm/topology.h        |   3
-rw-r--r--  arch/ia64/install.sh                    |   4
-rw-r--r--  arch/ia64/kernel/Makefile.gate          |   2
-rw-r--r--  arch/ia64/kernel/head.S                 |  89
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c           |  20
-rw-r--r--  arch/ia64/kernel/init_task.c            |   3
-rw-r--r--  arch/ia64/kernel/smp.c                  |   2
-rw-r--r--  arch/ia64/oprofile/backtrace.c          |  20
14 files changed, 102 insertions(+), 229 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 6851e52..1ee596c 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -60,9 +60,7 @@ config IOMMU_HELPER
bool
config GENERIC_LOCKBREAK
- bool
- default y
- depends on SMP && PREEMPT
+ def_bool n
config RWSEM_XCHGADD_ALGORITHM
bool
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index f92bdaa..c69552b 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -69,11 +69,11 @@ ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf)
}
-static struct vm_operations_struct ia32_shared_page_vm_ops = {
+static const struct vm_operations_struct ia32_shared_page_vm_ops = {
.fault = ia32_install_shared_page
};
-static struct vm_operations_struct ia32_gate_page_vm_ops = {
+static const struct vm_operations_struct ia32_gate_page_vm_ops = {
.fault = ia32_install_gate_page
};
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 0f82cc2..91df968 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -89,10 +89,12 @@ ia64_acpi_release_global_lock (unsigned int *lock)
#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
+#ifdef CONFIG_ACPI
#define acpi_disabled 0 /* ACPI always enabled on IA64 */
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
+#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index d217d1d..0b3b399 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -127,7 +127,6 @@ extern int is_multithreading_enabled(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#else /* CONFIG_SMP */
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 13ab715..30bb930 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -19,103 +19,106 @@
#define __raw_spin_lock_init(x) ((x)->lock = 0)
-#ifdef ASM_SUPPORTED
/*
- * Try to get the lock. If we fail to get the lock, make a non-standard call to
- * ia64_spinlock_contention(). We do not use a normal call because that would force all
- * callers of __raw_spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is
- * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered".
+ * Ticket locks are conceptually two parts, one indicating the current head of
+ * the queue, and the other indicating the current tail. The lock is acquired
+ * by atomically noting the tail and incrementing it by one (thus adding
+ * ourself to the queue and noting our position), then waiting until the head
+ * becomes equal to the initial value of the tail.
+ *
+ * 63 32 31 0
+ * +----------------------------------------------------+
+ * | next_ticket_number | now_serving |
+ * +----------------------------------------------------+
*/
-#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
+#define TICKET_SHIFT 32
-static inline void
-__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
+static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
- register volatile unsigned int *ptr asm ("r31") = &lock->lock;
-
-#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-# ifdef CONFIG_ITANIUM
- /* don't use brl on Itanium... */
- asm volatile ("{\n\t"
- " mov ar.ccv = r0\n\t"
- " mov r28 = ip\n\t"
- " mov r30 = 1;;\n\t"
- "}\n\t"
- "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
- "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
- "cmp4.ne p14, p0 = r30, r0\n\t"
- "mov b6 = r29;;\n\t"
- "mov r27=%2\n\t"
- "(p14) br.cond.spnt.many b6"
- : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# else
- asm volatile ("{\n\t"
- " mov ar.ccv = r0\n\t"
- " mov r28 = ip\n\t"
- " mov r30 = 1;;\n\t"
- "}\n\t"
- "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
- "cmp4.ne p14, p0 = r30, r0\n\t"
- "mov r27=%2\n\t"
- "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
- : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# endif /* CONFIG_MCKINLEY */
-#else
-# ifdef CONFIG_ITANIUM
- /* don't use brl on Itanium... */
- /* mis-declare, so we get the entry-point, not it's function descriptor: */
- asm volatile ("mov r30 = 1\n\t"
- "mov r27=%2\n\t"
- "mov ar.ccv = r0;;\n\t"
- "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
- "movl r29 = ia64_spinlock_contention;;\n\t"
- "cmp4.ne p14, p0 = r30, r0\n\t"
- "mov b6 = r29;;\n\t"
- "(p14) br.call.spnt.many b6 = b6"
- : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# else
- asm volatile ("mov r30 = 1\n\t"
- "mov r27=%2\n\t"
- "mov ar.ccv = r0;;\n\t"
- "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
- "cmp4.ne p14, p0 = r30, r0\n\t"
- "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
- : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# endif /* CONFIG_MCKINLEY */
-#endif
+ int *p = (int *)&lock->lock, turn, now_serving;
+
+ now_serving = *p;
+ turn = ia64_fetchadd(1, p+1, acq);
+
+ if (turn == now_serving)
+ return;
+
+ do {
+ cpu_relax();
+ } while (ACCESS_ONCE(*p) != turn);
}
-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
+static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+{
+ long tmp = ACCESS_ONCE(lock->lock), try;
-/* Unlock by doing an ordered store and releasing the cacheline with nta */
-static inline void __raw_spin_unlock(raw_spinlock_t *x) {
- barrier();
- asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
+ if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
+ try = tmp + (1L << TICKET_SHIFT);
+
+ return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
+ }
+ return 0;
}
-#else /* !ASM_SUPPORTED */
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-# define __raw_spin_lock(x) \
-do { \
- __u32 *ia64_spinlock_ptr = (__u32 *) (x); \
- __u64 ia64_spinlock_val; \
- ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
- if (unlikely(ia64_spinlock_val)) { \
- do { \
- while (*ia64_spinlock_ptr) \
- ia64_barrier(); \
- ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
- } while (ia64_spinlock_val); \
- } \
-} while (0)
-#define __raw_spin_unlock(x) do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0)
-#endif /* !ASM_SUPPORTED */
+static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+{
+ int *p = (int *)&lock->lock;
+
+ (void)ia64_fetchadd(1, p, rel);
+}
+
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+{
+ long tmp = ACCESS_ONCE(lock->lock);
+
+ return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+}
+
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+{
+ long tmp = ACCESS_ONCE(lock->lock);
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+ return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+}
+
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+{
+ return __ticket_spin_is_locked(lock);
+}
+
+static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+{
+ return __ticket_spin_is_contended(lock);
+}
+#define __raw_spin_is_contended __raw_spin_is_contended
+
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+ __ticket_spin_lock(lock);
+}
+
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+ return __ticket_spin_trylock(lock);
+}
+
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+ __ticket_spin_unlock(lock);
+}
+
+static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+ unsigned long flags)
+{
+ __raw_spin_lock(lock);
+}
+
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+ while (__raw_spin_is_locked(lock))
+ cpu_relax();
+}
#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
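
The ticket lock introduced above replaces the old contended-path assembly with plain fetch-and-add arithmetic: take a ticket by incrementing the tail, then spin until the head reaches it. As a rough illustration of the same head/tail scheme, here is a minimal user-space sketch using C11 atomics and pthreads; the ticket_lock type and the tl_lock/tl_unlock names are hypothetical, not the kernel API, and it keeps the two counters in separate words for clarity where the kernel packs them into one.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* hypothetical user-space model of a ticket lock */
typedef struct {
	atomic_uint next_ticket;	/* tail: next ticket to hand out */
	atomic_uint now_serving;	/* head: ticket currently being served */
} ticket_lock;

static void tl_lock(ticket_lock *l)
{
	/* take a ticket: atomically note the tail and increment it */
	unsigned int me = atomic_fetch_add_explicit(&l->next_ticket, 1,
						    memory_order_relaxed);
	/* wait until the head becomes equal to our ticket */
	while (atomic_load_explicit(&l->now_serving,
				    memory_order_acquire) != me)
		;	/* spin */
}

static void tl_unlock(ticket_lock *l)
{
	/* advance the head, handing the lock to the next waiter in line */
	atomic_fetch_add_explicit(&l->now_serving, 1, memory_order_release);
}

static ticket_lock lock;	/* zero-initialized: unlocked */
static long counter;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		tl_lock(&lock);
		counter++;	/* protected by the ticket lock */
		tl_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("%ld\n", counter);	/* expect 400000 */
	return 0;
}

Build with e.g. cc -pthread ticket.c. Because tickets are granted strictly in FIFO order, the lock is fair, which is presumably why the Kconfig hunk above can also turn GENERIC_LOCKBREAK off.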
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f..b61d136 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -6,7 +6,7 @@
#endif
typedef struct {
- volatile unsigned int lock;
+ volatile unsigned long lock;
} raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
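
Widening the lock word to unsigned long lets both counters live in one 64-bit value: next_ticket_number in bits 63..32 and now_serving in bits 31..0, per the diagram in the spinlock.h hunk. The short demo below mirrors the __ticket_spin_is_locked/__ticket_spin_is_contended arithmetic on sample values; it is a user-space illustration, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define TICKET_SHIFT 32

/* the lock is held whenever the two 32-bit halves differ */
static int is_locked(uint64_t w)
{
	return !!(((w >> TICKET_SHIFT) ^ w) & ((1ULL << TICKET_SHIFT) - 1));
}

/* more than one outstanding ticket means waiters are spinning */
static int is_contended(uint64_t w)
{
	return (((w >> TICKET_SHIFT) - w) & ((1ULL << TICKET_SHIFT) - 1)) > 1;
}

int main(void)
{
	uint64_t unlocked  = (7ULL << TICKET_SHIFT) | 7;  /* next == serving */
	uint64_t held      = (8ULL << TICKET_SHIFT) | 7;  /* one holder */
	uint64_t contended = (9ULL << TICKET_SHIFT) | 7;  /* holder + waiter */

	printf("%d %d\n", is_locked(unlocked), is_contended(unlocked));    /* 0 0 */
	printf("%d %d\n", is_locked(held), is_contended(held));            /* 1 0 */
	printf("%d %d\n", is_locked(contended), is_contended(contended));  /* 1 1 */
	return 0;
}

The same layout explains why __ticket_spin_lock above can treat the word as two ints and fetchadd on p+1: Linux runs ia64 little-endian, so the high half (the ticket counter) sits at the higher int address while *p reads now_serving.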
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index d0141fbf..3ddb4e7 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,6 @@
/*
* Returns a bitmask of CPUs on Node 'node'.
*/
-#define node_to_cpumask(node) (node_to_cpu_mask[node])
#define cpumask_of_node(node) (&node_to_cpu_mask[node])
/*
@@ -104,8 +103,6 @@ void build_cpu_to_node_map(void);
#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id)
#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define smt_capable() (smp_num_siblings > 1)
diff --git a/arch/ia64/install.sh b/arch/ia64/install.sh
index 929e780..0e932f5 100644
--- a/arch/ia64/install.sh
+++ b/arch/ia64/install.sh
@@ -21,8 +21,8 @@
# User may have a custom install script
-if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
-if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
# Default install - same as make zlilo
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
index 1d87f84..ab9b03a 100644
--- a/arch/ia64/kernel/Makefile.gate
+++ b/arch/ia64/kernel/Makefile.gate
@@ -10,7 +10,7 @@ quiet_cmd_gate = GATE $@
cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
- $(call ld-option, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
$(call if_changed,gate)
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 1a6e445..696eff2 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1130,95 +1130,6 @@ SET_REG(b5);
#endif /* CONFIG_IA64_BRL_EMU */
#ifdef CONFIG_SMP
- /*
- * This routine handles spinlock contention. It uses a non-standard calling
- * convention to avoid converting leaf routines into interior routines. Because
- * of this special convention, there are several restrictions:
- *
- * - do not use gp relative variables, this code is called from the kernel
- * and from modules, r1 is undefined.
- * - do not use stacked registers, the caller owns them.
- * - do not use the scratch stack space, the caller owns it.
- * - do not use any registers other than the ones listed below
- *
- * Inputs:
- * ar.pfs - saved CFM of caller
- * ar.ccv - 0 (and available for use)
- * r27 - flags from spin_lock_irqsave or 0. Must be preserved.
- * r28 - available for use.
- * r29 - available for use.
- * r30 - available for use.
- * r31 - address of lock, available for use.
- * b6 - return address
- * p14 - available for use.
- * p15 - used to track flag status.
- *
- * If you patch this code to use more registers, do not forget to update
- * the clobber lists for spin_lock() in arch/ia64/include/asm/spinlock.h.
- */
-
-#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-
-GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
- .prologue
- .save ar.pfs, r0 // this code effectively has a zero frame size
- .save rp, r28
- .body
- nop 0
- tbit.nz p15,p0=r27,IA64_PSR_I_BIT
- .restore sp // pop existing prologue after next insn
- mov b6 = r28
- .prologue
- .save ar.pfs, r0
- .altrp b6
- .body
- ;;
-(p15) ssm psr.i // reenable interrupts if they were on
- // DavidM says that srlz.d is slow and is not required in this case
-.wait:
- // exponential backoff, kdb, lockmeter etc. go in here
- hint @pause
- ld4 r30=[r31] // don't use ld4.bias; if it's contended, we won't write the word
- nop 0
- ;;
- cmp4.ne p14,p0=r30,r0
-(p14) br.cond.sptk.few .wait
-(p15) rsm psr.i // disable interrupts if we reenabled them
- br.cond.sptk.few b6 // lock is now free, try to acquire
- .global ia64_spinlock_contention_pre3_4_end // for kernprof
-ia64_spinlock_contention_pre3_4_end:
-END(ia64_spinlock_contention_pre3_4)
-
-#else
-
-GLOBAL_ENTRY(ia64_spinlock_contention)
- .prologue
- .altrp b6
- .body
- tbit.nz p15,p0=r27,IA64_PSR_I_BIT
- ;;
-.wait:
-(p15) ssm psr.i // reenable interrupts if they were on
- // DavidM says that srlz.d is slow and is not required in this case
-.wait2:
- // exponential backoff, kdb, lockmeter etc. go in here
- hint @pause
- ld4 r30=[r31] // don't use ld4.bias; if it's contended, we won't write the word
- ;;
- cmp4.ne p14,p0=r30,r0
- mov r30 = 1
-(p14) br.cond.sptk.few .wait2
-(p15) rsm psr.i // disable interrupts if we reenabled them
- ;;
- cmpxchg4.acq r30=[r31], r30, ar.ccv
- ;;
- cmp4.ne p14,p0=r0,r30
-(p14) br.cond.sptk.few .wait
-
- br.ret.sptk.many b6 // lock is now taken
-END(ia64_spinlock_contention)
-
-#endif
#ifdef CONFIG_HOTPLUG_CPU
GLOBAL_ENTRY(ia64_jump_to_sal)
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 8ebccb5..14d39e3 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -84,26 +84,6 @@ EXPORT_SYMBOL(ia64_save_scratch_fpregs);
#include <asm/unwind.h>
EXPORT_SYMBOL(unw_init_running);
-#ifdef ASM_SUPPORTED
-# ifdef CONFIG_SMP
-# if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-/*
- * This is not a normal routine and we don't want a function descriptor for it, so we use
- * a fake declaration here.
- */
-extern char ia64_spinlock_contention_pre3_4;
-EXPORT_SYMBOL(ia64_spinlock_contention_pre3_4);
-# else
-/*
- * This is not a normal routine and we don't want a function descriptor for it, so we use
- * a fake declaration here.
- */
-extern char ia64_spinlock_contention;
-EXPORT_SYMBOL(ia64_spinlock_contention);
-# endif
-# endif
-#endif
-
#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE)
extern void esi_call_phys (void);
EXPORT_SYMBOL_GPL(esi_call_phys);
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index c475fc2..e253ab8 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -33,7 +33,8 @@ union {
struct thread_info thread_info;
} s;
unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
-} init_task_mem asm ("init_task") __attribute__((section(".data.init_task"))) = {{
+} init_task_mem asm ("init_task") __init_task_data =
+ {{
.task = INIT_TASK(init_task_mem.s.task),
.thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
}};
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 93ebfea..dabeefe2 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -302,7 +302,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
return;
}
- smp_call_function_mask(mm->cpu_vm_mask,
+ smp_call_function_many(mm_cpumask(mm),
(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
local_irq_disable();
local_finish_flush_tlb_mm(mm);
diff --git a/arch/ia64/oprofile/backtrace.c b/arch/ia64/oprofile/backtrace.c
index adb0156..5cdd7e4 100644
--- a/arch/ia64/oprofile/backtrace.c
+++ b/arch/ia64/oprofile/backtrace.c
@@ -32,24 +32,6 @@ typedef struct
u64 *prev_pfs_loc; /* state for WAR for old spinlock ool code */
} ia64_backtrace_t;
-#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-/*
- * Returns non-zero if the PC is in the spinlock contention out-of-line code
- * with non-standard calling sequence (on older compilers).
- */
-static __inline__ int in_old_ool_spinlock_code(unsigned long pc)
-{
- extern const char ia64_spinlock_contention_pre3_4[] __attribute__ ((weak));
- extern const char ia64_spinlock_contention_pre3_4_end[] __attribute__ ((weak));
- unsigned long sc_start = (unsigned long)ia64_spinlock_contention_pre3_4;
- unsigned long sc_end = (unsigned long)ia64_spinlock_contention_pre3_4_end;
- return (sc_start && sc_end && pc >= sc_start && pc < sc_end);
-}
-#else
-/* Newer spinlock code does a proper br.call and works fine with the unwinder */
-#define in_old_ool_spinlock_code(pc) 0
-#endif
-
/* Returns non-zero if the PC is in the Interrupt Vector Table */
static __inline__ int in_ivt_code(unsigned long pc)
{
@@ -80,7 +62,7 @@ static __inline__ int next_frame(ia64_backtrace_t *bt)
*/
if (bt->prev_pfs_loc && bt->regs && bt->frame.pfs_loc == bt->prev_pfs_loc)
bt->frame.pfs_loc = &bt->regs->ar_pfs;
- bt->prev_pfs_loc = (in_old_ool_spinlock_code(bt->frame.ip) ? bt->frame.pfs_loc : NULL);
+ bt->prev_pfs_loc = NULL;
return unw_unwind(&bt->frame) == 0;
}