path: root/arch/sparc
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig                       |    3
-rw-r--r--  arch/sparc/Kconfig.debug                 |    5
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h      |    2
-rw-r--r--  arch/sparc/include/asm/irqflags_64.h     |   23
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h  |    2
-rw-r--r--  arch/sparc/kernel/Makefile               |   10
-rw-r--r--  arch/sparc/kernel/ftrace.c               |   60
-rw-r--r--  arch/sparc/kernel/irq_64.c               |   31
-rw-r--r--  arch/sparc/kernel/kgdb_64.c              |    3
-rw-r--r--  arch/sparc/kernel/kstack.h               |   19
-rw-r--r--  arch/sparc/kernel/nmi.c                  |   10
-rw-r--r--  arch/sparc/kernel/pci_common.c           |   11
-rw-r--r--  arch/sparc/kernel/pcr.c                  |    3
-rw-r--r--  arch/sparc/kernel/rtrap_64.S             |   12
-rw-r--r--  arch/sparc/kernel/smp_64.c               |   11
-rw-r--r--  arch/sparc/kernel/time_64.c              |    4
-rw-r--r--  arch/sparc/kernel/traps_64.c             |   26
-rw-r--r--  arch/sparc/kernel/unaligned_64.c         |    6
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S          |    5
-rw-r--r--  arch/sparc/lib/mcount.S                  |  159
20 files changed, 248 insertions(+), 157 deletions(-)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6db5136..9908d47 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,9 @@ config SPARC64
def_bool 64BIT
select ARCH_SUPPORTS_MSI
select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_GRAPH_FP_TEST
+ select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_KRETPROBES
select HAVE_KPROBES
select HAVE_LMB
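
Note: the three new selects advertise sparc64's function graph tracer
support. HAVE_FUNCTION_GRAPH_FP_TEST additionally asks the generic
tracer to record the frame pointer at function entry and compare it at
function exit, so a corrupted return stack is detected instead of
jumped through. A minimal sketch of that check (illustrative names;
the real test lives in the generic graph tracer code):

	static void check_graph_fp(unsigned long entry_fp,
				   unsigned long exit_fp)
	{
		/* The fp recorded by prepare_ftrace_return() must match
		 * the fp observed when the function returns. */
		if (entry_fp != exit_fp)
			ftrace_graph_stop();	/* stop rather than crash */
	}
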
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 9d3c889..1b4a831 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -19,13 +19,10 @@ config DEBUG_DCFLUSH
bool "D-cache flush debugging"
depends on SPARC64 && DEBUG_KERNEL
-config STACK_DEBUG
- bool "Stack Overflow Detection Support"
-
config MCOUNT
bool
depends on SPARC64
- depends on STACK_DEBUG || FUNCTION_TRACER
+ depends on FUNCTION_TRACER
default y
config FRAME_POINTER
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 926397d..050ef35 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -17,7 +17,7 @@ typedef struct {
unsigned int __nmi_count;
unsigned long clock_tick; /* %tick's per second */
unsigned long __pad;
- unsigned int __pad1;
+ unsigned int irq0_irqs;
unsigned int __pad2;
/* Dcache line 2, rarely used */
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index 8b49bf9..bfa1ea4 100644
--- a/arch/sparc/include/asm/irqflags_64.h
+++ b/arch/sparc/include/asm/irqflags_64.h
@@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void)
*/
static inline unsigned long __raw_local_irq_save(void)
{
- unsigned long flags = __raw_local_save_flags();
-
- raw_local_irq_disable();
+ unsigned long flags, tmp;
+
+ /* Disable interrupts to PIL_NORMAL_MAX unless we already
+ * are using PIL_NMI, in which case PIL_NMI is retained.
+ *
+ * The only values we ever program into the %pil are 0,
+ * PIL_NORMAL_MAX and PIL_NMI.
+ *
+ * Since PIL_NMI is the largest %pil value and all bits are
+ * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX
+ * actually is.
+ */
+ __asm__ __volatile__(
+ "rdpr %%pil, %0\n\t"
+ "or %0, %2, %1\n\t"
+ "wrpr %1, 0x0, %%pil"
+ : "=r" (flags), "=r" (tmp)
+ : "i" (PIL_NORMAL_MAX)
+ : "memory"
+ );
return flags;
}
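
Note: the rewritten __raw_local_irq_save() ORs PIL_NORMAL_MAX into the
current %pil instead of writing it outright, so code already running at
PIL_NMI (0xf, all bits set) keeps its level and the fake NMI cannot be
masked by an irqsave inside the NMI path. A C model of the
three-instruction sequence (read_pil()/write_pil() are hypothetical
stand-ins for the rdpr/wrpr instructions):

	static unsigned long model_raw_local_irq_save(void)
	{
		unsigned long flags = read_pil();	/* rdpr %pil, %0 */

		/* or %0, PIL_NORMAL_MAX, %1; wrpr %1, 0x0, %%pil */
		write_pil(flags | PIL_NORMAL_MAX);
		return flags;	/* restored later by irq_restore */
	}
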
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 9e2d944..4827a3a 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -111,7 +111,7 @@ struct thread_info {
#define THREAD_SHIFT PAGE_SHIFT
#endif /* PAGE_SHIFT == 13 */
-#define PREEMPT_ACTIVE 0x4000000
+#define PREEMPT_ACTIVE 0x10000000
/*
* macros/functions for gaining access to the thread information structure
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index c631614..0c2dc1f 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -13,6 +13,14 @@ extra-y += init_task.o
CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
extra-y += vmlinux.lds
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o := -pg
+CFLAGS_REMOVE_time_$(BITS).o := -pg
+CFLAGS_REMOVE_perf_event.o := -pg
+CFLAGS_REMOVE_pcr.o := -pg
+endif
+
obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
obj-$(CONFIG_SPARC32) += etrap_32.o
obj-$(CONFIG_SPARC32) += rtrap_32.o
@@ -85,7 +93,7 @@ obj-$(CONFIG_KGDB) += kgdb_$(BITS).o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-CFLAGS_REMOVE_ftrace.o := -pg
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_EARLYFB) += btext.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 9103a56..03ab022 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -13,7 +13,7 @@ static const u32 ftrace_nop = 0x01000000;
static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
{
- static u32 call;
+ u32 call;
s32 off;
off = ((s32)addr - (s32)ip);
@@ -91,3 +91,61 @@ int __init ftrace_dyn_arch_init(void *data)
return 0;
}
#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned long ip = (unsigned long)(&ftrace_graph_call);
+ u32 old, new;
+
+ old = *(u32 *) &ftrace_graph_call;
+ new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
+ return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ unsigned long ip = (unsigned long)(&ftrace_graph_call);
+ u32 old, new;
+
+ old = *(u32 *) &ftrace_graph_call;
+ new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
+
+ return ftrace_modify_code(ip, old, new);
+}
+
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long parent,
+ unsigned long self_addr,
+ unsigned long frame_pointer)
+{
+ unsigned long return_hooker = (unsigned long) &return_to_handler;
+ struct ftrace_graph_ent trace;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return parent + 8UL;
+
+ if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
+ frame_pointer) == -EBUSY)
+ return parent + 8UL;
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ return parent + 8UL;
+ }
+
+ return return_hooker;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
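
Note: on sparc a call instruction returns to its own address plus 8
(the call plus its delay slot), which explains the parent + 8 on every
bail-out path. ftrace_graph_caller (see the mcount.S hunk below)
subtracts those 8 bytes back off via "restore %o0, -8, %i7", so either
%i7 is left unchanged or the traced function exits into
return_to_handler. A sketch of that consumption in C (hypothetical
wrapper name):

	static unsigned long graph_caller_new_i7(unsigned long parent_i7,
						 unsigned long self_pc,
						 unsigned long fp)
	{
		unsigned long ret;

		ret = prepare_ftrace_return(parent_i7, self_pc, fp);

		/* restore %o0, -8, %i7: the bail-out paths return
		 * parent_i7 + 8, leaving %i7 as it was; the hook path
		 * returns return_hooker, so the eventual "ret" (which
		 * jumps to %i7 + 8) lands in return_to_handler. */
		return ret - 8;
	}
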
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e1cbdb9..830d70a 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,7 +20,9 @@
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/ftrace.h>
#include <linux/irq.h>
+#include <linux/kmemleak.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
@@ -45,6 +47,7 @@
#include "entry.h"
#include "cpumap.h"
+#include "kstack.h"
#define NUM_IVECS (IMAP_INR + 1)
@@ -647,6 +650,14 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
if (unlikely(!bucket))
return 0;
+
+ /* The only reference we store to the IRQ bucket is
+ * by physical address which kmemleak can't see, tell
+ * it that this object explicitly is not a leak and
+ * should be scanned.
+ */
+ kmemleak_not_leak(bucket);
+
__flush_dcache_range((unsigned long) bucket,
((unsigned long) bucket +
sizeof(struct ino_bucket)));
@@ -703,25 +714,7 @@ void ack_bad_irq(unsigned int virt_irq)
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
- void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
- __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
- if (orig_sp < sp ||
- orig_sp > (sp + THREAD_SIZE)) {
- sp += THREAD_SIZE - 192 - STACK_BIAS;
- __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
- }
-
- return orig_sp;
-}
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
- __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
-
-void handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int irq, struct pt_regs *regs)
{
unsigned long pstate, bucket_pa;
struct pt_regs *old_regs;
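
Note: handler_irq and the other softint handlers converted in this
series are tagged __irq_entry, which places them in the .irqentry.text
section so the graph tracer can tell hard-IRQ entry points apart from
ordinary functions. For reference, the generic definition is along
these lines (sketch from <linux/ftrace.h> of this era):

	#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	#define __irq_entry __attribute__((__section__(".irqentry.text")))
	#else
	#define __irq_entry
	#endif
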
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index f5a0fd4..0a2bd0f 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -5,6 +5,7 @@
#include <linux/kgdb.h>
#include <linux/kdebug.h>
+#include <linux/ftrace.h>
#include <asm/kdebug.h>
#include <asm/ptrace.h>
@@ -108,7 +109,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
}
#ifdef CONFIG_SMP
-void smp_kgdb_capture_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs)
{
unsigned long flags;
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 5247283..53dfb92 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -61,4 +61,23 @@ check_magic:
}
+static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+ void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+ __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+ if (orig_sp < sp ||
+ orig_sp > (sp + THREAD_SIZE)) {
+ sp += THREAD_SIZE - 192 - STACK_BIAS;
+ __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+ }
+
+ return orig_sp;
+}
+
+static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+ __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
#endif /* _KSTACK_H */
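
Note: moving the stack-switch helpers into kstack.h lets nmi.c reuse
them. They must remain always_inline: a real function call issued
after the "mov %0, %%sp" would push its frame onto whichever stack is
live at that instant. Usage follows the pattern the nmi.c hunk below
adopts:

	/* Sketch: run heavy handler work on the per-cpu hardirq stack. */
	void *orig_sp = set_hardirq_stack();
	/* ... handler body that may need deep stack ... */
	restore_hardirq_stack(orig_sp);
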
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b287b62..a4bd7ba 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -23,6 +23,8 @@
#include <asm/ptrace.h>
#include <asm/pcr.h>
+#include "kstack.h"
+
/* We don't have a real NMI on sparc64, but we can fake one
* up using profiling counter overflow interrupts and interrupt
* levels.
@@ -92,7 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
unsigned int sum, touched = 0;
- int cpu = smp_processor_id();
+ void *orig_sp;
clear_softint(1 << irq);
@@ -100,13 +102,15 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
nmi_enter();
+ orig_sp = set_hardirq_stack();
+
if (notify_die(DIE_NMI, "nmi", regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
touched = 1;
else
pcr_ops->write(PCR_PIC_PRIV);
- sum = kstat_irqs_cpu(0, cpu);
+ sum = local_cpu_data().irq0_irqs;
if (__get_cpu_var(nmi_touch)) {
__get_cpu_var(nmi_touch) = 0;
touched = 1;
@@ -125,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
pcr_ops->write(pcr_enable);
}
+ restore_hardirq_stack(orig_sp);
+
nmi_exit();
}
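
Note: kstat_irqs_cpu(0, cpu) is no longer a meaningful liveness signal
here, so the watchdog now samples local_cpu_data().irq0_irqs, a per-cpu
counter that timer_interrupt() increments (see the time_64.c hunk
below): if it has not advanced between two NMIs, timer interrupts are
not being serviced and the CPU is presumed locked up. A simplified
sketch of that test (per-cpu bookkeeping collapsed into statics for
illustration):

	static unsigned int last_irq_sum;
	static int alert_counter;

	static void nmi_watchdog_check(unsigned int sum, int touched,
				       int nmi_hz, struct pt_regs *regs)
	{
		if (!touched && sum == last_irq_sum) {
			/* No timer ticks since the previous NMI. */
			if (++alert_counter == 30 * nmi_hz)
				die_nmi("LOCKUP", regs, 0);
		} else {
			last_irq_sum = sum;
			alert_counter = 0;
		}
	}
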
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index b775658..8a00058 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm)
struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
if (!rp) {
- prom_printf("Cannot allocate IOMMU resource.\n");
- prom_halt();
+ pr_info("%s: Cannot allocate IOMMU resource.\n",
+ pbm->name);
+ return;
}
rp->name = "IOMMU";
rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
rp->flags = IORESOURCE_BUSY;
- request_resource(&pbm->mem_space, rp);
+ if (request_resource(&pbm->mem_space, rp)) {
+ pr_info("%s: Unable to request IOMMU resource.\n",
+ pbm->name);
+ kfree(rp);
+ }
}
}
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 2d94e7a..c4a6a50 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -8,6 +8,7 @@
#include <linux/irq.h>
#include <linux/perf_event.h>
+#include <linux/ftrace.h>
#include <asm/pil.h>
#include <asm/pcr.h>
@@ -34,7 +35,7 @@ unsigned int picl_shift;
* Therefore in such situations we defer the work by signalling
* a lower level cpu IRQ.
*/
-void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
+void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs;
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 83f1873..090b9e9 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -130,7 +130,17 @@ rtrap_xcall:
nop
call trace_hardirqs_on
nop
- wrpr %l4, %pil
+ /* Do not actually set the %pil here. We will do that
+ * below after we clear PSTATE_IE in the %pstate register.
+ * If we re-enable interrupts here, we can recurse down
+ * the hardirq stack potentially endlessly, causing a
+ * stack overflow.
+ *
+ * It is tempting to put this test and trace_hardirqs_on
+ * call at the 'rt_continue' label, but that will not work
+ * as that path hits unconditionally and we do not want to
+ * execute this in NMI return paths, for example.
+ */
#endif
rtrap_no_irq_enable:
andcc %l1, TSTATE_PRIV, %l3
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 4c53345..b6a2b8f 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -22,6 +22,7 @@
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
+#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
@@ -823,13 +824,13 @@ void arch_send_call_function_single_ipi(int cpu)
&cpumask_of_cpu(cpu));
}
-void smp_call_function_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
generic_smp_call_function_interrupt();
}
-void smp_call_function_single_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
generic_smp_call_function_single_interrupt();
@@ -965,7 +966,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
put_cpu();
}
-void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
struct mm_struct *mm;
unsigned long flags;
@@ -1149,7 +1150,7 @@ void smp_release(void)
*/
extern void prom_world(int);
-void smp_penguin_jailcell(int irq, struct pt_regs *regs)
+void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
@@ -1365,7 +1366,7 @@ void smp_send_reschedule(int cpu)
&cpumask_of_cpu(cpu));
}
-void smp_receive_signal_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
}
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 67e1651..c7bbe6cf 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -35,6 +35,7 @@
#include <linux/clocksource.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/ftrace.h>
#include <asm/oplib.h>
#include <asm/timer.h>
@@ -717,7 +718,7 @@ static struct clock_event_device sparc64_clockevent = {
};
static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
-void timer_interrupt(int irq, struct pt_regs *regs)
+void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
unsigned long tick_mask = tick_ops->softint_mask;
@@ -728,6 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
irq_enter();
+ local_cpu_data().irq0_irqs++;
kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
if (unlikely(!evt->event_handler)) {
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 837dfc2..9da57f0 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2203,27 +2203,6 @@ void dump_stack(void)
EXPORT_SYMBOL(dump_stack);
-static inline int is_kernel_stack(struct task_struct *task,
- struct reg_window *rw)
-{
- unsigned long rw_addr = (unsigned long) rw;
- unsigned long thread_base, thread_end;
-
- if (rw_addr < PAGE_OFFSET) {
- if (task != &init_task)
- return 0;
- }
-
- thread_base = (unsigned long) task_stack_page(task);
- thread_end = thread_base + sizeof(union thread_union);
- if (rw_addr >= thread_base &&
- rw_addr < thread_end &&
- !(rw_addr & 0x7UL))
- return 1;
-
- return 0;
-}
-
static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
{
unsigned long fp = rw->ins[6];
@@ -2252,6 +2231,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
show_regs(regs);
add_taint(TAINT_DIE);
if (regs->tstate & TSTATE_PRIV) {
+ struct thread_info *tp = current_thread_info();
struct reg_window *rw = (struct reg_window *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
@@ -2259,8 +2239,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
* find some badly aligned kernel stack.
*/
while (rw &&
- count++ < 30&&
- is_kernel_stack(current, rw)) {
+ count++ < 30 &&
+ kstack_valid(tp, (unsigned long) rw)) {
printk("Caller[%016lx]: %pS\n", rw->ins[7],
(void *) rw->ins[7]);
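
Note: is_kernel_stack() only accepted addresses on the task's own
stack, so backtraces taken while running on a per-cpu hardirq or
softirq stack stopped immediately. kstack_valid() from kstack.h also
accepts those IRQ stacks, which matters now that more handlers run on
them. The shape of the range test it applies to each candidate stack
(sketch; the real helper also rejects misaligned addresses):

	static bool on_this_stack(unsigned long base, unsigned long addr)
	{
		return base != 0UL &&
		       addr >= base && addr < base + THREAD_SIZE;
	}
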
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index ebce430..c752c4c 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -50,7 +50,7 @@ static inline enum direction decode_direction(unsigned int insn)
}
/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
-static inline int decode_access_size(unsigned int insn)
+static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
unsigned int tmp;
@@ -66,7 +66,7 @@ static inline int decode_access_size(unsigned int insn)
return 2;
else {
printk("Impossible unaligned trap. insn=%08x\n", insn);
- die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
+ die_if_kernel("Byte sized unaligned access?!?!", regs);
/* GCC should never warn that control reaches the end
* of this function without returning a value because
@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir = decode_direction(insn);
- int size = decode_access_size(insn);
+ int size = decode_access_size(regs, insn);
int orig_asi, asi;
current_thread_info()->kern_una_regs = regs;
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 4e59925..0c1e678 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -46,11 +46,16 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
+ IRQENTRY_TEXT
*(.gnu.warning)
} = 0
_etext = .;
RO_DATA(PAGE_SIZE)
+
+ /* Start of data section */
+ _sdata = .;
+
.data1 : {
*(.data1)
}
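
Note: IRQENTRY_TEXT gathers the .irqentry.text section populated by
the __irq_entry annotations and brackets it with
__irqentry_text_start/__irqentry_text_end, which the graph tracer
consults to recognize interrupt entry. The generic macro looks roughly
like this (sketch from include/asm-generic/vmlinux.lds.h of this era):

	#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	#define IRQENTRY_TEXT					\
			ALIGN_FUNCTION();			\
			VMLINUX_SYMBOL(__irqentry_text_start) = .; \
			*(.irqentry.text)			\
			VMLINUX_SYMBOL(__irqentry_text_end) = .;
	#else
	#define IRQENTRY_TEXT
	#endif

The new _sdata symbol marks the start of the data section, which
generic code (for example core_kernel_data()) expects when deciding
whether an address points into kernel data.
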
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 24b8b12..3ad6cbd 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -7,26 +7,11 @@
#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-
/*
* This is the main variant and is called by C code. GCC's -pg option
* automatically instruments every C function with a call to this.
*/
-#ifdef CONFIG_STACK_DEBUG
-
-#define OVSTACKSIZE 4096 /* lets hope this is enough */
-
- .data
- .align 8
-panicstring:
- .asciz "Stack overflow\n"
- .align 8
-ovstack:
- .skip OVSTACKSIZE
-#endif
.text
.align 32
.globl _mcount
@@ -35,84 +20,48 @@ ovstack:
.type mcount,#function
_mcount:
mcount:
-#ifdef CONFIG_STACK_DEBUG
- /*
- * Check whether %sp is dangerously low.
- */
- ldub [%g6 + TI_FPDEPTH], %g1
- srl %g1, 1, %g3
- add %g3, 1, %g3
- sllx %g3, 8, %g3 ! each fpregs frame is 256b
- add %g3, 192, %g3
- add %g6, %g3, %g3 ! where does task_struct+frame end?
- sub %g3, STACK_BIAS, %g3
- cmp %sp, %g3
- bg,pt %xcc, 1f
- nop
- lduh [%g6 + TI_CPU], %g1
- sethi %hi(hardirq_stack), %g3
- or %g3, %lo(hardirq_stack), %g3
- sllx %g1, 3, %g1
- ldx [%g3 + %g1], %g7
- sub %g7, STACK_BIAS, %g7
- cmp %sp, %g7
- bleu,pt %xcc, 2f
- sethi %hi(THREAD_SIZE), %g3
- add %g7, %g3, %g7
- cmp %sp, %g7
- blu,pn %xcc, 1f
-2: sethi %hi(softirq_stack), %g3
- or %g3, %lo(softirq_stack), %g3
- ldx [%g3 + %g1], %g7
- sub %g7, STACK_BIAS, %g7
- cmp %sp, %g7
- bleu,pt %xcc, 3f
- sethi %hi(THREAD_SIZE), %g3
- add %g7, %g3, %g7
- cmp %sp, %g7
- blu,pn %xcc, 1f
- nop
- /* If we are already on ovstack, don't hop onto it
- * again, we are already trying to output the stack overflow
- * message.
- */
-3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
- or %g7, %lo(ovstack), %g7
- add %g7, OVSTACKSIZE, %g3
- sub %g3, STACK_BIAS + 192, %g3
- sub %g7, STACK_BIAS, %g7
- cmp %sp, %g7
- blu,pn %xcc, 2f
- cmp %sp, %g3
- bleu,pn %xcc, 1f
- nop
-2: mov %g3, %sp
- sethi %hi(panicstring), %g3
- call prom_printf
- or %g3, %lo(panicstring), %o0
- call prom_halt
- nop
-1:
-#endif
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
- mov %o7, %o0
- .globl mcount_call
-mcount_call:
- call ftrace_stub
- mov %o0, %o7
+ /* Do nothing, the retl/nop below is all we need. */
#else
- sethi %hi(ftrace_trace_function), %g1
+ sethi %hi(function_trace_stop), %g1
+ lduw [%g1 + %lo(function_trace_stop)], %g2
+ brnz,pn %g2, 2f
+ sethi %hi(ftrace_trace_function), %g1
sethi %hi(ftrace_stub), %g2
ldx [%g1 + %lo(ftrace_trace_function)], %g1
or %g2, %lo(ftrace_stub), %g2
cmp %g1, %g2
be,pn %icc, 1f
- mov %i7, %o1
- jmpl %g1, %g0
- mov %o7, %o0
+ mov %i7, %g3
+ save %sp, -176, %sp
+ mov %g3, %o1
+ jmpl %g1, %o7
+ mov %i7, %o0
+ ret
+ restore
/* not reached */
1:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ sethi %hi(ftrace_graph_return), %g1
+ ldx [%g1 + %lo(ftrace_graph_return)], %g3
+ cmp %g2, %g3
+ bne,pn %xcc, 5f
+ sethi %hi(ftrace_graph_entry_stub), %g2
+ sethi %hi(ftrace_graph_entry), %g1
+ or %g2, %lo(ftrace_graph_entry_stub), %g2
+ ldx [%g1 + %lo(ftrace_graph_entry)], %g1
+ cmp %g1, %g2
+ be,pt %xcc, 2f
+ nop
+5: mov %i7, %g2
+ mov %fp, %g3
+ save %sp, -176, %sp
+ mov %g2, %l0
+ ba,pt %xcc, ftrace_graph_caller
+ mov %g3, %l1
+#endif
+2:
#endif
#endif
retl
@@ -131,14 +80,50 @@ ftrace_stub:
.globl ftrace_caller
.type ftrace_caller,#function
ftrace_caller:
- mov %i7, %o1
- mov %o7, %o0
+ sethi %hi(function_trace_stop), %g1
+ mov %i7, %g2
+ lduw [%g1 + %lo(function_trace_stop)], %g1
+ brnz,pn %g1, ftrace_stub
+ mov %fp, %g3
+ save %sp, -176, %sp
+ mov %g2, %o1
+ mov %g2, %l0
+ mov %g3, %l1
.globl ftrace_call
ftrace_call:
call ftrace_stub
- mov %o0, %o7
- retl
+ mov %i7, %o0
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl ftrace_graph_call
+ftrace_graph_call:
+ call ftrace_stub
nop
+#endif
+ ret
+ restore
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .size ftrace_graph_call,.-ftrace_graph_call
+#endif
+ .size ftrace_call,.-ftrace_call
.size ftrace_caller,.-ftrace_caller
#endif
#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+ mov %l0, %o0
+ mov %i7, %o1
+ call prepare_ftrace_return
+ mov %l1, %o2
+ ret
+ restore %o0, -8, %i7
+END(ftrace_graph_caller)
+
+ENTRY(return_to_handler)
+ save %sp, -176, %sp
+ call ftrace_return_to_handler
+ mov %fp, %o0
+ jmpl %o0 + 8, %g0
+ restore
+END(return_to_handler)
+#endif
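
Note: return_to_handler is where a hooked function actually "returns":
ftrace_return_to_handler() pops the real return address that
prepare_ftrace_return() stashed on the per-task return stack and
records the exit for the trace, then the stub jumps to that address
plus 8. A C-level model (jump_to() is an illustrative stand-in for the
"jmpl %o0 + 8, %g0" indirect jump):

	static void model_return_to_handler(unsigned long fp)
	{
		/* Pops the saved %i7 and logs the function exit. */
		unsigned long real_i7 = ftrace_return_to_handler(fp);

		jump_to(real_i7 + 8);	/* resume in the real caller */
	}
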