path: root/arch/arm/kernel/entry-armv.S
author		Paul Mundt <lethal@linux-sh.org>	2011-01-13 15:06:28 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2011-01-13 15:06:28 +0900
commit		f43dc23d5ea91fca257be02138a255f02d98e806 (patch)
tree		b29722f6e965316e90ac97abf79923ced250dc21 /arch/arm/kernel/entry-armv.S
parent		f8e53553f452dcbf67cb89c8cba63a1cd6eb4cc0 (diff)
parent		4162cf64973df51fc885825bc9ca4d055891c49f (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into common/serial-rework
Conflicts:
	arch/sh/kernel/cpu/sh2/setup-sh7619.c
	arch/sh/kernel/cpu/sh2a/setup-mxg.c
	arch/sh/kernel/cpu/sh2a/setup-sh7201.c
	arch/sh/kernel/cpu/sh2a/setup-sh7203.c
	arch/sh/kernel/cpu/sh2a/setup-sh7206.c
	arch/sh/kernel/cpu/sh3/setup-sh7705.c
	arch/sh/kernel/cpu/sh3/setup-sh770x.c
	arch/sh/kernel/cpu/sh3/setup-sh7710.c
	arch/sh/kernel/cpu/sh3/setup-sh7720.c
	arch/sh/kernel/cpu/sh4/setup-sh4-202.c
	arch/sh/kernel/cpu/sh4/setup-sh7750.c
	arch/sh/kernel/cpu/sh4/setup-sh7760.c
	arch/sh/kernel/cpu/sh4a/setup-sh7343.c
	arch/sh/kernel/cpu/sh4a/setup-sh7366.c
	arch/sh/kernel/cpu/sh4a/setup-sh7722.c
	arch/sh/kernel/cpu/sh4a/setup-sh7723.c
	arch/sh/kernel/cpu/sh4a/setup-sh7724.c
	arch/sh/kernel/cpu/sh4a/setup-sh7763.c
	arch/sh/kernel/cpu/sh4a/setup-sh7770.c
	arch/sh/kernel/cpu/sh4a/setup-sh7780.c
	arch/sh/kernel/cpu/sh4a/setup-sh7785.c
	arch/sh/kernel/cpu/sh4a/setup-sh7786.c
	arch/sh/kernel/cpu/sh4a/setup-shx3.c
	arch/sh/kernel/cpu/sh5/setup-sh5.c
	drivers/serial/sh-sci.c
	drivers/serial/sh-sci.h
	include/linux/serial_sci.h
Diffstat (limited to 'arch/arm/kernel/entry-armv.S')
-rw-r--r--	arch/arm/kernel/entry-armv.S	| 325
1 file changed, 169 insertions(+), 156 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index fc8af43..2b46fea 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -21,42 +21,26 @@
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
+#include <asm/unistd.h>
+#include <asm/tls.h>
#include "entry-header.S"
+#include <asm/entry-macro-multi.S>
/*
* Interrupt handling. Preserves r7, r8, r9
*/
.macro irq_handler
- get_irqnr_preamble r5, lr
-1: get_irqnr_and_base r0, r6, r5, lr
- movne r1, sp
- @
- @ routine called with r0 = irq number, r1 = struct pt_regs *
- @
- adrne lr, 1b
- bne asm_do_IRQ
-
-#ifdef CONFIG_SMP
- /*
- * XXX
- *
- * this macro assumes that irqstat (r6) and base (r5) are
- * preserved from get_irqnr_and_base above
- */
- test_for_ipi r0, r6, r5, lr
- movne r0, sp
- adrne lr, 1b
- bne do_IPI
-
-#ifdef CONFIG_LOCAL_TIMERS
- test_for_ltirq r0, r6, r5, lr
- movne r0, sp
- adrne lr, 1b
- bne do_local_timer
-#endif
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+ ldr r5, =handle_arch_irq
+ mov r0, sp
+ ldr r5, [r5]
+ adr lr, BSYM(9997f)
+ teq r5, #0
+ movne pc, r5
#endif
-
+ arch_irq_handler_default
+9997:
.endm
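
With CONFIG_MULTI_IRQ_HANDLER the macro above no longer loops over get_irqnr_and_base; it makes one indirect call through the handle_arch_irq pointer (allocated at the end of this file) and only falls back to the old arch_irq_handler_default path when no handler is installed. Roughly, in C (a sketch; arch_irq_handler_default is really an assembler macro, shown here as a function for clarity):

	struct pt_regs;
	extern void (*handle_arch_irq)(struct pt_regs *);
	extern void arch_irq_handler_default(struct pt_regs *);

	static inline void irq_handler(struct pt_regs *regs)
	{
		if (handle_arch_irq)
			handle_arch_irq(regs);		/* movne pc, r5 with lr = 9997f */
		else
			arch_irq_handler_default(regs);	/* legacy per-machine path */
	}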
#ifdef CONFIG_KPROBES
@@ -70,7 +54,10 @@
*/
.macro inv_entry, reason
sub sp, sp, #S_FRAME_SIZE
- stmib sp, {r1 - lr}
+ ARM( stmib sp, {r1 - lr} )
+ THUMB( stmia sp, {r0 - r12} )
+ THUMB( str sp, [sp, #S_SP] )
+ THUMB( str lr, [sp, #S_LR] )
mov r1, #\reason
.endm
@@ -126,17 +113,24 @@ ENDPROC(__und_invalid)
.macro svc_entry, stack_hole=0
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
- sub sp, sp, #(S_FRAME_SIZE + \stack_hole)
+ sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+#ifdef CONFIG_THUMB2_KERNEL
+ SPFIX( str r0, [sp] ) @ temporarily saved
+ SPFIX( mov r0, sp )
+ SPFIX( tst r0, #4 ) @ test original stack alignment
+ SPFIX( ldr r0, [sp] ) @ restored
+#else
SPFIX( tst sp, #4 )
- SPFIX( bicne sp, sp, #4 )
- stmib sp, {r1 - r12}
+#endif
+ SPFIX( subeq sp, sp, #4 )
+ stmia sp, {r1 - r12}
ldmia r0, {r1 - r3}
- add r5, sp, #S_SP @ here for interlock avoidance
+ add r5, sp, #S_SP - 4 @ here for interlock avoidance
mov r4, #-1 @ "" "" "" ""
- add r0, sp, #(S_FRAME_SIZE + \stack_hole)
- SPFIX( addne r0, r0, #4 )
- str r1, [sp] @ save the "real" r0 copied
+ add r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ SPFIX( addeq r0, r0, #4 )
+ str r1, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
mov r1, lr
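
The svc_entry rework allocates all of the frame except the saved-r0 slot up front, inverts the SPFIX condition (subeq where the old code used bicne), and completes the frame with str r1, [sp, #-4]!, so a misaligned entry stack costs one 4-byte pad instead of being silently truncated. The arithmetic as a C sketch, assuming S_FRAME_SIZE and \stack_hole are multiples of 8 and that SPFIX is active (8-byte stack alignment required):

	#include <stdint.h>

	/* sketch: models only the sp adjustments made by svc_entry */
	uintptr_t svc_entry_sp(uintptr_t sp, unsigned frame, unsigned hole)
	{
		sp -= frame + hole - 4;	/* everything but the r0 slot */
		if (!(sp & 4))		/* entry sp was only 4-byte aligned... */
			sp -= 4;	/* ...so pad to preserve 8-byte alignment */
		sp -= 4;		/* str r1, [sp, #-4]! stores the real r0 */
		return sp;		/* final frame base, 8-byte aligned */
	}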
@@ -184,6 +178,7 @@ __dabt_svc:
@
@ set desired IRQ state, then call main handler
@
+ debug_entry r1
msr cpsr_c, r9
mov r2, sp
bl do_DataAbort
@@ -191,14 +186,13 @@ __dabt_svc:
@
@ IRQs off again before pulling preserved data off the stack
@
- disable_irq
+ disable_irq_notrace
@
@ restore SPSR and restart the instruction
@
- ldr r0, [sp, #S_PSR]
- msr spsr_cxsf, r0
- ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ ldr r2, [sp, #S_PSR]
+ svc_exit r2 @ return from exception
UNWIND(.fnend )
ENDPROC(__dabt_svc)
@@ -225,13 +219,12 @@ __irq_svc:
tst r0, #_TIF_NEED_RESCHED
blne svc_preempt
#endif
- ldr r0, [sp, #S_PSR] @ irqs are already disabled
- msr spsr_cxsf, r0
+ ldr r4, [sp, #S_PSR] @ irqs are already disabled
#ifdef CONFIG_TRACE_IRQFLAGS
- tst r0, #PSR_I_BIT
+ tst r4, #PSR_I_BIT
bleq trace_hardirqs_on
#endif
- ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ svc_exit r4 @ return from exception
UNWIND(.fnend )
ENDPROC(__irq_svc)
@@ -265,8 +258,16 @@ __und_svc:
@
@ r0 - instruction
@
+#ifndef CONFIG_THUMB2_KERNEL
ldr r0, [r2, #-4]
- adr r9, 1f
+#else
+ ldrh r0, [r2, #-2] @ Thumb instruction at LR - 2
+ and r9, r0, #0xf800
+ cmp r9, #0xe800 @ 32-bit instruction if xx >= 0
+ ldrhhs r9, [r2] @ bottom 16 bits
+ orrhs r0, r9, r0, lsl #16
+#endif
+ adr r9, BSYM(1f)
bl call_fpe
mov r0, sp @ struct pt_regs *regs
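
Thumb-2 kernels cannot simply reload a word at LR - 4 here: the trapped instruction may be 16 or 32 bits wide, and a first halfword of 0xe800 or above marks a 32-bit encoding. The fetch logic in C (a sketch):

	/* sketch: reconstruct the faulting Thumb instruction at lr */
	unsigned int fetch_thumb_insn(unsigned long lr)
	{
		unsigned int insn = *(unsigned short *)(lr - 2);

		if ((insn & 0xf800) >= 0xe800)	/* 111xx with xx != 00: 32-bit */
			insn = (insn << 16) | *(unsigned short *)lr;
		return insn;
	}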
@@ -275,14 +276,13 @@ __und_svc:
@
@ IRQs off again before pulling preserved data off the stack
@
-1: disable_irq
+1: disable_irq_notrace
@
@ restore SPSR and restart the instruction
@
- ldr lr, [sp, #S_PSR] @ Get SVC cpsr
- msr spsr_cxsf, lr
- ldmia sp, {r0 - pc}^ @ Restore SVC registers
+ ldr r2, [sp, #S_PSR] @ Get SVC cpsr
+ svc_exit r2 @ return from exception
UNWIND(.fnend )
ENDPROC(__und_svc)
@@ -297,35 +297,29 @@ __pabt_svc:
tst r3, #PSR_I_BIT
biceq r9, r9, #PSR_I_BIT
- @
- @ set args, then call main handler
- @
- @ r0 - address of faulting instruction
- @ r1 - pointer to registers on stack
- @
-#ifdef MULTI_PABORT
mov r0, r2 @ pass address of aborted instruction.
+#ifdef MULTI_PABORT
ldr r4, .LCprocfns
mov lr, pc
ldr pc, [r4, #PROCESSOR_PABT_FUNC]
#else
- CPU_PABORT_HANDLER(r0, r2)
+ bl CPU_PABORT_HANDLER
#endif
+ debug_entry r1
msr cpsr_c, r9 @ Maybe enable interrupts
- mov r1, sp @ regs
+ mov r2, sp @ regs
bl do_PrefetchAbort @ call abort handler
@
@ IRQs off again before pulling preserved data off the stack
@
- disable_irq
+ disable_irq_notrace
@
@ restore SPSR and restart the instruction
@
- ldr r0, [sp, #S_PSR]
- msr spsr_cxsf, r0
- ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ ldr r2, [sp, #S_PSR]
+ svc_exit r2 @ return from exception
UNWIND(.fnend )
ENDPROC(__pabt_svc)
@@ -353,7 +347,8 @@ ENDPROC(__pabt_svc)
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
sub sp, sp, #S_FRAME_SIZE
- stmib sp, {r1 - r12}
+ ARM( stmib sp, {r1 - r12} )
+ THUMB( stmia sp, {r0 - r12} )
ldmia r0, {r1 - r3}
add r0, sp, #S_PC @ here for interlock avoidance
@@ -372,7 +367,8 @@ ENDPROC(__pabt_svc)
@ Also, separately save sp_usr and lr_usr
@
stmia r0, {r2 - r4}
- stmdb r0, {sp, lr}^
+ ARM( stmdb r0, {sp, lr}^ )
+ THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
@
@ Enable the alignment trap while in kernel mode
@@ -425,9 +421,10 @@ __dabt_usr:
@
@ IRQs on, then call the main handler
@
+ debug_entry r1
enable_irq
mov r2, sp
- adr lr, ret_from_exception
+ adr lr, BSYM(ret_from_exception)
b do_DataAbort
UNWIND(.fnend )
ENDPROC(__dabt_usr)
@@ -437,9 +434,6 @@ __irq_usr:
usr_entry
kuser_cmpxchg_check
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
get_thread_info tsk
#ifdef CONFIG_PREEMPT
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -452,10 +446,9 @@ __irq_usr:
ldr r0, [tsk, #TI_PREEMPT]
str r8, [tsk, #TI_PREEMPT]
teq r0, r7
- strne r0, [r0, -r0]
-#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
+ ARM( strne r0, [r0, -r0] )
+ THUMB( movne r0, #0 )
+ THUMB( strne r0, [r0] )
#endif
mov why, #0
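
The cryptic strne r0, [r0, -r0] stores to address r0 - r0 = 0, i.e. it deliberately faults if the preempt count changed under us; Thumb-2 cannot encode a subtracted register offset, hence the two-instruction movne/strne equivalent. The intent, as a C sketch:

	/* sketch: crash loudly if the IRQ path corrupted the preempt count */
	void check_preempt_count(unsigned int expected, unsigned int seen)
	{
		if (seen != expected)		/* teq r0, r7 */
			*(volatile int *)0 = 0;	/* force an abort at address 0 */
	}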
@@ -476,9 +469,10 @@ __und_usr:
@
@ r0 - instruction
@
- adr r9, ret_from_exception
- adr lr, __und_usr_unknown
+ adr r9, BSYM(ret_from_exception)
+ adr lr, BSYM(__und_usr_unknown)
tst r3, #PSR_T_BIT @ Thumb mode?
+ itet eq @ explicit IT needed for the 1f label
subeq r4, r2, #4 @ ARM instr at LR - 4
subne r4, r2, #2 @ Thumb instr at LR - 2
1: ldreqt r0, [r4]
@@ -488,7 +482,10 @@ __und_usr:
beq call_fpe
@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
-2: ldrht r5, [r4], #2
+2:
+ ARM( ldrht r5, [r4], #2 )
+ THUMB( ldrht r5, [r4] )
+ THUMB( add r4, r4, #2 )
and r0, r5, #0xf800 @ mask bits 111x x... .... ....
cmp r0, #0xe800 @ 32bit instruction if xx != 0
blo __und_usr_unknown
@@ -508,16 +505,16 @@ ENDPROC(__und_usr)
/*
* The out of line fixup for the ldrt above.
*/
- .section .fixup, "ax"
+ .pushsection .fixup, "ax"
4: mov pc, r9
- .previous
- .section __ex_table,"a"
+ .popsection
+ .pushsection __ex_table,"a"
.long 1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
.long 2b, 4b
.long 3b, 4b
#endif
- .previous
+ .popsection
/*
* Check whether the instruction is a co-processor instruction.
@@ -577,9 +574,11 @@ call_fpe:
moveq pc, lr
get_thread_info r10 @ get current thread
and r8, r0, #0x00000f00 @ mask out CP number
+ THUMB( lsr r8, r8, #8 )
mov r7, #1
add r6, r10, #TI_USED_CP
- strb r7, [r6, r8, lsr #8] @ set appropriate used_cp[]
+ ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
+ THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
@ Test if we need to give access to iWMMXt coprocessors
ldr r5, [r10, #TI_FLAGS]
@@ -587,36 +586,38 @@ call_fpe:
movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
bcs iwmmxt_task_enable
#endif
- add pc, pc, r8, lsr #6
- mov r0, r0
-
- mov pc, lr @ CP#0
- b do_fpe @ CP#1 (FPE)
- b do_fpe @ CP#2 (FPE)
- mov pc, lr @ CP#3
+ ARM( add pc, pc, r8, lsr #6 )
+ THUMB( lsl r8, r8, #2 )
+ THUMB( add pc, r8 )
+ nop
+
+ movw_pc lr @ CP#0
+ W(b) do_fpe @ CP#1 (FPE)
+ W(b) do_fpe @ CP#2 (FPE)
+ movw_pc lr @ CP#3
#ifdef CONFIG_CRUNCH
b crunch_task_enable @ CP#4 (MaverickCrunch)
b crunch_task_enable @ CP#5 (MaverickCrunch)
b crunch_task_enable @ CP#6 (MaverickCrunch)
#else
- mov pc, lr @ CP#4
- mov pc, lr @ CP#5
- mov pc, lr @ CP#6
+ movw_pc lr @ CP#4
+ movw_pc lr @ CP#5
+ movw_pc lr @ CP#6
#endif
- mov pc, lr @ CP#7
- mov pc, lr @ CP#8
- mov pc, lr @ CP#9
+ movw_pc lr @ CP#7
+ movw_pc lr @ CP#8
+ movw_pc lr @ CP#9
#ifdef CONFIG_VFP
- b do_vfp @ CP#10 (VFP)
- b do_vfp @ CP#11 (VFP)
+ W(b) do_vfp @ CP#10 (VFP)
+ W(b) do_vfp @ CP#11 (VFP)
#else
- mov pc, lr @ CP#10 (VFP)
- mov pc, lr @ CP#11 (VFP)
+ movw_pc lr @ CP#10 (VFP)
+ movw_pc lr @ CP#11 (VFP)
#endif
- mov pc, lr @ CP#12
- mov pc, lr @ CP#13
- mov pc, lr @ CP#14 (Debug)
- mov pc, lr @ CP#15 (Control)
+ movw_pc lr @ CP#12
+ movw_pc lr @ CP#13
+ movw_pc lr @ CP#14 (Debug)
+ movw_pc lr @ CP#15 (Control)
#ifdef CONFIG_NEON
.align 6
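
call_fpe branches into the table that follows: on ARM, add pc, pc, r8, lsr #6 works because r8 holds the CP number shifted left by 8 and pc reads two instructions ahead (hence the nop), while Thumb-2 pre-scales r8 and relies on W(b)/movw_pc to keep every slot exactly one 32-bit word. The table's effect in C (a sketch; the real handlers take their arguments in registers, and CP#4-6 additionally route to MaverickCrunch under CONFIG_CRUNCH):

	extern void do_fpe(unsigned int insn);	/* prototypes simplified */
	extern void do_vfp(unsigned int insn);

	void call_fpe_dispatch(unsigned int insn)
	{
		switch ((insn >> 8) & 0xf) {	/* coprocessor number */
		case 1: case 2:
			do_fpe(insn);		/* FPA emulation */
			break;
		case 10: case 11:
			do_vfp(insn);		/* VFP (and NEON) */
			break;
		default:
			break;			/* movw_pc lr: not ours */
		}
	}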
@@ -657,17 +658,19 @@ do_fpe:
* lr = unrecognised FP instruction return address
*/
- .data
+ .pushsection .data
ENTRY(fp_enter)
.word no_fp
- .previous
+ .popsection
-no_fp: mov pc, lr
+ENTRY(no_fp)
+ mov pc, lr
+ENDPROC(no_fp)
__und_usr_unknown:
enable_irq
mov r0, sp
- adr lr, ret_from_exception
+ adr lr, BSYM(ret_from_exception)
b do_undefinstr
ENDPROC(__und_usr_unknown)
@@ -675,16 +678,17 @@ ENDPROC(__und_usr_unknown)
__pabt_usr:
usr_entry
-#ifdef MULTI_PABORT
mov r0, r2 @ pass address of aborted instruction.
+#ifdef MULTI_PABORT
ldr r4, .LCprocfns
mov lr, pc
ldr pc, [r4, #PROCESSOR_PABT_FUNC]
#else
- CPU_PABORT_HANDLER(r0, r2)
+ bl CPU_PABORT_HANDLER
#endif
+ debug_entry r1
enable_irq @ Enable interrupts
- mov r1, sp @ regs
+ mov r2, sp @ regs
bl do_PrefetchAbort @ call abort handler
UNWIND(.fnend )
/* fall through */
@@ -711,24 +715,20 @@ ENTRY(__switch_to)
UNWIND(.cantunwind )
add ip, r1, #TI_CPU_SAVE
ldr r3, [r2, #TI_TP_VALUE]
- stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack
-#ifdef CONFIG_MMU
+ ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
+ THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
+ THUMB( str sp, [ip], #4 )
+ THUMB( str lr, [ip], #4 )
+#ifdef CONFIG_CPU_USE_DOMAINS
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
-#if __LINUX_ARM_ARCH__ >= 6
-#ifdef CONFIG_CPU_32v6K
- clrex
-#else
- strex r5, r4, [ip] @ Clear exclusive monitor
-#endif
+ set_tls r3, r4, r5
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ ldr r7, [r2, #TI_TASK]
+ ldr r8, =__stack_chk_guard
+ ldr r7, [r7, #TSK_STACK_CANARY]
#endif
-#if defined(CONFIG_HAS_TLS_REG)
- mcr p15, 0, r3, c13, c0, 3 @ set TLS register
-#elif !defined(CONFIG_TLS_REG_EMUL)
- mov r4, #0xffff0fff
- str r3, [r4, #-15] @ TLS val at 0xffff0ff0
-#endif
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_USE_DOMAINS
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
mov r5, r0
@@ -736,8 +736,15 @@ ENTRY(__switch_to)
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ str r7, [r8]
+#endif
+ THUMB( mov ip, r4 )
mov r0, r5
- ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+ ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
+ THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
+ THUMB( ldr sp, [ip], #4 )
+ THUMB( ldr pc, [ip] )
UNWIND(.fnend )
ENDPROC(__switch_to)
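
Besides the Thumb-2 register shuffling, __switch_to now sets TLS through the set_tls macro (hardware register, software word, or emulation, chosen per CPU) and, with CONFIG_CC_STACKPROTECTOR on UP, moves the incoming task's canary into the single global __stack_chk_guard, since ARM has no per-CPU canary storage. The canary handover as a C sketch:

	#include <linux/sched.h>	/* task_struct, stack_canary */

	extern unsigned long __stack_chk_guard;

	/* sketch: on !SMP the global guard must follow the running task */
	static inline void switch_stack_canary(struct task_struct *next)
	{
	#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
		__stack_chk_guard = next->stack_canary;	/* the ldr/str r7, [r8] pair */
	#endif
	}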
@@ -772,6 +779,7 @@ ENDPROC(__switch_to)
* if your compiled code is not going to use the new instructions for other
* purpose.
*/
+ THUMB( .arm )
.macro usr_ret, reg
#ifdef CONFIG_ARM_THUMB
@@ -818,7 +826,7 @@ __kuser_helper_start:
*/
__kuser_memory_barrier: @ 0xffff0fa0
- smp_dmb
+ smp_dmb arm
usr_ret lr
.align 5
@@ -887,10 +895,10 @@ __kuser_cmpxchg: @ 0xffff0fc0
* A special ghost syscall is used for that (see traps.c).
*/
stmfd sp!, {r7, lr}
- mov r7, #0xff00 @ 0xfff0 into r7 for EABI
- orr r7, r7, #0xf0
- swi #0x9ffff0
+ ldr r7, 1f @ it's 20 bits
+ swi __ARM_NR_cmpxchg
ldmfd sp!, {r7, pc}
+1: .word __ARM_NR_cmpxchg
#elif __LINUX_ARM_ARCH__ < 6
@@ -935,9 +943,7 @@ kuser_cmpxchg_fixup:
#else
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c7, c10, 5 @ dmb
-#endif
+ smp_dmb arm
1: ldrex r3, [r2]
subs r3, r3, r0
strexeq r3, r1, [r2]
@@ -945,11 +951,8 @@ kuser_cmpxchg_fixup:
beq 1b
rsbs r0, r3, #0
/* beware -- each __kuser slot must be 8 instructions max */
-#ifdef CONFIG_SMP
- b __kuser_memory_barrier
-#else
- usr_ret lr
-#endif
+ ALT_SMP(b __kuser_memory_barrier)
+ ALT_UP(usr_ret lr)
#endif
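
The ARMv6+ body of __kuser_cmpxchg is the usual load-exclusive/store-exclusive retry loop, now with its barriers selected at runtime through ALT_SMP/ALT_UP instead of a compile-time #ifdef CONFIG_SMP. Its userspace contract, expressed with a GCC builtin (a sketch; the real helper additionally sets the C flag on success):

	/* sketch: semantics of the cmpxchg helper at 0xffff0fc0 */
	int kuser_cmpxchg(int oldval, int newval, volatile int *ptr)
	{
		/* 0 if *ptr was oldval and is now newval, non-zero otherwise;
		 * memory barriers are implied on SMP */
		return !__sync_bool_compare_and_swap(ptr, oldval, newval);
	}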
@@ -989,17 +992,12 @@ kuser_cmpxchg_fixup:
*/
__kuser_get_tls: @ 0xffff0fe0
-
-#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
- ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0
-#else
- mrc p15, 0, r0, c13, c0, 3 @ read TLS register
-#endif
+ ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
usr_ret lr
-
- .rep 5
- .word 0 @ pad up to __kuser_helper_version
- .endr
+ mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
+ .rep 4
+ .word 0 @ 0xffff0ff0 software TLS value, then
+ .endr @ pad up to __kuser_helper_version
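
__kuser_get_tls now always starts by loading the software TLS word at 0xffff0ff0; on CPUs with the hardware TLS register, early boot code patches in the mrc sequence kept at 0xffff0fe8 (the kuser_get_tls_init hook named in the comment). Userspace keeps calling the helper through its fixed entry point, e.g. (a sketch):

	/* sketch: calling the fixed-address get_tls helper from userspace */
	typedef unsigned int (*kuser_get_tls_t)(void);
	#define __kuser_get_tls (*(kuser_get_tls_t)0xffff0fe0)

	unsigned int read_tls(void)
	{
		return __kuser_get_tls();	/* TLS value returned in r0 */
	}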
/*
* Reference declaration:
@@ -1020,6 +1018,7 @@ __kuser_helper_version: @ 0xffff0ffc
.globl __kuser_helper_end
__kuser_helper_end:
+ THUMB( .thumb )
/*
* Vector stubs.
@@ -1054,17 +1053,23 @@ vector_\name:
@ Prepare for SVC32 mode. IRQs remain disabled.
@
mrs r0, cpsr
- eor r0, r0, #(\mode ^ SVC_MODE)
+ eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
msr spsr_cxsf, r0
@
@ the branch table must immediately follow this code
@
and lr, lr, #0x0f
+ THUMB( adr r0, 1f )
+ THUMB( ldr lr, [r0, lr, lsl #2] )
mov r0, sp
- ldr lr, [pc, lr, lsl #2]
+ ARM( ldr lr, [pc, lr, lsl #2] )
movs pc, lr @ branch to handler in SVC mode
ENDPROC(vector_\name)
+
+ .align 2
+ @ handler addresses follow this label
+1:
.endm
.globl __stubs_start
@@ -1202,14 +1207,16 @@ __stubs_end:
.globl __vectors_start
__vectors_start:
- swi SYS_ERROR0
- b vector_und + stubs_offset
- ldr pc, .LCvswi + stubs_offset
- b vector_pabt + stubs_offset
- b vector_dabt + stubs_offset
- b vector_addrexcptn + stubs_offset
- b vector_irq + stubs_offset
- b vector_fiq + stubs_offset
+ ARM( swi SYS_ERROR0 )
+ THUMB( svc #0 )
+ THUMB( nop )
+ W(b) vector_und + stubs_offset
+ W(ldr) pc, .LCvswi + stubs_offset
+ W(b) vector_pabt + stubs_offset
+ W(b) vector_dabt + stubs_offset
+ W(b) vector_addrexcptn + stubs_offset
+ W(b) vector_irq + stubs_offset
+ W(b) vector_fiq + stubs_offset
.globl __vectors_end
__vectors_end:
@@ -1222,3 +1229,9 @@ cr_alignment:
.space 4
cr_no_alignment:
.space 4
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+ .globl handle_arch_irq
+handle_arch_irq:
+ .space 4
+#endif
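
This .space 4 is the handle_arch_irq word that the new irq_handler macro dereferences. A platform selecting CONFIG_MULTI_IRQ_HANDLER installs its handler during early boot; a sketch (board_init_irq and gic_handle_irq are illustrative names, not part of this patch):

	#include <linux/init.h>

	struct pt_regs;
	extern void (*handle_arch_irq)(struct pt_regs *);
	extern void gic_handle_irq(struct pt_regs *);	/* hypothetical handler */

	static void __init board_init_irq(void)
	{
		handle_arch_irq = gic_handle_irq;	/* read by irq_handler */
	}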