From 7a0ee92b4a510bc2dd026333f90031e883e0cde0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 23 Jun 2011 22:00:20 +0100 Subject: ARM: pm: proc-v7: fix missing struct processor pointers for suspend code Add the missing suspend/resume pointers for the suspend code. This is needed when building for multiple CPUs. Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/mm/proc-v7.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3c38678..e27c011 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -418,9 +418,9 @@ ENTRY(v7_processor_functions) .word cpu_v7_dcache_clean_area .word cpu_v7_switch_mm .word cpu_v7_set_pte_ext - .word 0 - .word 0 - .word 0 + .word cpu_v7_suspend_size + .word cpu_v7_do_suspend + .word cpu_v7_do_resume .size v7_processor_functions, . - v7_processor_functions .section ".rodata" -- cgit v1.1 From 111b20d01346b9635b3223c7af4e40e43bee8dc6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 22 Jun 2011 15:41:58 +0100 Subject: ARM: pm: ensure ARMv7 CPUs save and restore the TLS register Ensure that the TLS register is saved and restored over a suspend cycle, so that userspace programs don't see a corrupted TLS value. Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/mm/proc-v7.S | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index e27c011..089c0b5 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -210,19 +210,21 @@ cpu_v7_name: /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ .globl cpu_v7_suspend_size -.equ cpu_v7_suspend_size, 4 * 8 +.equ cpu_v7_suspend_size, 4 * 9 #ifdef CONFIG_PM_SLEEP ENTRY(cpu_v7_do_suspend) stmfd sp!, {r4 - r11, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID mrc p15, 0, r5, c13, c0, 1 @ Context ID + mrc p15, 0, r6, c13, c0, 3 @ User r/o thread ID + stmia r0!, {r4 - r6} mrc p15, 0, r6, c3, c0, 0 @ Domain ID mrc p15, 0, r7, c2, c0, 0 @ TTB 0 mrc p15, 0, r8, c2, c0, 1 @ TTB 1 mrc p15, 0, r9, c1, c0, 0 @ Control register mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control - stmia r0, {r4 - r11} + stmia r0, {r6 - r11} ldmfd sp!, {r4 - r11, pc} ENDPROC(cpu_v7_do_suspend) @@ -230,9 +232,11 @@ ENTRY(cpu_v7_do_resume) mov ip, #0 mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache - ldmia r0, {r4 - r11} + ldmia r0!, {r4 - r6} mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID mcr p15, 0, r5, c13, c0, 1 @ Context ID + mcr p15, 0, r6, c13, c0, 3 @ User r/o thread ID + ldmia r0, {r6 - r11} mcr p15, 0, r6, c3, c0, 0 @ Domain ID mcr p15, 0, r7, c2, c0, 0 @ TTB 0 mcr p15, 0, r8, c2, c0, 1 @ TTB 1 -- cgit v1.1 From b69874e4f530b0103e507f695c010d00cb85a4df Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jun 2011 18:57:31 +0100 Subject: ARM: pm: arrange for cpu_proc_init() to be called on resume cpu_proc_init() does processor specific initialization, which we do at boot time. We have been omitting to do this on resume, which causes some of this initialization to be skipped. We've also been skipping this on SMP initialization too. Ensure that cpu_proc_init() is always called appropriately by moving it into cpu_init(), and move cpu_init() to a more appropriate point in the boot initialization. 
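A rough C sketch of the arrangement described above, assuming a cpu_init() along the lines of arch/arm/kernel/setup.c; the body is illustrative rather than the actual hunk, which is outside arch/arm/mm and therefore not shown in this series:

	/*
	 * Sketch: cpu_init() gains the processor-specific init call, so boot,
	 * secondary CPU bring-up and resume all pass through the same path.
	 * cpu_proc_init() is the per-CPU-type hook provided by proc-*.S; the
	 * rest of the real cpu_init() (exception-mode stacks etc.) is omitted.
	 */
	void cpu_init(void)
	{
		cpu_proc_init();

		/* ... set up IRQ/ABT/UND stacks for this CPU (omitted) ... */
	}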
Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/mm/proc-sa1100.S | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 184a9c9..e9c4727 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -34,7 +34,7 @@ */ #define DCACHELINESIZE 32 - __INIT + .section .text /* * cpu_sa1100_proc_init() @@ -45,8 +45,6 @@ ENTRY(cpu_sa1100_proc_init) mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland mov pc, lr - .section .text - /* * cpu_sa1100_proc_fin() * -- cgit v1.1 From be020f8618caa0670a2a5b5a5df79549520f7867 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 13:42:01 +0100 Subject: ARM: entry: abort-macro: specify registers to be used for macros Require all callers of abort macros to specify the registers to be used. This improves the documentation at the callsites as to which registers are being used by this assembly code. Signed-off-by: Russell King --- arch/arm/mm/abort-ev4t.S | 2 +- arch/arm/mm/abort-ev5t.S | 4 ++-- arch/arm/mm/abort-ev5tj.S | 4 ++-- arch/arm/mm/abort-ev6.S | 4 ++-- arch/arm/mm/abort-macro.S | 30 +++++++++++++++--------------- 5 files changed, 22 insertions(+), 22 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/abort-ev4t.S b/arch/arm/mm/abort-ev4t.S index b628254..9910123 100644 --- a/arch/arm/mm/abort-ev4t.S +++ b/arch/arm/mm/abort-ev4t.S @@ -22,7 +22,7 @@ ENTRY(v4t_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - do_thumb_abort + do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 ldreq r3, [r2] @ read aborted ARM instruction bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ check write diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index 02251b5..800e8d4 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S @@ -22,10 +22,10 @@ ENTRY(v5t_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - do_thumb_abort + do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 ldreq r3, [r2] @ read aborted ARM instruction bic r1, r1, #1 << 11 @ clear bits 11 of FSR - do_ldrd_abort + do_ldrd_abort tmp=r2, insn=r3 tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 mov pc, lr diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index bce68d6..bcb58d2 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S @@ -25,9 +25,9 @@ ENTRY(v5tj_early_abort) bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #PSR_J_BIT @ Java? movne pc, lr - do_thumb_abort + do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 ldreq r3, [r2] @ read aborted ARM instruction - do_ldrd_abort + do_ldrd_abort tmp=r2, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. mov pc, lr diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 1478aa5..ef526e7 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -35,12 +35,12 @@ ENTRY(v6_early_abort) bic r1, r1, #1 << 11 @ clear bit 11 of FSR tst r3, #PSR_J_BIT @ Java? movne pc, lr - do_thumb_abort + do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 ldreq r3, [r2] @ read aborted ARM instruction #ifdef CONFIG_CPU_ENDIAN_BE8 reveq r3, r3 #endif - do_ldrd_abort + do_ldrd_abort tmp=r2, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. 
mov pc, lr diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index d7cb1bf..8d3b9f9 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S @@ -9,33 +9,33 @@ * */ - .macro do_thumb_abort - tst r3, #PSR_T_BIT + .macro do_thumb_abort, fsr, pc, psr, tmp + tst \psr, #PSR_T_BIT beq not_thumb - ldrh r3, [r2] @ Read aborted Thumb instruction - and r3, r3, # 0xfe00 @ Mask opcode field - cmp r3, # 0x5600 @ Is it ldrsb? - orreq r3, r3, #1 << 11 @ Set L-bit if yes - tst r3, #1 << 11 @ L = 0 -> write - orreq r1, r1, #1 << 11 @ yes. + ldrh \tmp, [\pc] @ Read aborted Thumb instruction + and \tmp, \tmp, # 0xfe00 @ Mask opcode field + cmp \tmp, # 0x5600 @ Is it ldrsb? + orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes + tst \tmp, #1 << 11 @ L = 0 -> write + orreq \psr, \psr, #1 << 11 @ yes. mov pc, lr not_thumb: .endm /* - * We check for the following insturction encoding for LDRD. + * We check for the following instruction encoding for LDRD. * - * [27:25] == 0 + * [27:25] == 000 * [7:4] == 1101 * [20] == 0 */ - .macro do_ldrd_abort - tst r3, #0x0e000000 @ [27:25] == 0 + .macro do_ldrd_abort, tmp, insn + tst \insn, #0x0e000000 @ [27:25] == 0 bne not_ldrd - and r2, r3, #0x000000f0 @ [7:4] == 1101 - cmp r2, #0x000000d0 + and \tmp, \insn, #0x000000f0 @ [7:4] == 1101 + cmp \tmp, #0x000000d0 bne not_ldrd - tst r3, #1 << 20 @ [20] == 0 + tst \insn, #1 << 20 @ [20] == 0 moveq pc, lr not_ldrd: .endm -- cgit v1.1 From 198a0a927ab9c52a68297120ee6dd4e36a975b0e Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 13:51:44 +0100 Subject: ARM: entry: abort-macro: simplify do_ldrd_abort We can test bits 27:25 and 20 of the instruction at the same time; there's no need to separate out the check of bit 20. Signed-off-by: Russell King --- arch/arm/mm/abort-macro.S | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index 8d3b9f9..af97a10 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S @@ -30,12 +30,10 @@ not_thumb: * [20] == 0 */ .macro do_ldrd_abort, tmp, insn - tst \insn, #0x0e000000 @ [27:25] == 0 + tst \insn, #0x0e100000 @ [27:25,20] == 0 bne not_ldrd and \tmp, \insn, #0x000000f0 @ [7:4] == 1101 cmp \tmp, #0x000000d0 - bne not_ldrd - tst \insn, #1 << 20 @ [20] == 0 moveq pc, lr not_ldrd: .endm -- cgit v1.1 From 8b4186160b7894ca4583f702a562856d5d9e9118 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 25 Jun 2011 19:25:02 +0100 Subject: ARM: entry: prefetch abort helper: pass aborted pc in r4 rather than r0 This avoids unnecessary instructions for CPUs which implement the IFAR (instruction fault address register). 
Signed-off-by: Russell King --- arch/arm/mm/pabort-legacy.S | 3 ++- arch/arm/mm/pabort-v6.S | 3 ++- arch/arm/mm/pabort-v7.S | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S index 87970eb..8a5d8aa 100644 --- a/arch/arm/mm/pabort-legacy.S +++ b/arch/arm/mm/pabort-legacy.S @@ -4,7 +4,7 @@ /* * Function: legacy_pabort * - * Params : r0 = address of aborted instruction + * Params : r4 = address of aborted instruction * * Returns : r0 = address of abort * : r1 = Simulated IFSR with section translation fault status @@ -14,6 +14,7 @@ .align 5 ENTRY(legacy_pabort) + mov r0, r4 mov r1, #5 mov pc, lr ENDPROC(legacy_pabort) diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S index 06e3d1e..eaac1cb 100644 --- a/arch/arm/mm/pabort-v6.S +++ b/arch/arm/mm/pabort-v6.S @@ -4,7 +4,7 @@ /* * Function: v6_pabort * - * Params : r0 = address of aborted instruction + * Params : r4 = address of aborted instruction * * Returns : r0 = address of abort * : r1 = IFSR @@ -14,6 +14,7 @@ .align 5 ENTRY(v6_pabort) + mov r0, r4 mrc p15, 0, r1, c5, c0, 1 @ get IFSR mov pc, lr ENDPROC(v6_pabort) diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S index a8b3b30..b515e0b 100644 --- a/arch/arm/mm/pabort-v7.S +++ b/arch/arm/mm/pabort-v7.S @@ -4,7 +4,7 @@ /* * Function: v6_pabort * - * Params : r0 = address of aborted instruction + * Params : r4 = address of aborted instruction * * Returns : r0 = address of abort * : r1 = IFSR -- cgit v1.1 From 02fe2845d6a837ab02f0738f6cf4591a02cc88d4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 25 Jun 2011 11:44:06 +0100 Subject: ARM: entry: avoid enabling interrupts in prefetch/data abort handlers Avoid enabling interrupts if the parent context had interrupts enabled in the abort handler assembly code, and move this into the breakpoint/ page/alignment fault handlers instead. This gets rid of some special-casing for the breakpoint fault handlers from the low level abort handler path. Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/mm/alignment.c | 3 +++ arch/arm/mm/fault.c | 4 ++++ 2 files changed, 7 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 724ba3b..be7c638 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -727,6 +727,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) int isize = 4; int thumb2_32b = 0; + if (interrupts_enabled(regs)) + local_irq_enable(); + instrptr = instruction_pointer(regs); fs = get_fs(); diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index bc0e1d8..20e5d51 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -285,6 +285,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) tsk = current; mm = tsk->mm; + /* Enable interrupts if they were enabled in the parent context. */ + if (interrupts_enabled(regs)) + local_irq_enable(); + /* * If we're in an interrupt or have no user * context, we must not take the fault.. -- cgit v1.1 From 8dfe7ac96fedd4f5219879f63a8a546a33609daf Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 12:37:35 +0100 Subject: ARM: entry: prefetch abort: tail-call the main prefetch abort handler Tail-call the main C prefetch abort handler code from the per-CPU helper code. Also note that the helper function becomes ABI compliant in terms of the registers preserved. 
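In C terms, the tail-call change amounts to the helper invoking the C handler itself instead of returning values for the exception entry code to pass on. A hypothetical sketch (do_PrefetchAbort() is the real handler; the helper body and read_ifsr() are invented for illustration):

	static void v6_pabort_sketch(unsigned long aborted_pc, struct pt_regs *regs)
	{
		unsigned int ifsr = read_ifsr();	/* hypothetical CP15 IFSR accessor */

		/* the real assembly helper ends with "b do_PrefetchAbort" */
		do_PrefetchAbort(aborted_pc, ifsr, regs);
	}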
Signed-off-by: Russell King --- arch/arm/mm/pabort-legacy.S | 9 +++++---- arch/arm/mm/pabort-v6.S | 9 +++++---- arch/arm/mm/pabort-v7.S | 11 ++++++----- 3 files changed, 16 insertions(+), 13 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S index 8a5d8aa..8bbff02 100644 --- a/arch/arm/mm/pabort-legacy.S +++ b/arch/arm/mm/pabort-legacy.S @@ -4,10 +4,11 @@ /* * Function: legacy_pabort * - * Params : r4 = address of aborted instruction + * Params : r2 = pt_regs + * : r4 = address of aborted instruction + * : r5 = psr for parent context * - * Returns : r0 = address of abort - * : r1 = Simulated IFSR with section translation fault status + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current prefetch abort. */ @@ -16,5 +17,5 @@ ENTRY(legacy_pabort) mov r0, r4 mov r1, #5 - mov pc, lr + b do_PrefetchAbort ENDPROC(legacy_pabort) diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S index eaac1cb..9627646 100644 --- a/arch/arm/mm/pabort-v6.S +++ b/arch/arm/mm/pabort-v6.S @@ -4,10 +4,11 @@ /* * Function: v6_pabort * - * Params : r4 = address of aborted instruction + * Params : r2 = pt_regs + * : r4 = address of aborted instruction + * : r5 = psr for parent context * - * Returns : r0 = address of abort - * : r1 = IFSR + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current prefetch abort. */ @@ -16,5 +17,5 @@ ENTRY(v6_pabort) mov r0, r4 mrc p15, 0, r1, c5, c0, 1 @ get IFSR - mov pc, lr + b do_PrefetchAbort ENDPROC(v6_pabort) diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S index b515e0b..875761f 100644 --- a/arch/arm/mm/pabort-v7.S +++ b/arch/arm/mm/pabort-v7.S @@ -2,12 +2,13 @@ #include /* - * Function: v6_pabort + * Function: v7_pabort * - * Params : r4 = address of aborted instruction + * Params : r2 = pt_regs + * : r4 = address of aborted instruction + * : r5 = psr for parent context * - * Returns : r0 = address of abort - * : r1 = IFSR + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current prefetch abort. */ @@ -16,5 +17,5 @@ ENTRY(v7_pabort) mrc p15, 0, r0, c6, c0, 2 @ get IFAR mrc p15, 0, r1, c5, c0, 1 @ get IFSR - mov pc, lr + b do_PrefetchAbort ENDPROC(v7_pabort) -- cgit v1.1 From 3e287bec6fde088bff05ee7f998f53e8ac75b922 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 14:35:07 +0100 Subject: ARM: entry: data abort: arrange for CPU abort helpers to take pc/psr in r4/r5 Re-jig the CPU abort helpers to take the PC/PSR in r4/r5 rather than r2/r3. 
Signed-off-by: Russell King --- arch/arm/mm/abort-ev4.S | 8 +++----- arch/arm/mm/abort-ev4t.S | 8 ++++---- arch/arm/mm/abort-ev5t.S | 8 ++++---- arch/arm/mm/abort-ev5tj.S | 12 +++++------- arch/arm/mm/abort-ev6.S | 12 +++++------- arch/arm/mm/abort-ev7.S | 4 ++-- arch/arm/mm/abort-lv4t.S | 12 ++++++------ arch/arm/mm/abort-nommu.S | 4 ++-- arch/arm/mm/proc-arm6_7.S | 10 +++++----- 9 files changed, 36 insertions(+), 42 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S index 4f18f9e..beb112b 100644 --- a/arch/arm/mm/abort-ev4.S +++ b/arch/arm/mm/abort-ev4.S @@ -3,8 +3,8 @@ /* * Function: v4_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write @@ -21,10 +21,8 @@ ENTRY(v4_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - ldr r3, [r2] @ read aborted ARM instruction + ldr r3, [r4] @ read aborted ARM instruction bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ L = 1 -> write? orreq r1, r1, #1 << 11 @ yes. mov pc, lr - - diff --git a/arch/arm/mm/abort-ev4t.S b/arch/arm/mm/abort-ev4t.S index 9910123..eaa4ac0 100644 --- a/arch/arm/mm/abort-ev4t.S +++ b/arch/arm/mm/abort-ev4t.S @@ -4,8 +4,8 @@ /* * Function: v4t_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write @@ -22,8 +22,8 @@ ENTRY(v4t_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 - ldreq r3, [r2] @ read aborted ARM instruction + do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 + ldreq r3, [r4] @ read aborted ARM instruction bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index 800e8d4..97eee7c 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S @@ -4,8 +4,8 @@ /* * Function: v5t_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write @@ -22,8 +22,8 @@ ENTRY(v5t_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 - ldreq r3, [r2] @ read aborted ARM instruction + do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 + ldreq r3, [r4] @ read aborted ARM instruction bic r1, r1, #1 << 11 @ clear bits 11 of FSR do_ldrd_abort tmp=r2, insn=r3 tst r3, #1 << 20 @ check write diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index bcb58d2..9a365cf 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S @@ -4,8 +4,8 @@ /* * Function: v5tj_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write @@ -23,13 +23,11 @@ ENTRY(v5tj_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR - tst r3, #PSR_J_BIT @ Java? + tst r5, #PSR_J_BIT @ Java? 
movne pc, lr - do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 - ldreq r3, [r2] @ read aborted ARM instruction + do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 + ldreq r3, [r4] @ read aborted ARM instruction do_ldrd_abort tmp=r2, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. mov pc, lr - - diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index ef526e7..52db4a3 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -4,8 +4,8 @@ /* * Function: v6_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write @@ -33,10 +33,10 @@ ENTRY(v6_early_abort) * The test below covers all the write situations, including Java bytecodes */ bic r1, r1, #1 << 11 @ clear bit 11 of FSR - tst r3, #PSR_J_BIT @ Java? + tst r5, #PSR_J_BIT @ Java? movne pc, lr - do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 - ldreq r3, [r2] @ read aborted ARM instruction + do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 + ldreq r3, [r4] @ read aborted ARM instruction #ifdef CONFIG_CPU_ENDIAN_BE8 reveq r3, r3 #endif @@ -44,5 +44,3 @@ ENTRY(v6_early_abort) tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. mov pc, lr - - diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index ec88b15..6cb5143 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S @@ -3,8 +3,8 @@ /* * Function: v7_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index 9fb7b0e..fea7514 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -3,8 +3,8 @@ /* * Function: v4t_late_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write @@ -18,7 +18,7 @@ * picture. Unfortunately, this does happen. We live with it. */ ENTRY(v4t_late_abort) - tst r3, #PSR_T_BIT @ check for thumb mode + tst r5, #PSR_T_BIT @ check for thumb mode #ifdef CONFIG_CPU_CP15_MMU mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR @@ -28,7 +28,7 @@ ENTRY(v4t_late_abort) mov r1, #0 #endif bne .data_thumb_abort - ldr r8, [r2] @ read arm instruction + ldr r8, [r4] @ read arm instruction tst r8, #1 << 20 @ L = 1 -> write? orreq r1, r1, #1 << 11 @ yes. and r7, r8, #15 << 24 @@ -52,7 +52,7 @@ ENTRY(v4t_late_abort) /* e */ b .data_unknown /* f */ .data_unknown: @ Part of jumptable - mov r0, r2 + mov r0, r4 mov r1, r8 mov r2, sp bl baddataabort @@ -159,7 +159,7 @@ ENTRY(v4t_late_abort) b .data_unknown @ F: MUL? .data_thumb_abort: - ldrh r8, [r2] @ read instruction + ldrh r8, [r4] @ read instruction tst r8, #1 << 11 @ L = 1 -> write? 
orreq r1, r1, #1 << 8 @ yes and r7, r8, #15 << 12 diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S index 625e580..9eaef6f 100644 --- a/arch/arm/mm/abort-nommu.S +++ b/arch/arm/mm/abort-nommu.S @@ -3,8 +3,8 @@ /* * Function: nommu_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = 0 (abort address) * : r1 = 0 (FSR) diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 5f79dc4..e7be700 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -29,8 +29,8 @@ ENTRY(cpu_arm7_dcache_clean_area) /* * Function: arm6_7_data_abort () * - * Params : r2 = address of aborted instruction - * : sp = pointer to registers + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Purpose : obtain information about current aborted instruction * @@ -41,7 +41,7 @@ ENTRY(cpu_arm7_dcache_clean_area) ENTRY(cpu_arm7_data_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - ldr r8, [r2] @ read arm instruction + ldr r8, [r4] @ read arm instruction tst r8, #1 << 20 @ L = 0 -> write? orreq r1, r1, #1 << 11 @ yes. and r7, r8, #15 << 24 @@ -65,7 +65,7 @@ ENTRY(cpu_arm7_data_abort) /* e */ b .data_unknown /* f */ .data_unknown: @ Part of jumptable - mov r0, r2 + mov r0, r4 mov r1, r8 mov r2, sp bl baddataabort @@ -74,7 +74,7 @@ ENTRY(cpu_arm7_data_abort) ENTRY(cpu_arm6_data_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - ldr r8, [r2] @ read arm instruction + ldr r8, [r4] @ read arm instruction tst r8, #1 << 20 @ L = 0 -> write? orreq r1, r1, #1 << 11 @ yes. and r7, r8, #14 << 24 -- cgit v1.1 From 0d147db0c127c561f8f9ead9f3c1ec38f89f1040 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 14:42:02 +0100 Subject: ARM: entry: data abort: avoid using r2 in abort helpers This allows us to pass the pt_regs pointer in to these functions ready for tail-calling the abort handler. Signed-off-by: Russell King --- arch/arm/mm/abort-ev5t.S | 2 +- arch/arm/mm/abort-ev5tj.S | 2 +- arch/arm/mm/abort-ev6.S | 2 +- arch/arm/mm/abort-ev7.S | 8 ++++---- arch/arm/mm/abort-lv4t.S | 34 +++++++++++++++++----------------- arch/arm/mm/proc-arm6_7.S | 18 +++++++++--------- 6 files changed, 33 insertions(+), 33 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index 97eee7c..751391a 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S @@ -25,7 +25,7 @@ ENTRY(v5t_early_abort) do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction bic r1, r1, #1 << 11 @ clear bits 11 of FSR - do_ldrd_abort tmp=r2, insn=r3 + do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 mov pc, lr diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index 9a365cf..ccfbc93 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S @@ -27,7 +27,7 @@ ENTRY(v5tj_early_abort) movne pc, lr do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction - do_ldrd_abort tmp=r2, insn=r3 + do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. 
mov pc, lr diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 52db4a3..b64d886 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -40,7 +40,7 @@ ENTRY(v6_early_abort) #ifdef CONFIG_CPU_ENDIAN_BE8 reveq r3, r3 #endif - do_ldrd_abort tmp=r2, insn=r3 + do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. mov pc, lr diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index 6cb5143..6f98b3a 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S @@ -41,13 +41,13 @@ ENTRY(v7_early_abort) mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR isb - mrc p15, 0, r2, c7, c4, 0 @ Read the PAR - and r3, r2, #0x7b @ On translation fault + mrc p15, 0, ip, c7, c4, 0 @ Read the PAR + and r3, ip, #0x7b @ On translation fault cmp r3, #0x0b movne pc, lr bic r1, r1, #0xf @ Fix up FSR FS[5:0] - and r2, r2, #0x7e - orr r1, r1, r2, LSR #1 + and ip, ip, #0x7e + orr r1, r1, ip, LSR #1 #endif mov pc, lr diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index fea7514..d032b1f 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -64,12 +64,12 @@ ENTRY(v4t_late_abort) mov r7, #0x11 orr r7, r7, #0x1100 and r6, r8, r7 - and r2, r8, r7, lsl #1 - add r6, r6, r2, lsr #1 - and r2, r8, r7, lsl #2 - add r6, r6, r2, lsr #2 - and r2, r8, r7, lsl #3 - add r6, r6, r2, lsr #3 + and r9, r8, r7, lsl #1 + add r6, r6, r9, lsr #1 + and r9, r8, r7, lsl #2 + add r6, r6, r9, lsr #2 + and r9, r8, r7, lsl #3 + add r6, r6, r9, lsr #3 add r6, r6, r6, lsr #8 add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. @@ -103,13 +103,13 @@ ENTRY(v4t_late_abort) tst r8, #1 << 21 @ check writeback bit moveq pc, lr @ no writeback -> no fixup .data_arm_lateldrpostconst: - movs r2, r8, lsl #20 @ Get offset + movs r9, r8, lsl #20 @ Get offset moveq pc, lr @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit - subne r7, r7, r2, lsr #20 @ Undo increment - addeq r7, r7, r2, lsr #20 @ Undo decrement + subne r7, r7, r9, lsr #20 @ Undo increment + addeq r7, r7, r9, lsr #20 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' mov pc, lr @@ -194,11 +194,11 @@ ENTRY(v4t_late_abort) tst r8, #1 << 10 beq .data_unknown and r6, r8, #0x55 @ hweight8(r8) + R bit - and r2, r8, #0xaa - add r6, r6, r2, lsr #1 - and r2, r6, #0xcc + and r9, r8, #0xaa + add r6, r6, r9, lsr #1 + and r9, r6, #0xcc and r6, r6, #0x33 - add r6, r6, r2, lsr #2 + add r6, r6, r9, lsr #2 movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) adc r6, r6, r6, lsr #4 @ high + low nibble + R bit and r6, r6, #15 @ number of regs to transfer @@ -211,11 +211,11 @@ ENTRY(v4t_late_abort) .data_thumb_ldmstm: and r6, r8, #0x55 @ hweight8(r8) - and r2, r8, #0xaa - add r6, r6, r2, lsr #1 - and r2, r6, #0xcc + and r9, r8, #0xaa + add r6, r6, r9, lsr #1 + and r9, r6, #0xcc and r6, r6, #0x33 - add r6, r6, r2, lsr #2 + add r6, r6, r9, lsr #2 add r6, r6, r6, lsr #4 and r5, r8, #7 << 8 ldr r7, [sp, r5, lsr #6] diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index e7be700..d4c328e 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -87,12 +87,12 @@ ENTRY(cpu_arm6_data_abort) mov r7, #0x11 orr r7, r7, #0x1100 and r6, r8, r7 - and r2, r8, r7, lsl #1 - add r6, r6, r2, lsr #1 - and r2, r8, r7, lsl #2 - add r6, r6, r2, lsr #2 - and r2, r8, r7, lsl #3 - add r6, r6, r2, lsr #3 + and r9, r8, r7, lsl #1 + add r6, r6, r9, lsr #1 + and r9, r8, r7, lsl #2 + 
add r6, r6, r9, lsr #2 + and r9, r8, r7, lsl #3 + add r6, r6, r9, lsr #3 add r6, r6, r6, lsr #8 add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. @@ -117,13 +117,13 @@ ENTRY(cpu_arm6_data_abort) tst r8, #1 << 21 @ check writeback bit moveq pc, lr @ no writeback -> no fixup .data_arm_lateldrpostconst: - movs r2, r8, lsl #20 @ Get offset + movs r9, r8, lsl #20 @ Get offset moveq pc, lr @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit - subne r7, r7, r2, lsr #20 @ Undo increment - addeq r7, r7, r2, lsr #20 @ Undo decrement + subne r7, r7, r9, lsr #20 @ Undo increment + addeq r7, r7, r9, lsr #20 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' mov pc, lr -- cgit v1.1 From da7404725781bc7c736e10cae5521e5604e222a5 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 16:01:26 +0100 Subject: ARM: entry: data abort: tail-call the main data abort handler Tail-call the main C data abort handler code from the per-CPU helper code. Update the comments in the code wrt the new calling and return register state. Signed-off-by: Russell King --- arch/arm/mm/abort-ev4.S | 11 ++++------- arch/arm/mm/abort-ev4t.S | 11 ++++------- arch/arm/mm/abort-ev5t.S | 11 ++++------- arch/arm/mm/abort-ev5tj.S | 13 +++++-------- arch/arm/mm/abort-ev6.S | 13 +++++-------- arch/arm/mm/abort-ev7.S | 15 ++++++--------- arch/arm/mm/abort-lv4t.S | 43 +++++++++++++++++++++---------------------- arch/arm/mm/abort-macro.S | 4 ++-- arch/arm/mm/abort-nommu.S | 8 ++++---- arch/arm/mm/proc-arm6_7.S | 29 ++++++++++++++--------------- 10 files changed, 69 insertions(+), 89 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S index beb112b..54473cd4 100644 --- a/arch/arm/mm/abort-ev4.S +++ b/arch/arm/mm/abort-ev4.S @@ -3,14 +3,11 @@ /* * Function: v4_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -25,4 +22,4 @@ ENTRY(v4_early_abort) bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ L = 1 -> write? orreq r1, r1, #1 << 11 @ yes. - mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-ev4t.S b/arch/arm/mm/abort-ev4t.S index eaa4ac0..9da704e 100644 --- a/arch/arm/mm/abort-ev4t.S +++ b/arch/arm/mm/abort-ev4t.S @@ -4,14 +4,11 @@ /* * Function: v4t_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. 
This means we might cause a data @@ -27,4 +24,4 @@ ENTRY(v4t_early_abort) bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 - mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index 751391a..a0908d4 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S @@ -4,14 +4,11 @@ /* * Function: v5t_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -28,4 +25,4 @@ ENTRY(v5t_early_abort) do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 - mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index ccfbc93..4006b7a 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S @@ -4,14 +4,11 @@ /* * Function: v5tj_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -24,10 +21,10 @@ ENTRY(v5tj_early_abort) mrc p15, 0, r0, c6, c0, 0 @ get FAR bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r5, #PSR_J_BIT @ Java? - movne pc, lr + bne do_DataAbort do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. - mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index b64d886..ff1f7cc 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -4,14 +4,11 @@ /* * Function: v6_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -34,7 +31,7 @@ ENTRY(v6_early_abort) */ bic r1, r1, #1 << 11 @ clear bit 11 of FSR tst r5, #PSR_J_BIT @ Java? - movne pc, lr + bne do_DataAbort do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction #ifdef CONFIG_CPU_ENDIAN_BE8 @@ -43,4 +40,4 @@ ENTRY(v6_early_abort) do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. 
- mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index 6f98b3a..7033752 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S @@ -3,14 +3,11 @@ /* * Function: v7_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. */ @@ -37,18 +34,18 @@ ENTRY(v7_early_abort) ldr r3, =0x40d @ On permission fault and r3, r1, r3 cmp r3, #0x0d - movne pc, lr + bne do_DataAbort mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR isb mrc p15, 0, ip, c7, c4, 0 @ Read the PAR and r3, ip, #0x7b @ On translation fault cmp r3, #0x0b - movne pc, lr + bne do_DataAbort bic r1, r1, #0xf @ Fix up FSR FS[5:0] and ip, ip, #0x7e orr r1, r1, ip, LSR #1 #endif - mov pc, lr + b do_DataAbort ENDPROC(v7_early_abort) diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index d032b1f..d432f31 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -3,7 +3,8 @@ /* * Function: v4t_late_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * * Returns : r0 = address of abort @@ -47,20 +48,18 @@ ENTRY(v4t_late_abort) /* 9 */ b .data_arm_ldmstm @ ldm*b rn, /* a */ b .data_unknown /* b */ b .data_unknown -/* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m -/* d */ mov pc, lr @ ldc rd, [rn, #m] +/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m +/* d */ b do_DataAbort @ ldc rd, [rn, #m] /* e */ b .data_unknown /* f */ .data_unknown: @ Part of jumptable mov r0, r4 mov r1, r8 - mov r2, sp - bl baddataabort - b ret_from_exception + b baddataabort .data_arm_ldmstm: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup mov r7, #0x11 orr r7, r7, #0x1100 and r6, r8, r7 @@ -79,11 +78,11 @@ ENTRY(v4t_late_abort) subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + b do_DataAbort .data_arm_lateldrhpre: tst r8, #1 << 21 @ Check writeback bit - moveq pc, lr @ No writeback -> no fixup + beq do_DataAbort @ No writeback -> no fixup .data_arm_lateldrhpost: and r5, r8, #0x00f @ get Rm / low nibble of immediate value tst r8, #1 << 22 @ if (immediate offset) @@ -97,25 +96,25 @@ ENTRY(v4t_late_abort) subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + b do_DataAbort .data_arm_lateldrpreconst: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostconst: movs r9, r8, lsl #20 @ Get offset - moveq pc, lr @ zero -> no fixup + beq do_DataAbort @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r9, lsr #20 @ Undo increment addeq r7, r7, r9, lsr #20 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + b do_DataAbort .data_arm_lateldrprereg: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostreg: 
and r7, r8, #15 @ Extract 'm' from instruction ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' @@ -172,10 +171,10 @@ ENTRY(v4t_late_abort) /* 3 */ b .data_unknown /* 4 */ b .data_unknown /* 5 */ b .data_thumb_reg -/* 6 */ mov pc, lr -/* 7 */ mov pc, lr -/* 8 */ mov pc, lr -/* 9 */ mov pc, lr +/* 6 */ b do_DataAbort +/* 7 */ b do_DataAbort +/* 8 */ b do_DataAbort +/* 9 */ b do_DataAbort /* A */ b .data_unknown /* B */ b .data_thumb_pushpop /* C */ b .data_thumb_ldmstm @@ -185,10 +184,10 @@ ENTRY(v4t_late_abort) .data_thumb_reg: tst r8, #1 << 9 - moveq pc, lr + beq do_DataAbort tst r8, #1 << 10 @ If 'S' (signed) bit is set movne r1, #0 @ it must be a load instr - mov pc, lr + b do_DataAbort .data_thumb_pushpop: tst r8, #1 << 10 @@ -207,7 +206,7 @@ ENTRY(v4t_late_abort) addeq r7, r7, r6, lsl #2 @ increment SP if PUSH subne r7, r7, r6, lsl #2 @ decrement SP if POP str r7, [sp, #13 << 2] - mov pc, lr + b do_DataAbort .data_thumb_ldmstm: and r6, r8, #0x55 @ hweight8(r8) @@ -222,4 +221,4 @@ ENTRY(v4t_late_abort) and r6, r6, #15 @ number of regs to transfer sub r7, r7, r6, lsl #2 @ always decrement str r7, [sp, r5, lsr #6] - mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index af97a10..52162d5 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S @@ -18,7 +18,7 @@ orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes tst \tmp, #1 << 11 @ L = 0 -> write orreq \psr, \psr, #1 << 11 @ yes. - mov pc, lr + b do_DataAbort not_thumb: .endm @@ -34,7 +34,7 @@ not_thumb: bne not_ldrd and \tmp, \insn, #0x000000f0 @ [7:4] == 1101 cmp \tmp, #0x000000d0 - moveq pc, lr + beq do_DataAbort not_ldrd: .endm diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S index 9eaef6f..119cb47 100644 --- a/arch/arm/mm/abort-nommu.S +++ b/arch/arm/mm/abort-nommu.S @@ -3,11 +3,11 @@ /* * Function: nommu_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = 0 (abort address) - * : r1 = 0 (FSR) + * Returns : r4 - r11, r13 preserved * * Note: There is no FSR/FAR on !CPU_CP15_MMU cores. * Just fill zero into the registers. 
@@ -16,5 +16,5 @@ ENTRY(nommu_early_abort) mov r0, #0 @ clear r0, r1 (no FSR/FAR) mov r1, #0 - mov pc, lr + b do_DataAbort ENDPROC(nommu_early_abort) diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index d4c328e..d755d5b 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -29,7 +29,8 @@ ENTRY(cpu_arm7_dcache_clean_area) /* * Function: arm6_7_data_abort () * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * * Purpose : obtain information about current aborted instruction @@ -49,7 +50,7 @@ ENTRY(cpu_arm7_data_abort) nop /* 0 */ b .data_unknown -/* 1 */ mov pc, lr @ swp +/* 1 */ b do_DataAbort @ swp /* 2 */ b .data_unknown /* 3 */ b .data_unknown /* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m @@ -60,16 +61,14 @@ ENTRY(cpu_arm7_data_abort) /* 9 */ b .data_arm_ldmstm @ ldm*b rn, /* a */ b .data_unknown /* b */ b .data_unknown -/* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m -/* d */ mov pc, lr @ ldc rd, [rn, #m] +/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m +/* d */ b do_DataAbort @ ldc rd, [rn, #m] /* e */ b .data_unknown /* f */ .data_unknown: @ Part of jumptable mov r0, r4 mov r1, r8 - mov r2, sp - bl baddataabort - b ret_from_exception + b baddataabort ENTRY(cpu_arm6_data_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR @@ -79,11 +78,11 @@ ENTRY(cpu_arm6_data_abort) orreq r1, r1, #1 << 11 @ yes. and r7, r8, #14 << 24 teq r7, #8 << 24 @ was it ldm/stm - movne pc, lr + bne do_DataAbort .data_arm_ldmstm: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup mov r7, #0x11 orr r7, r7, #0x1100 and r6, r8, r7 @@ -102,7 +101,7 @@ ENTRY(cpu_arm6_data_abort) subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + b do_DataAbort .data_arm_apply_r6_and_rn: and r5, r8, #15 << 16 @ Extract 'n' from instruction @@ -111,25 +110,25 @@ ENTRY(cpu_arm6_data_abort) subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + b do_DataAbort .data_arm_lateldrpreconst: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostconst: movs r9, r8, lsl #20 @ Get offset - moveq pc, lr @ zero -> no fixup + beq do_DataAbort @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r9, lsr #20 @ Undo increment addeq r7, r7, r9, lsr #20 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + b do_DataAbort .data_arm_lateldrprereg: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' -- cgit v1.1 From e22c12f9146d50ee6b0cf97db46b3310409f64e6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Jun 2011 09:52:54 +0100 Subject: ARM: entry: data abort: use r2 as base of pt_regs rather than stack Now that we pass r2 into these helper functions as the pointer to pt_regs, use r2 as the base of the registers on the stack rather than using the stack pointer directly. 
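Expressed in C, the idea is to index the saved registers through the pt_regs pointer now passed in r2, rather than assuming they sit at the current stack pointer. A hedged illustration only (writeback_fixup() and its parameters are invented; the real fixup is the "ldr r7, [r2, r5, lsr #14]"-style addressing in the assembly below):

	static void writeback_fixup(struct pt_regs *regs, unsigned int rn, long delta)
	{
		/* undo the base-register writeback of the aborted LDM/STM */
		regs->uregs[rn] += delta;	/* previously addressed relative to sp */
	}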
Signed-off-by: Russell King --- arch/arm/mm/abort-lv4t.S | 24 ++++++++++++------------ arch/arm/mm/proc-arm6_7.S | 14 +++++++------- 2 files changed, 19 insertions(+), 19 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index d432f31..921aaab 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -73,11 +73,11 @@ ENTRY(v4t_late_abort) add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrhpre: @@ -88,14 +88,14 @@ ENTRY(v4t_late_abort) tst r8, #1 << 22 @ if (immediate offset) andne r6, r8, #0xf00 @ { immediate high nibble orrne r6, r5, r6, lsr #4 @ combine nibbles } else - ldreq r6, [sp, r5, lsl #2] @ { load Rm value } + ldreq r6, [r2, r5, lsl #2] @ { load Rm value } .data_arm_apply_r6_and_rn: and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrpreconst: @@ -105,11 +105,11 @@ ENTRY(v4t_late_abort) movs r9, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r9, lsr #20 @ Undo increment addeq r7, r7, r9, lsr #20 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrprereg: @@ -117,7 +117,7 @@ ENTRY(v4t_late_abort) beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction - ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' + ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' mov r5, r8, lsr #7 @ get shift count ands r5, r5, #31 and r7, r8, #0x70 @ get shift type @@ -201,11 +201,11 @@ ENTRY(v4t_late_abort) movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) adc r6, r6, r6, lsr #4 @ high + low nibble + R bit and r6, r6, #15 @ number of regs to transfer - ldr r7, [sp, #13 << 2] + ldr r7, [r2, #13 << 2] tst r8, #1 << 11 addeq r7, r7, r6, lsl #2 @ increment SP if PUSH subne r7, r7, r6, lsl #2 @ decrement SP if POP - str r7, [sp, #13 << 2] + str r7, [r2, #13 << 2] b do_DataAbort .data_thumb_ldmstm: @@ -217,8 +217,8 @@ ENTRY(v4t_late_abort) add r6, r6, r9, lsr #2 add r6, r6, r6, lsr #4 and r5, r8, #7 << 8 - ldr r7, [sp, r5, lsr #6] + ldr r7, [r2, r5, lsr #6] and r6, r6, #15 @ number of regs to transfer sub r7, r7, r6, lsl #2 @ always decrement - str r7, [sp, r5, lsr #6] + str r7, [r2, r5, lsr #6] b do_DataAbort diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index d755d5b..141906e 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -96,20 +96,20 @@ ENTRY(cpu_arm6_data_abort) add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. 
and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_apply_r6_and_rn: and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrpreconst: @@ -119,11 +119,11 @@ ENTRY(cpu_arm6_data_abort) movs r9, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r9, lsr #20 @ Undo increment addeq r7, r7, r9, lsr #20 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrprereg: @@ -131,7 +131,7 @@ ENTRY(cpu_arm6_data_abort) beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction - ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' + ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' mov r5, r8, lsr #7 @ get shift count ands r5, r5, #31 and r7, r8, #0x70 @ get shift type -- cgit v1.1 From 108f6af0a82cf3b61f3ac6728e2241805a935b64 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Jun 2011 12:23:11 +0100 Subject: ARM: entry: data abort: always use r6 for offset Signed-off-by: Russell King --- arch/arm/mm/abort-lv4t.S | 6 +++--- arch/arm/mm/proc-arm6_7.S | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index 921aaab..54b6d27 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -102,13 +102,13 @@ ENTRY(v4t_late_abort) tst r8, #1 << 21 @ check writeback bit beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostconst: - movs r9, r8, lsl #20 @ Get offset + movs r6, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit - subne r7, r7, r9, lsr #20 @ Undo increment - addeq r7, r7, r9, lsr #20 @ Undo decrement + subne r7, r7, r6, lsr #20 @ Undo increment + addeq r7, r7, r6, lsr #20 @ Undo decrement str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 141906e..4d96311 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -116,13 +116,13 @@ ENTRY(cpu_arm6_data_abort) tst r8, #1 << 21 @ check writeback bit beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostconst: - movs r9, r8, lsl #20 @ Get offset + movs r6, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit - subne r7, r7, r9, lsr #20 @ Undo increment - addeq r7, r7, r9, lsr #20 @ Undo decrement + subne r7, r7, r6, lsr #20 @ Undo increment + addeq r7, r7, r6, lsr #20 @ 
Undo decrement str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort -- cgit v1.1 From 40f0b90a2f16f433f9afbfef4b7c312efb54e933 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Jun 2011 12:27:47 +0100 Subject: ARM: entry: data abort: ensure r5 is preserved by abort functions Signed-off-by: Russell King --- arch/arm/mm/abort-lv4t.S | 48 ++++++++++++++++++++++------------------------- arch/arm/mm/proc-arm6_7.S | 33 ++++++++++++++++---------------- 2 files changed, 38 insertions(+), 43 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index 54b6d27..f398258 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -7,11 +7,7 @@ * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4-r5, r10-r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -72,30 +68,30 @@ ENTRY(v4t_late_abort) add r6, r6, r6, lsr #8 add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement - str r7, [r2, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r9, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrhpre: tst r8, #1 << 21 @ Check writeback bit beq do_DataAbort @ No writeback -> no fixup .data_arm_lateldrhpost: - and r5, r8, #0x00f @ get Rm / low nibble of immediate value + and r9, r8, #0x00f @ get Rm / low nibble of immediate value tst r8, #1 << 22 @ if (immediate offset) andne r6, r8, #0xf00 @ { immediate high nibble - orrne r6, r5, r6, lsr #4 @ combine nibbles } else - ldreq r6, [r2, r5, lsl #2] @ { load Rm value } + orrne r6, r9, r6, lsr #4 @ combine nibbles } else + ldreq r6, [r2, r9, lsl #2] @ { load Rm value } .data_arm_apply_r6_and_rn: - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement - str r7, [r2, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r9, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrpreconst: @@ -104,12 +100,12 @@ ENTRY(v4t_late_abort) .data_arm_lateldrpostconst: movs r6, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsr #20 @ Undo increment addeq r7, r7, r6, lsr #20 @ Undo decrement - str r7, [r2, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r9, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrprereg: @@ -118,14 +114,14 @@ ENTRY(v4t_late_abort) .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' - mov r5, r8, lsr #7 @ get shift count - ands r5, r5, #31 + mov r9, r8, lsr #7 @ get shift count + ands r9, 
r9, #31 and r7, r8, #0x70 @ get shift type orreq r7, r7, #8 @ shift count = 0 add pc, pc, r7 nop - mov r6, r6, lsl r5 @ 0: LSL #!0 + mov r6, r6, lsl r9 @ 0: LSL #!0 b .data_arm_apply_r6_and_rn b .data_arm_apply_r6_and_rn @ 1: LSL #0 nop @@ -133,7 +129,7 @@ ENTRY(v4t_late_abort) nop b .data_unknown @ 3: MUL? nop - mov r6, r6, lsr r5 @ 4: LSR #!0 + mov r6, r6, lsr r9 @ 4: LSR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, lsr #32 @ 5: LSR #32 b .data_arm_apply_r6_and_rn @@ -141,7 +137,7 @@ ENTRY(v4t_late_abort) nop b .data_unknown @ 7: MUL? nop - mov r6, r6, asr r5 @ 8: ASR #!0 + mov r6, r6, asr r9 @ 8: ASR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, asr #32 @ 9: ASR #32 b .data_arm_apply_r6_and_rn @@ -149,7 +145,7 @@ ENTRY(v4t_late_abort) nop b .data_unknown @ B: MUL? nop - mov r6, r6, ror r5 @ C: ROR #!0 + mov r6, r6, ror r9 @ C: ROR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, rrx @ D: RRX b .data_arm_apply_r6_and_rn @@ -216,9 +212,9 @@ ENTRY(v4t_late_abort) and r6, r6, #0x33 add r6, r6, r9, lsr #2 add r6, r6, r6, lsr #4 - and r5, r8, #7 << 8 - ldr r7, [r2, r5, lsr #6] + and r9, r8, #7 << 8 + ldr r7, [r2, r9, lsr #6] and r6, r6, #15 @ number of regs to transfer sub r7, r7, r6, lsl #2 @ always decrement - str r7, [r2, r5, lsr #6] + str r7, [r2, r9, lsr #6] b do_DataAbort diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 4d96311..50e3543 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -35,8 +35,7 @@ ENTRY(cpu_arm7_dcache_clean_area) * * Purpose : obtain information about current aborted instruction * - * Returns : r0 = address of abort - * : r1 = FSR + * Returns : r4-r5, r10-r11, r13 preserved */ ENTRY(cpu_arm7_data_abort) @@ -95,21 +94,21 @@ ENTRY(cpu_arm6_data_abort) add r6, r6, r6, lsr #8 add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. 
- and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement - str r7, [r2, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r9, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_apply_r6_and_rn: - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement - str r7, [r2, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r9, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrpreconst: @@ -118,12 +117,12 @@ ENTRY(cpu_arm6_data_abort) .data_arm_lateldrpostconst: movs r6, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsr #20 @ Undo increment addeq r7, r7, r6, lsr #20 @ Undo decrement - str r7, [r2, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r9, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrprereg: @@ -132,14 +131,14 @@ ENTRY(cpu_arm6_data_abort) .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' - mov r5, r8, lsr #7 @ get shift count - ands r5, r5, #31 + mov r9, r8, lsr #7 @ get shift count + ands r9, r9, #31 and r7, r8, #0x70 @ get shift type orreq r7, r7, #8 @ shift count = 0 add pc, pc, r7 nop - mov r6, r6, lsl r5 @ 0: LSL #!0 + mov r6, r6, lsl r9 @ 0: LSL #!0 b .data_arm_apply_r6_and_rn b .data_arm_apply_r6_and_rn @ 1: LSL #0 nop @@ -147,7 +146,7 @@ ENTRY(cpu_arm6_data_abort) nop b .data_unknown @ 3: MUL? nop - mov r6, r6, lsr r5 @ 4: LSR #!0 + mov r6, r6, lsr r9 @ 4: LSR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, lsr #32 @ 5: LSR #32 b .data_arm_apply_r6_and_rn @@ -155,7 +154,7 @@ ENTRY(cpu_arm6_data_abort) nop b .data_unknown @ 7: MUL? nop - mov r6, r6, asr r5 @ 8: ASR #!0 + mov r6, r6, asr r9 @ 8: ASR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, asr #32 @ 9: ASR #32 b .data_arm_apply_r6_and_rn @@ -163,7 +162,7 @@ ENTRY(cpu_arm6_data_abort) nop b .data_unknown @ B: MUL? nop - mov r6, r6, ror r5 @ C: ROR #!0 + mov r6, r6, ror r9 @ C: ROR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, rrx @ D: RRX b .data_arm_apply_r6_and_rn -- cgit v1.1 From 0371d3f7e8f1cddaee1f215e42c09a40e235d810 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 5 Jul 2011 19:58:29 +0100 Subject: ARM: move memory layout sanity checking before meminfo initialization Ensure that the meminfo array is sanity checked before we pass the memory to memblock. This helps to ensure that memblock and meminfo agree on the dimensions of memory, especially when more memory is passed than the kernel can deal with. 
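A minimal standalone C sketch of the ordering this change enforces: trim the memory banks against a lowmem limit first, then hand the result to the early allocator, so both ends see the same picture. The bank layout, the limit value and the "allocator" below are made-up stand-ins, not the kernel's meminfo/memblock code.

    #include <stdio.h>
    #include <stdint.h>

    struct bank { uint64_t start, size; };

    /* Clamp every bank so nothing extends past what the kernel can map. */
    static void sanity_check_banks(struct bank *b, int n, uint64_t limit)
    {
        for (int i = 0; i < n; i++) {
            if (b[i].start >= limit)
                b[i].size = 0;                       /* entirely unusable */
            else if (b[i].start + b[i].size > limit)
                b[i].size = limit - b[i].start;      /* trim the overhang */
        }
    }

    /* Stand-in for registering the checked banks with the early allocator. */
    static void register_banks(const struct bank *b, int n)
    {
        for (int i = 0; i < n; i++)
            if (b[i].size)
                printf("region %#llx..%#llx\n",
                       (unsigned long long)b[i].start,
                       (unsigned long long)(b[i].start + b[i].size));
    }

    int main(void)
    {
        struct bank banks[] = {
            { 0x80000000ULL, 0x40000000ULL },   /* fits below the limit */
            { 0xc0000000ULL, 0x80000000ULL },   /* extends past the limit */
        };
        sanity_check_banks(banks, 2, 0xf0000000ULL);   /* check first ...   */
        register_banks(banks, 2);                      /* ... then register */
        return 0;
    }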
Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 5 +++-- arch/arm/mm/nommu.c | 4 ++++ 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9d9e736..594d677 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -759,7 +759,7 @@ early_param("vmalloc", early_vmalloc); static phys_addr_t lowmem_limit __initdata = 0; -static void __init sanity_check_meminfo(void) +void __init sanity_check_meminfo(void) { int i, j, highmem = 0; @@ -1032,8 +1032,9 @@ void __init paging_init(struct machine_desc *mdesc) { void *zero_page; + memblock_set_current_limit(lowmem_limit); + build_mem_type_table(); - sanity_check_meminfo(); prepare_page_table(); map_lowmem(); devicemaps_init(mdesc); diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 687d023..941a98c 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -27,6 +27,10 @@ void __init arm_mm_memblock_reserve(void) memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); } +void __init sanity_check_meminfo(void) +{ +} + /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. -- cgit v1.1 From 38a8914f9ac2379293944f613e6ca24b61373de8 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 1 Jul 2011 14:36:19 +0100 Subject: ARM: 6987/1: l2x0: fix disabling function to avoid deadlock The l2x0_disable function attempts to writel with the l2x0_lock held. This results in deadlock when the writel contains an outer_sync call for the platform since the l2x0_lock is already held by the disable function. A further problem is that disabling the L2 without flushing it first can lead to the spin_lock operation becoming visible after the spin_unlock, causing any subsequent L2 maintenance to deadlock. This patch replaces the writel with a call to writel_relaxed in the disabling code and adds a flush before disabling in the control register, preventing livelock from occurring. 
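The fix boils down to two rules: never call a helper that re-takes the cache lock from inside the locked region, and flush while the cache is still enabled before clearing the enable bit. A small standalone C model of that pattern, with a pthread mutex standing in for the spinlock and plain variables for the registers; none of this is the actual driver code.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t l2_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int l2_ctrl = 1;                 /* 1 = cache enabled */

    /* Analogue of a write that implies a sync needing l2_lock itself;
     * calling it with the lock already held would deadlock. */
    static void write_with_sync(unsigned int v)
    {
        pthread_mutex_lock(&l2_lock);
        l2_ctrl = v;
        pthread_mutex_unlock(&l2_lock);
    }

    /* Analogue of a relaxed write: no implicit sync, safe under the lock. */
    static void write_relaxed(unsigned int v)
    {
        l2_ctrl = v;
    }

    /* Clean and invalidate all ways; the caller already holds l2_lock. */
    static void flush_all_locked(void)
    {
        /* ... issue maintenance operations and wait for completion ... */
    }

    static void l2_disable(void)
    {
        pthread_mutex_lock(&l2_lock);
        flush_all_locked();          /* flush while still enabled */
        write_relaxed(0);            /* clear the enable bit without re-locking */
        pthread_mutex_unlock(&l2_lock);
    }

    int main(void)
    {
        write_with_sync(1);          /* fine here: the lock is not held */
        l2_disable();
        printf("ctrl=%u\n", l2_ctrl);
        return 0;
    }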
Acked-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index ef59099..44c0867 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -120,17 +120,22 @@ static void l2x0_cache_sync(void) spin_unlock_irqrestore(&l2x0_lock, flags); } -static void l2x0_flush_all(void) +static void __l2x0_flush_all(void) { - unsigned long flags; - - /* clean all ways */ - spin_lock_irqsave(&l2x0_lock, flags); debug_writel(0x03); writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); cache_sync(); debug_writel(0x00); +} + +static void l2x0_flush_all(void) +{ + unsigned long flags; + + /* clean all ways */ + spin_lock_irqsave(&l2x0_lock, flags); + __l2x0_flush_all(); spin_unlock_irqrestore(&l2x0_lock, flags); } @@ -266,7 +271,9 @@ static void l2x0_disable(void) unsigned long flags; spin_lock_irqsave(&l2x0_lock, flags); - writel(0, l2x0_base + L2X0_CTRL); + __l2x0_flush_all(); + writel_relaxed(0, l2x0_base + L2X0_CTRL); + dsb(); spin_unlock_irqrestore(&l2x0_lock, flags); } -- cgit v1.1 From 3835d69a6c7048a28d0aea3cb8403d5e83a0f867 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 6 Jul 2011 10:39:34 +0100 Subject: ARM: vmlinux.lds: move init sections between text and data sections Place the init sections between the text and data sections. This means all code is grouped together at the beginning of the kernel image, and all data is at the end of the image. This avoids problems with the 24-bit branch instruction relocations becoming invalid with large initramfs images. Acked-by: Nicolas Pitre Tested-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/mm/init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c19571c..b8e8912 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -639,8 +639,8 @@ void __init mem_init(void) " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" - " .init : 0x%p" " - 0x%p" " (%4d kB)\n" " .text : 0x%p" " - 0x%p" " (%4d kB)\n" + " .init : 0x%p" " - 0x%p" " (%4d kB)\n" " .data : 0x%p" " - 0x%p" " (%4d kB)\n" " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", @@ -662,8 +662,8 @@ void __init mem_init(void) #endif MLM(MODULES_VADDR, MODULES_END), - MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_text, _etext), + MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_sdata, _edata), MLK_ROUNDUP(__bss_start, __bss_stop)); -- cgit v1.1 From 54d525735140e930d87c5afd1b0b3393ffdac021 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 7 Jul 2011 18:43:36 +0100 Subject: ARM: 6996/1: mm: Poison freed init memory Poisoning __init marked memory can be useful when tracking down obscure memory corruption bugs. Therefore, poison init memory with 0xe7fddef0 to catch bugs earlier. The poison value is an undefined instruction in ARM mode and branch to an undefined instruction in Thumb mode. 
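A standalone illustration of the poisoning itself: fill the freed region with the 32-bit poison word so that anything still executing or reading it trips over it loudly. Only the constant comes from the patch; the buffer is a placeholder and the loop here simply fills every full word.

    #include <stdint.h>
    #include <stdio.h>

    #define POISON_WORD 0xe7fddef0u

    static void poison_mem(void *s, size_t count)
    {
        uint32_t *p = s;
        while (count >= sizeof(*p)) {        /* poison whole 32-bit words */
            *p++ = POISON_WORD;
            count -= sizeof(*p);
        }
    }

    int main(void)
    {
        uint32_t freed[4] = { 0 };
        poison_mem(freed, sizeof(freed));
        printf("%#x\n", (unsigned int)freed[3]);   /* prints 0xe7fddef0 */
        return 0;
    }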
Signed-off-by: Stephen Boyd Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/init.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 2c2cce9..fdc87f9 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -416,6 +416,17 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s) return pages; } +/* + * Poison init memory with an undefined instruction (ARM) or a branch to an + * undefined instruction (Thumb). + */ +static inline void poison_init_mem(void *s, size_t count) +{ + u32 *p = (u32 *)s; + while ((count = count - 4)) + *p++ = 0xe7fddef0; +} + static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) { @@ -696,11 +707,13 @@ void free_initmem(void) #ifdef CONFIG_HAVE_TCM extern char __tcm_start, __tcm_end; + poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), __phys_to_pfn(__pa(&__tcm_end)), "TCM link"); #endif + poison_init_mem(__init_begin, __init_end - __init_begin); if (!machine_is_integrator() && !machine_is_cintegrator()) totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), __phys_to_pfn(__pa(__init_end)), @@ -713,10 +726,12 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { - if (!keep_initrd) + if (!keep_initrd) { + poison_init_mem((void *)start, PAGE_ALIGN(end) - start); totalram_pages += free_area(__phys_to_pfn(__pa(start)), __phys_to_pfn(__pa(end)), "initrd"); + } } static int __init keepinitrd_setup(char *__unused) -- cgit v1.1 From c7e89b16eb90e7bdf0d71bd5ba265ce8d424f30b Mon Sep 17 00:00:00 2001 From: Heechul Yun Date: Fri, 8 Jul 2011 13:32:34 +0100 Subject: ARM: 6995/2: mm: remove unnecessary cache flush on v6 copypage Originally introduced to maintain coherency between icache and dcache in v6 nonaliasing mode. This is now handled by __sync_icache_dcache since c0177800, therefore unnecessary in this function. Signed-off-by: Heechul Yun Acked-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/mm/copypage-v6.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index bdba6c6..63cca00 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -41,7 +41,6 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, kfrom = kmap_atomic(from, KM_USER0); kto = kmap_atomic(to, KM_USER1); copy_page(kto, kfrom); - __cpuc_flush_dcache_area(kto, PAGE_SIZE); kunmap_atomic(kto, KM_USER1); kunmap_atomic(kfrom, KM_USER0); } -- cgit v1.1 From 022ae537b23cb14a391565e9ad9e9945f4b17138 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 8 Jul 2011 21:26:59 +0100 Subject: ARM: dma: replace ISA_DMA_THRESHOLD with a variable ISA_DMA_THRESHOLD has been unused by non-arch code, so lets now get rid of it from ARM by replacing it with arm_dma_zone_mask. Move dma_supported() and dma_set_mask() out of line, and have dma_supported() check this new variable instead. 
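The check the new variable enables is simple: a device's DMA mask is acceptable only if it covers every address the DMA zone can hand out. A standalone sketch with a made-up limit value; this is not the kernel's dma_supported() implementation.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Highest bus address the platform can allocate for DMA (example value). */
    static const uint64_t dma_limit = 0x0fffffffULL;

    static bool mask_supported(uint64_t mask)
    {
        return mask >= dma_limit;    /* mask must reach every DMA-able address */
    }

    int main(void)
    {
        printf("24-bit mask: %s\n", mask_supported(0x00ffffffULL) ? "ok" : "too small");
        printf("32-bit mask: %s\n", mask_supported(0xffffffffULL) ? "ok" : "too small");
        return 0;
    }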
Signed-off-by: Russell King --- arch/arm/mm/dma-mapping.c | 35 ++++++++++++++++++++++++++++++++--- arch/arm/mm/init.c | 10 ++++++++++ arch/arm/mm/mm.h | 6 ++++++ 3 files changed, 48 insertions(+), 3 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 82a093c..0a0a1e7 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -25,9 +25,11 @@ #include #include +#include "mm.h" + static u64 get_coherent_dma_mask(struct device *dev) { - u64 mask = ISA_DMA_THRESHOLD; + u64 mask = (u64)arm_dma_limit; if (dev) { mask = dev->coherent_dma_mask; @@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev) return 0; } - if ((~mask) & ISA_DMA_THRESHOLD) { + if ((~mask) & (u64)arm_dma_limit) { dev_warn(dev, "coherent DMA mask %#llx is smaller " "than system GFP_DMA mask %#llx\n", - mask, (unsigned long long)ISA_DMA_THRESHOLD); + mask, (u64)arm_dma_limit); return 0; } } @@ -657,6 +659,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, } EXPORT_SYMBOL(dma_sync_sg_for_device); +/* + * Return whether the given device DMA address mask can be supported + * properly. For example, if your device can only drive the low 24-bits + * during bus mastering, then you would pass 0x00ffffff as the mask + * to this function. + */ +int dma_supported(struct device *dev, u64 mask) +{ + if (mask < (u64)arm_dma_limit) + return 0; + return 1; +} +EXPORT_SYMBOL(dma_supported); + +int dma_set_mask(struct device *dev, u64 dma_mask) +{ + if (!dev->dma_mask || !dma_supported(dev, dma_mask)) + return -EIO; + +#ifndef CONFIG_DMABOUNCE + *dev->dma_mask = dma_mask; +#endif + + return 0; +} +EXPORT_SYMBOL(dma_set_mask); + #define PREALLOC_DMA_DEBUG_ENTRIES 4096 static int __init dma_debug_do_init(void) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c19571c..17d6cd0c 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -212,6 +212,14 @@ static void __init arm_bootmem_init(unsigned long start_pfn, } #ifdef CONFIG_ZONE_DMA +/* + * The DMA mask corresponding to the maximum bus address allocatable + * using GFP_DMA. The default here places no restriction on DMA + * allocations. This must be the smallest DMA mask in the system, + * so a successful GFP_DMA allocation will always satisfy this. + */ +u32 arm_dma_limit; + static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, unsigned long dma_size) { @@ -278,6 +286,8 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, */ arm_adjust_dma_zone(zone_size, zhole_size, ARM_DMA_ZONE_SIZE >> PAGE_SHIFT); + + arm_dma_limit = PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1; #endif free_area_init_node(0, zone_size, min, zhole_size); diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 5b3d7d5..0105667 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -23,5 +23,11 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page #endif +#ifdef CONFIG_ZONE_DMA +extern u32 arm_dma_limit; +#else +#define arm_dma_limit ((u32)~0) +#endif + void __init bootmem_init(void); void arm_mm_memblock_reserve(void); -- cgit v1.1 From 140d5dc1d7d86d766951b7a9aef5b66f1dfdfae2 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 13 Jul 2011 16:32:58 +0100 Subject: ARM: 7000/1: LPAE: Use long long printk format for displaying the pud Currently using just long but this is not enough for the LPAE format (64-bit entries). 
Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/mm/fault.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index bc0e1d8..ee76923 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -94,7 +94,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr) pud = pud_offset(pgd, addr); if (PTRS_PER_PUD != 1) - printk(", *pud=%08lx", pud_val(*pud)); + printk(", *pud=%08llx", (long long)pud_val(*pud)); if (pud_none(*pud)) break; -- cgit v1.1
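The one-line fix above follows a general rule: when a value's width depends on configuration, widen it to a fixed 64-bit type and keep a single format string rather than switching formats per build. A standalone demonstration; pudval_t and the value are illustrative, and the patch itself casts to long long for printk.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t pudval_t;        /* 64-bit entries with LPAE-style tables */

    int main(void)
    {
        pudval_t pud = 0x00000001c0000000ULL;
        /* %08lx cannot print a 64-bit value correctly on a 32-bit build;
         * widen the argument and use %llx unconditionally. */
        printf("*pud=%08llx\n", (unsigned long long)pud);
        return 0;
    }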