From 7bf5234db7cce45fa9ff237ce0f45da2bd277cad Mon Sep 17 00:00:00 2001
From: Max Filippov <jcmvbkbc@gmail.com>
Date: Fri, 28 Apr 2017 09:40:00 -0700
Subject: xtensa: add -mno-serialize-volatile to CFLAGS

By default xtensa gcc inserts memw for all volatile object accesses.
This is too pessimistic for the kernel: there should be no "normal"
volatile objects, and all special objects, like MMIO or objects shared
between CPUs, should have explicit barriers.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
---
 arch/xtensa/Makefile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 7ee02fe..a206598 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -46,6 +46,7 @@ KBUILD_CFLAGS += -ffreestanding -D__linux__
 KBUILD_CFLAGS += -pipe -mlongcalls
 
 KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
+KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,)
 
 ifneq ($(CONFIG_LD_NO_RELAX),)
 LDFLAGS := --no-relax
-- 
cgit v1.1


From f8f02ca73cd8d1e2ac61ea1e5f0574a8c1f472fa Mon Sep 17 00:00:00 2001
From: Max Filippov <jcmvbkbc@gmail.com>
Date: Sun, 3 Dec 2017 20:55:35 -0800
Subject: xtensa: build kernel with text-section-literals

vmlinux.lds.S doesn't do anything special with literals, so instead of
keeping them separate put them into the corresponding text sections.
Drop the explicit .literal sections from vmlinux.lds.S and use the
standard section macros. Mark literal pool locations in the assembly
sources. Unfortunately the assembler doesn't put literals into .init
sections, and external libgcc may still have .literal sections, so the
sed transformation of the linker script is still needed.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
---
 arch/xtensa/Makefile                      |  6 +--
 arch/xtensa/boot/boot-redboot/bootstrap.S |  1 +
 arch/xtensa/kernel/Makefile               |  3 --
 arch/xtensa/kernel/align.S                |  2 +-
 arch/xtensa/kernel/entry.S                |  8 +++
 arch/xtensa/kernel/setup.c                | 16 +++---
 arch/xtensa/kernel/vectors.S              | 14 ++---
 arch/xtensa/kernel/vmlinux.lds.S          | 90 +++++++++----------------
 8 files changed, 52 insertions(+), 88 deletions(-)

diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index a206598..3a934b7 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -42,12 +42,12 @@ export PLATFORM
 
 # temporarily until string.h is fixed
 KBUILD_CFLAGS += -ffreestanding -D__linux__
-
-KBUILD_CFLAGS += -pipe -mlongcalls
-
+KBUILD_CFLAGS += -pipe -mlongcalls -mtext-section-literals
 KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
 KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,)
 
+KBUILD_AFLAGS += -mlongcalls -mtext-section-literals
+
 ifneq ($(CONFIG_LD_NO_RELAX),)
 LDFLAGS := --no-relax
 endif
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
index bf7fabe..bbf3b4b 100644
--- a/arch/xtensa/boot/boot-redboot/bootstrap.S
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -42,6 +42,7 @@ __start_a0:
 	.align 4
 
 	.section .text, "ax"
+	.literal_position
 	.begin	literal_prefix .text
 
 	/* put literals in here! */
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index bb8d557..9190759 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -17,9 +17,6 @@ obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
 
-AFLAGS_head.o += -mtext-section-literals
-AFLAGS_mxhead.o += -mtext-section-literals
-
 # In the Xtensa architecture, assembly generates literals which must always
 # precede the L32R instruction with a relative offset less than 256 kB.
# Therefore, the .text and .literal section must be combined in parenthesis diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index 890004a..24b3189 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -155,7 +155,7 @@ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ - + .literal_position ENTRY(fast_unaligned) /* Note: We don't expect the address to be aligned on a word diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 37a2395..5d57078 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -125,6 +125,7 @@ * * Note: _user_exception might be at an odd address. Don't use call0..call12 */ + .literal_position ENTRY(user_exception) @@ -777,6 +778,8 @@ ENDPROC(kernel_exception) * When we get here, a0 is trashed and saved to excsave[debuglevel] */ + .literal_position + ENTRY(debug_exception) rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL @@ -916,6 +919,8 @@ ENDPROC(debug_exception) unrecoverable_text: .ascii "Unrecoverable error in exception handler\0" + .literal_position + ENTRY(unrecoverable_exception) movi a0, 1 @@ -1117,6 +1122,8 @@ ENDPROC(fast_syscall_unrecoverable) * j done */ + .literal_position + #ifdef CONFIG_FAST_SYSCALL_XTENSA #define TRY \ @@ -1887,6 +1894,7 @@ ENDPROC(fast_store_prohibited) * void system_call (struct pt_regs* regs, int exccause) * a2 a3 */ + .literal_position ENTRY(system_call) diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 08175df..253a017 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -277,13 +277,13 @@ extern char _end[]; extern char _stext[]; extern char _WindowVectors_text_start; extern char _WindowVectors_text_end; -extern char _DebugInterruptVector_literal_start; +extern char _DebugInterruptVector_text_start; extern char _DebugInterruptVector_text_end; -extern char _KernelExceptionVector_literal_start; +extern char _KernelExceptionVector_text_start; extern char _KernelExceptionVector_text_end; -extern char _UserExceptionVector_literal_start; +extern char _UserExceptionVector_text_start; extern char _UserExceptionVector_text_end; -extern char _DoubleExceptionVector_literal_start; +extern char _DoubleExceptionVector_text_start; extern char _DoubleExceptionVector_text_end; #if XCHAL_EXCM_LEVEL >= 2 extern char _Level2InterruptVector_text_start; @@ -339,16 +339,16 @@ void __init setup_arch(char **cmdline_p) mem_reserve(__pa(&_WindowVectors_text_start), __pa(&_WindowVectors_text_end)); - mem_reserve(__pa(&_DebugInterruptVector_literal_start), + mem_reserve(__pa(&_DebugInterruptVector_text_start), __pa(&_DebugInterruptVector_text_end)); - mem_reserve(__pa(&_KernelExceptionVector_literal_start), + mem_reserve(__pa(&_KernelExceptionVector_text_start), __pa(&_KernelExceptionVector_text_end)); - mem_reserve(__pa(&_UserExceptionVector_literal_start), + mem_reserve(__pa(&_UserExceptionVector_text_start), __pa(&_UserExceptionVector_text_end)); - mem_reserve(__pa(&_DoubleExceptionVector_literal_start), + mem_reserve(__pa(&_DoubleExceptionVector_text_start), __pa(&_DoubleExceptionVector_text_end)); #if XCHAL_EXCM_LEVEL >= 2 diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 332e9d6..2bc8505 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -205,9 +205,6 @@ ENDPROC(_KernelExceptionVector) */ .section .DoubleExceptionVector.text, "ax" - .begin literal_prefix .DoubleExceptionVector - .globl _DoubleExceptionVector_WindowUnderflow - .globl _DoubleExceptionVector_WindowOverflow 
ENTRY(_DoubleExceptionVector) @@ -217,8 +214,12 @@ ENTRY(_DoubleExceptionVector) /* Check for kernel double exception (usually fatal). */ rsr a2, ps - _bbci.l a2, PS_UM_BIT, .Lksp + _bbsi.l a2, PS_UM_BIT, 1f + j .Lksp + .align 4 + .literal_position +1: /* Check if we are currently handling a window exception. */ /* Note: We don't need to indicate that we enter a critical section. */ @@ -475,11 +476,8 @@ _DoubleExceptionVector_handle_exception: rotw -3 j 1b - ENDPROC(_DoubleExceptionVector) - .end literal_prefix - .text /* * Fixup handler for TLB miss in double exception handler for window owerflow. @@ -508,6 +506,8 @@ ENDPROC(_DoubleExceptionVector) * a3: exctable, original value in excsave1 */ + .literal_position + ENTRY(window_overflow_restore_a0_fixup) rsr a0, ps diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 162c77e..70b731e 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -45,24 +45,16 @@ jiffies = jiffies_64; LONG(sym ## _end); \ LONG(LOADADDR(section)) -/* Macro to define a section for a vector. - * - * Use of the MIN function catches the types of errors illustrated in - * the following example: - * - * Assume the section .DoubleExceptionVector.literal is completely - * full. Then a programmer adds code to .DoubleExceptionVector.text - * that produces another literal. The final literal position will - * overlay onto the first word of the adjacent code section - * .DoubleExceptionVector.text. (In practice, the literals will - * overwrite the code, and the first few instructions will be - * garbage.) +/* + * Macro to define a section for a vector. When CONFIG_VECTORS_OFFSET is + * defined code for every vector is located with other init data. At startup + * time head.S copies code for every vector to its final position according + * to description recorded in the corresponding RELOCATE_ENTRY. */ #ifdef CONFIG_VECTORS_OFFSET -#define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec) \ - section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size, \ - LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \ +#define SECTION_VECTOR(sym, section, addr, prevsec) \ + section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \ { \ . 
= ALIGN(4); \ sym ## _start = ABSOLUTE(.); \ @@ -112,26 +104,19 @@ SECTIONS #if XCHAL_EXCM_LEVEL >= 6 SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR) #endif - SECTION_VECTOR (.DebugInterruptVector.literal, DEBUG_VECTOR_VADDR - 4) SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR) - SECTION_VECTOR (.KernelExceptionVector.literal, KERNEL_VECTOR_VADDR - 4) SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) - SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) - SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20) SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) #endif + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + ENTRY_TEXT TEXT_TEXT - VMLINUX_SYMBOL(__sched_text_start) = .; - *(.sched.literal .sched.text) - VMLINUX_SYMBOL(__sched_text_end) = .; - VMLINUX_SYMBOL(__cpuidle_text_start) = .; - *(.cpuidle.literal .cpuidle.text) - VMLINUX_SYMBOL(__cpuidle_text_end) = .; - VMLINUX_SYMBOL(__lock_text_start) = .; - *(.spinlock.literal .spinlock.text) - VMLINUX_SYMBOL(__lock_text_end) = .; + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT } _etext = .; @@ -196,8 +181,6 @@ SECTIONS .KernelExceptionVector.text); RELOCATE_ENTRY(_UserExceptionVector_text, .UserExceptionVector.text); - RELOCATE_ENTRY(_DoubleExceptionVector_literal, - .DoubleExceptionVector.literal); RELOCATE_ENTRY(_DoubleExceptionVector_text, .DoubleExceptionVector.text); RELOCATE_ENTRY(_DebugInterruptVector_text, @@ -230,25 +213,19 @@ SECTIONS SECTION_VECTOR (_WindowVectors_text, .WindowVectors.text, - WINDOW_VECTORS_VADDR, 4, + WINDOW_VECTORS_VADDR, .dummy) - SECTION_VECTOR (_DebugInterruptVector_literal, - .DebugInterruptVector.literal, - DEBUG_VECTOR_VADDR - 4, - SIZEOF(.WindowVectors.text), - .WindowVectors.text) SECTION_VECTOR (_DebugInterruptVector_text, .DebugInterruptVector.text, DEBUG_VECTOR_VADDR, - 4, - .DebugInterruptVector.literal) + .WindowVectors.text) #undef LAST #define LAST .DebugInterruptVector.text #if XCHAL_EXCM_LEVEL >= 2 SECTION_VECTOR (_Level2InterruptVector_text, .Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR, - SIZEOF(LAST), LAST) + LAST) # undef LAST # define LAST .Level2InterruptVector.text #endif @@ -256,7 +233,7 @@ SECTIONS SECTION_VECTOR (_Level3InterruptVector_text, .Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR, - SIZEOF(LAST), LAST) + LAST) # undef LAST # define LAST .Level3InterruptVector.text #endif @@ -264,7 +241,7 @@ SECTIONS SECTION_VECTOR (_Level4InterruptVector_text, .Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR, - SIZEOF(LAST), LAST) + LAST) # undef LAST # define LAST .Level4InterruptVector.text #endif @@ -272,7 +249,7 @@ SECTIONS SECTION_VECTOR (_Level5InterruptVector_text, .Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR, - SIZEOF(LAST), LAST) + LAST) # undef LAST # define LAST .Level5InterruptVector.text #endif @@ -280,40 +257,23 @@ SECTIONS SECTION_VECTOR (_Level6InterruptVector_text, .Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR, - SIZEOF(LAST), LAST) + LAST) # undef LAST # define LAST .Level6InterruptVector.text #endif - SECTION_VECTOR (_KernelExceptionVector_literal, - .KernelExceptionVector.literal, - KERNEL_VECTOR_VADDR - 4, - SIZEOF(LAST), LAST) -#undef LAST SECTION_VECTOR (_KernelExceptionVector_text, .KernelExceptionVector.text, KERNEL_VECTOR_VADDR, - 4, - .KernelExceptionVector.literal) - SECTION_VECTOR (_UserExceptionVector_literal, - .UserExceptionVector.literal, - USER_VECTOR_VADDR - 4, - 
SIZEOF(.KernelExceptionVector.text), - .KernelExceptionVector.text) + LAST) +#undef LAST SECTION_VECTOR (_UserExceptionVector_text, .UserExceptionVector.text, USER_VECTOR_VADDR, - 4, - .UserExceptionVector.literal) - SECTION_VECTOR (_DoubleExceptionVector_literal, - .DoubleExceptionVector.literal, - DOUBLEEXC_VECTOR_VADDR - 20, - SIZEOF(.UserExceptionVector.text), - .UserExceptionVector.text) + .KernelExceptionVector.text) SECTION_VECTOR (_DoubleExceptionVector_text, .DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR, - 20, - .DoubleExceptionVector.literal) + .UserExceptionVector.text) . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; @@ -323,7 +283,6 @@ SECTIONS SECTION_VECTOR (_SecondaryResetVector_text, .SecondaryResetVector.text, RESET_VECTOR1_VADDR, - SIZEOF(.DoubleExceptionVector.text), .DoubleExceptionVector.text) . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text); @@ -373,5 +332,4 @@ SECTIONS /* Sections to be discarded */ DISCARDS - /DISCARD/ : { *(.exit.literal) } } -- cgit v1.1 From 2da03d4114b2587f0e8e45f4862074e34daee64e Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sat, 9 Dec 2017 18:44:11 -0800 Subject: xtensa: use call instead of callx in assembly code Now that xtensa assembly sources are compiled with -mlongcalls let the assembler and linker relax call instructions into l32r + callx where needed. This change makes the code cleaner and potentially a bit faster. Signed-off-by: Max Filippov --- arch/xtensa/kernel/coprocessor.S | 3 +-- arch/xtensa/kernel/entry.S | 56 ++++++++++++++-------------------------- arch/xtensa/kernel/head.S | 10 +++---- arch/xtensa/kernel/vectors.S | 3 +-- 4 files changed, 24 insertions(+), 48 deletions(-) diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S index 3a98503..4f8b52d 100644 --- a/arch/xtensa/kernel/coprocessor.S +++ b/arch/xtensa/kernel/coprocessor.S @@ -212,8 +212,7 @@ ENDPROC(coprocessor_restore) ENTRY(fast_coprocessor_double) wsr a0, excsave1 - movi a0, unrecoverable_exception - callx0 a0 + call0 unrecoverable_exception ENDPROC(fast_coprocessor_double) diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 5d57078..5a2110b 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -476,8 +476,7 @@ common_exception_return: 1: irq_save a2, a3 #ifdef CONFIG_TRACE_IRQFLAGS - movi a4, trace_hardirqs_off - callx4 a4 + call4 trace_hardirqs_off #endif /* Jump if we are returning from kernel exceptions. 
*/ @@ -504,24 +503,20 @@ common_exception_return: /* Call do_signal() */ #ifdef CONFIG_TRACE_IRQFLAGS - movi a4, trace_hardirqs_on - callx4 a4 + call4 trace_hardirqs_on #endif rsil a2, 0 - movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*) mov a6, a1 - callx4 a4 + call4 do_notify_resume # int do_notify_resume(struct pt_regs*) j 1b 3: /* Reschedule */ #ifdef CONFIG_TRACE_IRQFLAGS - movi a4, trace_hardirqs_on - callx4 a4 + call4 trace_hardirqs_on #endif rsil a2, 0 - movi a4, schedule # void schedule (void) - callx4 a4 + call4 schedule # void schedule (void) j 1b #ifdef CONFIG_PREEMPT @@ -532,8 +527,7 @@ common_exception_return: l32i a4, a2, TI_PRE_COUNT bnez a4, 4f - movi a4, preempt_schedule_irq - callx4 a4 + call4 preempt_schedule_irq j 1b #endif @@ -546,23 +540,20 @@ common_exception_return: 5: #ifdef CONFIG_HAVE_HW_BREAKPOINT _bbci.l a4, TIF_DB_DISABLED, 7f - movi a4, restore_dbreak - callx4 a4 + call4 restore_dbreak 7: #endif #ifdef CONFIG_DEBUG_TLB_SANITY l32i a4, a1, PT_DEPC bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f - movi a4, check_tlb_sanity - callx4 a4 + call4 check_tlb_sanity #endif 6: 4: #ifdef CONFIG_TRACE_IRQFLAGS extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH bgei a4, LOCKLEVEL, 1f - movi a4, trace_hardirqs_on - callx4 a4 + call4 trace_hardirqs_on 1: #endif /* Restore optional registers. */ @@ -938,10 +929,8 @@ ENTRY(unrecoverable_exception) movi a0, 0 addi a1, a1, PT_REGS_OFFSET - movi a4, panic movi a6, unrecoverable_text - - callx4 a4 + call4 panic 1: j 1b @@ -1078,8 +1067,7 @@ ENTRY(fast_syscall_unrecoverable) xsr a2, depc # restore a2, depc wsr a0, excsave1 - movi a0, unrecoverable_exception - callx0 a0 + call0 unrecoverable_exception ENDPROC(fast_syscall_unrecoverable) @@ -1418,14 +1406,12 @@ ENTRY(fast_syscall_spill_registers) rsync movi a6, SIGSEGV - movi a4, do_exit - callx4 a4 + call4 do_exit /* shouldn't return, so panic */ wsr a0, excsave1 - movi a0, unrecoverable_exception - callx0 a0 # should not return + call0 unrecoverable_exception # should not return 1: j 1b @@ -1571,8 +1557,8 @@ ENDPROC(fast_syscall_spill_registers) ENTRY(fast_second_level_miss_double_kernel) -1: movi a0, unrecoverable_exception - callx0 a0 # should not return +1: + call0 unrecoverable_exception # should not return 1: j 1b ENDPROC(fast_second_level_miss_double_kernel) @@ -1904,9 +1890,8 @@ ENTRY(system_call) l32i a3, a2, PT_AREG2 mov a6, a2 - movi a4, do_syscall_trace_enter s32i a3, a2, PT_SYSCALL - callx4 a4 + call4 do_syscall_trace_enter mov a3, a6 /* syscall = sys_call_table[syscall_nr] */ @@ -1938,9 +1923,8 @@ ENTRY(system_call) 1: /* regs->areg[2] = return_value */ s32i a6, a2, PT_AREG2 - movi a4, do_syscall_trace_leave mov a6, a2 - callx4 a4 + call4 do_syscall_trace_leave retw ENDPROC(system_call) @@ -2056,12 +2040,10 @@ ENTRY(ret_from_fork) /* void schedule_tail (struct task_struct *prev) * Note: prev is still in a6 (return value from fake call4 frame) */ - movi a4, schedule_tail - callx4 a4 + call4 schedule_tail - movi a4, do_syscall_trace_leave mov a6, a1 - callx4 a4 + call4 do_syscall_trace_leave j common_exception_return diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index 23ce62e..9c4e943 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -264,11 +264,8 @@ ENTRY(_startup) /* init_arch kick-starts the linux kernel */ - movi a4, init_arch - callx4 a4 - - movi a4, start_kernel - callx4 a4 + call4 init_arch + call4 start_kernel should_never_return: j should_never_return @@ -294,8 +291,7 @@ should_never_return: movi a6, 
0 wsr a6, excsave1 - movi a4, secondary_start_kernel - callx4 a4 + call4 secondary_start_kernel j should_never_return #endif /* CONFIG_SMP */ diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 2bc8505..841503d 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -305,8 +305,7 @@ _DoubleExceptionVector_WindowUnderflow: .Lunrecoverable: rsr a3, excsave1 wsr a0, excsave1 - movi a0, unrecoverable_exception - callx0 a0 + call0 unrecoverable_exception .Lfixup:/* Check for a fixup handler or if we were in a critical section. */ -- cgit v1.1 From 0013aceb307482ba83a5b6a29f6ba1791be0d32b Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sat, 9 Dec 2017 21:18:47 -0800 Subject: xtensa: clean up fixups in assembly code Remove duplicate definitions of EX() and similar TRY/CATCH and SRC/DST macros from assembly sources and put single definition into asm/asmmacro.h Signed-off-by: Max Filippov --- arch/xtensa/include/asm/asmmacro.h | 7 +++ arch/xtensa/kernel/entry.S | 33 ++---------- arch/xtensa/lib/checksum.S | 74 +++++++++++--------------- arch/xtensa/lib/memset.S | 36 +++++-------- arch/xtensa/lib/strncpy_user.S | 52 ++++++++---------- arch/xtensa/lib/strnlen_user.S | 19 +++---- arch/xtensa/lib/usercopy.S | 106 +++++++++++++++++-------------------- 7 files changed, 133 insertions(+), 194 deletions(-) diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h index 746dcc8..d2a4415 100644 --- a/arch/xtensa/include/asm/asmmacro.h +++ b/arch/xtensa/include/asm/asmmacro.h @@ -150,5 +150,12 @@ __endl \ar \as .endm +/* Load or store instructions that may cause exceptions use the EX macro. */ + +#define EX(handler) \ + .section __ex_table, "a"; \ + .word 97f, handler; \ + .previous \ +97: #endif /* _XTENSA_ASMMACRO_H */ diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 5a2110b..a27a9a6 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -1094,35 +1095,12 @@ ENDPROC(fast_syscall_unrecoverable) * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception * * Note: we don't have to save a2; a2 holds the return value - * - * We use the two macros TRY and CATCH: - * - * TRY adds an entry to the __ex_table fixup table for the immediately - * following instruction. - * - * CATCH catches any exception that occurred at one of the preceding TRY - * statements and continues from there - * - * Usage TRY l32i a0, a1, 0 - * - * done: rfe - * CATCH - * j done */ .literal_position #ifdef CONFIG_FAST_SYSCALL_XTENSA -#define TRY \ - .section __ex_table, "a"; \ - .word 66f, 67f; \ - .text; \ -66: - -#define CATCH \ -67: - ENTRY(fast_syscall_xtensa) s32i a7, a2, PT_AREG7 # we need an additional register @@ -1136,9 +1114,9 @@ ENTRY(fast_syscall_xtensa) .Lswp: /* Atomic compare and swap */ -TRY l32i a0, a3, 0 # read old value +EX(.Leac) l32i a0, a3, 0 # read old value bne a0, a4, 1f # same as old value? jump -TRY s32i a5, a3, 0 # different, modify value +EX(.Leac) s32i a5, a3, 0 # different, modify value l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 1 # and return 1 @@ -1151,12 +1129,12 @@ TRY s32i a5, a3, 0 # different, modify value .Lnswp: /* Atomic set, add, and exg_add. 
*/ -TRY l32i a7, a3, 0 # orig +EX(.Leac) l32i a7, a3, 0 # orig addi a6, a6, -SYS_XTENSA_ATOMIC_SET add a0, a4, a7 # + arg moveqz a0, a4, a6 # set addi a6, a6, SYS_XTENSA_ATOMIC_SET -TRY s32i a0, a3, 0 # write new value +EX(.Leac) s32i a0, a3, 0 # write new value mov a0, a2 mov a2, a7 @@ -1164,7 +1142,6 @@ TRY s32i a0, a3, 0 # write new value l32i a0, a0, PT_AREG0 # restore a0 rfe -CATCH .Leac: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, -EFAULT diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S index 4eb573d..528fe0d 100644 --- a/arch/xtensa/lib/checksum.S +++ b/arch/xtensa/lib/checksum.S @@ -14,9 +14,10 @@ * 2 of the License, or (at your option) any later version. */ -#include +#include #include #include +#include /* * computes a partial checksum, e.g. for TCP/UDP fragments @@ -175,23 +176,8 @@ ENDPROC(csum_partial) /* * Copy from ds while checksumming, otherwise like csum_partial - * - * The macros SRC and DST specify the type of access for the instruction. - * thus we can call a custom exception handler for each access type. */ -#define SRC(y...) \ - 9999: y; \ - .section __ex_table, "a"; \ - .long 9999b, 6001f ; \ - .previous - -#define DST(y...) \ - 9999: y; \ - .section __ex_table, "a"; \ - .long 9999b, 6002f ; \ - .previous - /* unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, int sum, int *src_err_ptr, int *dst_err_ptr) @@ -244,28 +230,28 @@ ENTRY(csum_partial_copy_generic) add a10, a10, a2 /* a10 = end of last 32-byte src chunk */ .Loop5: #endif -SRC( l32i a9, a2, 0 ) -SRC( l32i a8, a2, 4 ) -DST( s32i a9, a3, 0 ) -DST( s32i a8, a3, 4 ) +EX(10f) l32i a9, a2, 0 +EX(10f) l32i a8, a2, 4 +EX(11f) s32i a9, a3, 0 +EX(11f) s32i a8, a3, 4 ONES_ADD(a5, a9) ONES_ADD(a5, a8) -SRC( l32i a9, a2, 8 ) -SRC( l32i a8, a2, 12 ) -DST( s32i a9, a3, 8 ) -DST( s32i a8, a3, 12 ) +EX(10f) l32i a9, a2, 8 +EX(10f) l32i a8, a2, 12 +EX(11f) s32i a9, a3, 8 +EX(11f) s32i a8, a3, 12 ONES_ADD(a5, a9) ONES_ADD(a5, a8) -SRC( l32i a9, a2, 16 ) -SRC( l32i a8, a2, 20 ) -DST( s32i a9, a3, 16 ) -DST( s32i a8, a3, 20 ) +EX(10f) l32i a9, a2, 16 +EX(10f) l32i a8, a2, 20 +EX(11f) s32i a9, a3, 16 +EX(11f) s32i a8, a3, 20 ONES_ADD(a5, a9) ONES_ADD(a5, a8) -SRC( l32i a9, a2, 24 ) -SRC( l32i a8, a2, 28 ) -DST( s32i a9, a3, 24 ) -DST( s32i a8, a3, 28 ) +EX(10f) l32i a9, a2, 24 +EX(10f) l32i a8, a2, 28 +EX(11f) s32i a9, a3, 24 +EX(11f) s32i a8, a3, 28 ONES_ADD(a5, a9) ONES_ADD(a5, a8) addi a2, a2, 32 @@ -284,8 +270,8 @@ DST( s32i a8, a3, 28 ) add a10, a10, a2 /* a10 = end of last 4-byte src chunk */ .Loop6: #endif -SRC( l32i a9, a2, 0 ) -DST( s32i a9, a3, 0 ) +EX(10f) l32i a9, a2, 0 +EX(11f) s32i a9, a3, 0 ONES_ADD(a5, a9) addi a2, a2, 4 addi a3, a3, 4 @@ -315,8 +301,8 @@ DST( s32i a9, a3, 0 ) add a10, a10, a2 /* a10 = end of last 2-byte src chunk */ .Loop7: #endif -SRC( l16ui a9, a2, 0 ) -DST( s16i a9, a3, 0 ) +EX(10f) l16ui a9, a2, 0 +EX(11f) s16i a9, a3, 0 ONES_ADD(a5, a9) addi a2, a2, 2 addi a3, a3, 2 @@ -326,8 +312,8 @@ DST( s16i a9, a3, 0 ) 4: /* This section processes a possible trailing odd byte. 
*/ _bbci.l a4, 0, 8f /* 1-byte chunk */ -SRC( l8ui a9, a2, 0 ) -DST( s8i a9, a3, 0 ) +EX(10f) l8ui a9, a2, 0 +EX(11f) s8i a9, a3, 0 #ifdef __XTENSA_EB__ slli a9, a9, 8 /* shift byte to bits 8..15 */ #endif @@ -350,10 +336,10 @@ DST( s8i a9, a3, 0 ) add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */ .Loop8: #endif -SRC( l8ui a9, a2, 0 ) -SRC( l8ui a8, a2, 1 ) -DST( s8i a9, a3, 0 ) -DST( s8i a8, a3, 1 ) +EX(10f) l8ui a9, a2, 0 +EX(10f) l8ui a8, a2, 1 +EX(11f) s8i a9, a3, 0 +EX(11f) s8i a8, a3, 1 #ifdef __XTENSA_EB__ slli a9, a9, 8 /* combine into a single 16-bit value */ #else /* for checksum computation */ @@ -381,7 +367,7 @@ ENDPROC(csum_partial_copy_generic) a12 = original dst for exception handling */ -6001: +10: _movi a2, -EFAULT s32i a2, a6, 0 /* src_err_ptr */ @@ -403,7 +389,7 @@ ENDPROC(csum_partial_copy_generic) 2: retw -6002: +11: movi a2, -EFAULT s32i a2, a7, 0 /* dst_err_ptr */ movi a2, 0 diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S index 10b8c40..7a724ed 100644 --- a/arch/xtensa/lib/memset.S +++ b/arch/xtensa/lib/memset.S @@ -12,6 +12,7 @@ */ #include +#include /* * void *memset(void *dst, int c, size_t length) @@ -28,15 +29,6 @@ * the alignment labels). */ -/* Load or store instructions that may cause exceptions use the EX macro. */ - -#define EX(insn,reg1,reg2,offset,handler) \ -9: insn reg1, reg2, offset; \ - .section __ex_table, "a"; \ - .word 9b, handler; \ - .previous - - .text .align 4 .global memset @@ -73,10 +65,10 @@ memset: add a6, a6, a5 # a6 = end of last 16B chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop1: - EX(s32i, a3, a5, 0, memset_fixup) - EX(s32i, a3, a5, 4, memset_fixup) - EX(s32i, a3, a5, 8, memset_fixup) - EX(s32i, a3, a5, 12, memset_fixup) +EX(10f) s32i a3, a5, 0 +EX(10f) s32i a3, a5, 4 +EX(10f) s32i a3, a5, 8 +EX(10f) s32i a3, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS blt a5, a6, .Loop1 @@ -84,23 +76,23 @@ memset: .Loop1done: bbci.l a4, 3, .L2 # set 8 bytes - EX(s32i, a3, a5, 0, memset_fixup) - EX(s32i, a3, a5, 4, memset_fixup) +EX(10f) s32i a3, a5, 0 +EX(10f) s32i a3, a5, 4 addi a5, a5, 8 .L2: bbci.l a4, 2, .L3 # set 4 bytes - EX(s32i, a3, a5, 0, memset_fixup) +EX(10f) s32i a3, a5, 0 addi a5, a5, 4 .L3: bbci.l a4, 1, .L4 # set 2 bytes - EX(s16i, a3, a5, 0, memset_fixup) +EX(10f) s16i a3, a5, 0 addi a5, a5, 2 .L4: bbci.l a4, 0, .L5 # set 1 byte - EX(s8i, a3, a5, 0, memset_fixup) +EX(10f) s8i a3, a5, 0 .L5: .Lret1: retw @@ -114,7 +106,7 @@ memset: bbci.l a5, 0, .L20 # branch if dst alignment half-aligned # dst is only byte aligned # set 1 byte - EX(s8i, a3, a5, 0, memset_fixup) +EX(10f) s8i a3, a5, 0 addi a5, a5, 1 addi a4, a4, -1 # now retest if dst aligned @@ -122,7 +114,7 @@ memset: .L20: # dst half-aligned # set 2 bytes - EX(s16i, a3, a5, 0, memset_fixup) +EX(10f) s16i a3, a5, 0 addi a5, a5, 2 addi a4, a4, -2 j .L0 # dst is now aligned, return to main algorithm @@ -141,7 +133,7 @@ memset: add a6, a5, a4 # a6 = ending address #endif /* !XCHAL_HAVE_LOOPS */ .Lbyteloop: - EX(s8i, a3, a5, 0, memset_fixup) +EX(10f) s8i a3, a5, 0 addi a5, a5, 1 #if !XCHAL_HAVE_LOOPS blt a5, a6, .Lbyteloop @@ -155,6 +147,6 @@ memset: /* We return zero if a failure occurred. */ -memset_fixup: +10: movi a2, 0 retw diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S index 1ad0ecf..827e1b3 100644 --- a/arch/xtensa/lib/strncpy_user.S +++ b/arch/xtensa/lib/strncpy_user.S @@ -11,16 +11,9 @@ * Copyright (C) 2002 Tensilica Inc. 
*/ -#include #include - -/* Load or store instructions that may cause exceptions use the EX macro. */ - -#define EX(insn,reg1,reg2,offset,handler) \ -9: insn reg1, reg2, offset; \ - .section __ex_table, "a"; \ - .word 9b, handler; \ - .previous +#include +#include /* * char *__strncpy_user(char *dst, const char *src, size_t len) @@ -75,9 +68,9 @@ __strncpy_user: j .Ldstunaligned .Lsrc1mod2: # src address is odd - EX(l8ui, a9, a3, 0, fixup_l) # get byte 0 +EX(11f) l8ui a9, a3, 0 # get byte 0 addi a3, a3, 1 # advance src pointer - EX(s8i, a9, a11, 0, fixup_s) # store byte 0 +EX(10f) s8i a9, a11, 0 # store byte 0 beqz a9, .Lret # if byte 0 is zero addi a11, a11, 1 # advance dst pointer addi a4, a4, -1 # decrement len @@ -85,16 +78,16 @@ __strncpy_user: bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned .Lsrc2mod4: # src address is 2 mod 4 - EX(l8ui, a9, a3, 0, fixup_l) # get byte 0 +EX(11f) l8ui a9, a3, 0 # get byte 0 /* 1-cycle interlock */ - EX(s8i, a9, a11, 0, fixup_s) # store byte 0 +EX(10f) s8i a9, a11, 0 # store byte 0 beqz a9, .Lret # if byte 0 is zero addi a11, a11, 1 # advance dst pointer addi a4, a4, -1 # decrement len beqz a4, .Lret # if len is zero - EX(l8ui, a9, a3, 1, fixup_l) # get byte 0 +EX(11f) l8ui a9, a3, 1 # get byte 0 addi a3, a3, 2 # advance src pointer - EX(s8i, a9, a11, 0, fixup_s) # store byte 0 +EX(10f) s8i a9, a11, 0 # store byte 0 beqz a9, .Lret # if byte 0 is zero addi a11, a11, 1 # advance dst pointer addi a4, a4, -1 # decrement len @@ -117,12 +110,12 @@ __strncpy_user: add a12, a12, a11 # a12 = end of last 4B chunck #endif .Loop1: - EX(l32i, a9, a3, 0, fixup_l) # get word from src +EX(11f) l32i a9, a3, 0 # get word from src addi a3, a3, 4 # advance src pointer bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a6, .Lz1 # if byte 1 is zero bnone a9, a7, .Lz2 # if byte 2 is zero - EX(s32i, a9, a11, 0, fixup_s) # store word to dst +EX(10f) s32i a9, a11, 0 # store word to dst bnone a9, a8, .Lz3 # if byte 3 is zero addi a11, a11, 4 # advance dst pointer #if !XCHAL_HAVE_LOOPS @@ -132,7 +125,7 @@ __strncpy_user: .Loop1done: bbci.l a4, 1, .L100 # copy 2 bytes - EX(l16ui, a9, a3, 0, fixup_l) +EX(11f) l16ui a9, a3, 0 addi a3, a3, 2 # advance src pointer #ifdef __XTENSA_EB__ bnone a9, a7, .Lz0 # if byte 2 is zero @@ -141,13 +134,13 @@ __strncpy_user: bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a6, .Lz1 # if byte 1 is zero #endif - EX(s16i, a9, a11, 0, fixup_s) +EX(10f) s16i a9, a11, 0 addi a11, a11, 2 # advance dst pointer .L100: bbci.l a4, 0, .Lret - EX(l8ui, a9, a3, 0, fixup_l) +EX(11f) l8ui a9, a3, 0 /* slot */ - EX(s8i, a9, a11, 0, fixup_s) +EX(10f) s8i a9, a11, 0 beqz a9, .Lret # if byte is zero addi a11, a11, 1-3 # advance dst ptr 1, but also cancel # the effect of adding 3 in .Lz3 code @@ -161,14 +154,14 @@ __strncpy_user: #ifdef __XTENSA_EB__ movi a9, 0 #endif /* __XTENSA_EB__ */ - EX(s8i, a9, a11, 0, fixup_s) +EX(10f) s8i a9, a11, 0 sub a2, a11, a2 # compute strlen retw .Lz1: # byte 1 is zero #ifdef __XTENSA_EB__ extui a9, a9, 16, 16 #endif /* __XTENSA_EB__ */ - EX(s16i, a9, a11, 0, fixup_s) +EX(10f) s16i a9, a11, 0 addi a11, a11, 1 # advance dst pointer sub a2, a11, a2 # compute strlen retw @@ -176,9 +169,9 @@ __strncpy_user: #ifdef __XTENSA_EB__ extui a9, a9, 16, 16 #endif /* __XTENSA_EB__ */ - EX(s16i, a9, a11, 0, fixup_s) +EX(10f) s16i a9, a11, 0 movi a9, 0 - EX(s8i, a9, a11, 2, fixup_s) +EX(10f) s8i a9, a11, 2 addi a11, a11, 2 # advance dst pointer sub a2, a11, a2 # compute strlen retw @@ -196,9 +189,9 @@ __strncpy_user: add a12, a11, a4 # a12 = 
ending address #endif /* XCHAL_HAVE_LOOPS */ .Lnextbyte: - EX(l8ui, a9, a3, 0, fixup_l) +EX(11f) l8ui a9, a3, 0 addi a3, a3, 1 - EX(s8i, a9, a11, 0, fixup_s) +EX(10f) s8i a9, a11, 0 beqz a9, .Lunalignedend addi a11, a11, 1 #if !XCHAL_HAVE_LOOPS @@ -218,8 +211,7 @@ __strncpy_user: * implementation in memset(). Thus, we differentiate between * load/store fixups. */ -fixup_s: -fixup_l: +10: +11: movi a2, -EFAULT retw - diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S index 4c03b1e..9404ac4 100644 --- a/arch/xtensa/lib/strnlen_user.S +++ b/arch/xtensa/lib/strnlen_user.S @@ -12,14 +12,7 @@ */ #include - -/* Load or store instructions that may cause exceptions use the EX macro. */ - -#define EX(insn,reg1,reg2,offset,handler) \ -9: insn reg1, reg2, offset; \ - .section __ex_table, "a"; \ - .word 9b, handler; \ - .previous +#include /* * size_t __strnlen_user(const char *s, size_t len) @@ -77,7 +70,7 @@ __strnlen_user: add a10, a10, a4 # a10 = end of last 4B chunk #endif /* XCHAL_HAVE_LOOPS */ .Loop: - EX(l32i, a9, a4, 4, lenfixup) # get next word of string +EX(10f) l32i a9, a4, 4 # get next word of string addi a4, a4, 4 # advance string pointer bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a6, .Lz1 # if byte 1 is zero @@ -88,7 +81,7 @@ __strnlen_user: #endif .Ldone: - EX(l32i, a9, a4, 4, lenfixup) # load 4 bytes for remaining checks +EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks bbci.l a3, 1, .L100 # check two more bytes (bytes 0, 1 of word) @@ -125,14 +118,14 @@ __strnlen_user: retw .L1mod2: # address is odd - EX(l8ui, a9, a4, 4, lenfixup) # get byte 0 +EX(10f) l8ui a9, a4, 4 # get byte 0 addi a4, a4, 1 # advance string pointer beqz a9, .Lz3 # if byte 0 is zero bbci.l a4, 1, .Laligned # if string pointer is now word-aligned .L2mod4: # address is 2 mod 4 addi a4, a4, 2 # advance ptr for aligned access - EX(l32i, a9, a4, 0, lenfixup) # get word with first two bytes of string +EX(10f) l32i a9, a4, 0 # get word with first two bytes of string bnone a9, a7, .Lz2 # if byte 2 (of word, not string) is zero bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero # byte 3 is zero @@ -142,6 +135,6 @@ __strnlen_user: .section .fixup, "ax" .align 4 -lenfixup: +10: movi a2, 0 retw diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S index d9cd766..4172b73 100644 --- a/arch/xtensa/lib/usercopy.S +++ b/arch/xtensa/lib/usercopy.S @@ -54,6 +54,7 @@ */ #include +#include #ifdef __XTENSA_EB__ #define ALIGN(R, W0, W1) src R, W0, W1 @@ -63,15 +64,6 @@ #define SSA8(R) ssa8l R #endif -/* Load or store instructions that may cause exceptions use the EX macro. 
*/ - -#define EX(insn,reg1,reg2,offset,handler) \ -9: insn reg1, reg2, offset; \ - .section __ex_table, "a"; \ - .word 9b, handler; \ - .previous - - .text .align 4 .global __xtensa_copy_user @@ -102,9 +94,9 @@ __xtensa_copy_user: bltui a4, 7, .Lbytecopy # do short copies byte by byte # copy 1 byte - EX(l8ui, a6, a3, 0, fixup) +EX(10f) l8ui a6, a3, 0 addi a3, a3, 1 - EX(s8i, a6, a5, 0, fixup) +EX(10f) s8i a6, a5, 0 addi a5, a5, 1 addi a4, a4, -1 bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then @@ -112,11 +104,11 @@ __xtensa_copy_user: .Ldst2mod4: # dst 16-bit aligned # copy 2 bytes bltui a4, 6, .Lbytecopy # do short copies byte by byte - EX(l8ui, a6, a3, 0, fixup) - EX(l8ui, a7, a3, 1, fixup) +EX(10f) l8ui a6, a3, 0 +EX(10f) l8ui a7, a3, 1 addi a3, a3, 2 - EX(s8i, a6, a5, 0, fixup) - EX(s8i, a7, a5, 1, fixup) +EX(10f) s8i a6, a5, 0 +EX(10f) s8i a7, a5, 1 addi a5, a5, 2 addi a4, a4, -2 j .Ldstaligned # dst is now aligned, return to main algorithm @@ -135,9 +127,9 @@ __xtensa_copy_user: add a7, a3, a4 # a7 = end address for source #endif /* !XCHAL_HAVE_LOOPS */ .Lnextbyte: - EX(l8ui, a6, a3, 0, fixup) +EX(10f) l8ui a6, a3, 0 addi a3, a3, 1 - EX(s8i, a6, a5, 0, fixup) +EX(10f) s8i a6, a5, 0 addi a5, a5, 1 #if !XCHAL_HAVE_LOOPS blt a3, a7, .Lnextbyte @@ -161,15 +153,15 @@ __xtensa_copy_user: add a8, a8, a3 # a8 = end of last 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop1: - EX(l32i, a6, a3, 0, fixup) - EX(l32i, a7, a3, 4, fixup) - EX(s32i, a6, a5, 0, fixup) - EX(l32i, a6, a3, 8, fixup) - EX(s32i, a7, a5, 4, fixup) - EX(l32i, a7, a3, 12, fixup) - EX(s32i, a6, a5, 8, fixup) +EX(10f) l32i a6, a3, 0 +EX(10f) l32i a7, a3, 4 +EX(10f) s32i a6, a5, 0 +EX(10f) l32i a6, a3, 8 +EX(10f) s32i a7, a5, 4 +EX(10f) l32i a7, a3, 12 +EX(10f) s32i a6, a5, 8 addi a3, a3, 16 - EX(s32i, a7, a5, 12, fixup) +EX(10f) s32i a7, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS blt a3, a8, .Loop1 @@ -177,31 +169,31 @@ __xtensa_copy_user: .Loop1done: bbci.l a4, 3, .L2 # copy 8 bytes - EX(l32i, a6, a3, 0, fixup) - EX(l32i, a7, a3, 4, fixup) +EX(10f) l32i a6, a3, 0 +EX(10f) l32i a7, a3, 4 addi a3, a3, 8 - EX(s32i, a6, a5, 0, fixup) - EX(s32i, a7, a5, 4, fixup) +EX(10f) s32i a6, a5, 0 +EX(10f) s32i a7, a5, 4 addi a5, a5, 8 .L2: bbci.l a4, 2, .L3 # copy 4 bytes - EX(l32i, a6, a3, 0, fixup) +EX(10f) l32i a6, a3, 0 addi a3, a3, 4 - EX(s32i, a6, a5, 0, fixup) +EX(10f) s32i a6, a5, 0 addi a5, a5, 4 .L3: bbci.l a4, 1, .L4 # copy 2 bytes - EX(l16ui, a6, a3, 0, fixup) +EX(10f) l16ui a6, a3, 0 addi a3, a3, 2 - EX(s16i, a6, a5, 0, fixup) +EX(10f) s16i a6, a5, 0 addi a5, a5, 2 .L4: bbci.l a4, 0, .L5 # copy 1 byte - EX(l8ui, a6, a3, 0, fixup) - EX(s8i, a6, a5, 0, fixup) +EX(10f) l8ui a6, a3, 0 +EX(10f) s8i a6, a5, 0 .L5: movi a2, 0 # return success for len bytes copied retw @@ -217,7 +209,7 @@ __xtensa_copy_user: # copy 16 bytes per iteration for word-aligned dst and unaligned src and a10, a3, a8 # save unalignment offset for below sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware) - EX(l32i, a6, a3, 0, fixup) # load first word +EX(10f) l32i a6, a3, 0 # load first word #if XCHAL_HAVE_LOOPS loopnez a7, .Loop2done #else /* !XCHAL_HAVE_LOOPS */ @@ -226,19 +218,19 @@ __xtensa_copy_user: add a12, a12, a3 # a12 = end of last 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop2: - EX(l32i, a7, a3, 4, fixup) - EX(l32i, a8, a3, 8, fixup) +EX(10f) l32i a7, a3, 4 +EX(10f) l32i a8, a3, 8 ALIGN( a6, a6, a7) - EX(s32i, a6, a5, 0, fixup) - EX(l32i, a9, a3, 12, fixup) +EX(10f) s32i a6, a5, 0 +EX(10f) l32i a9, 
a3, 12 ALIGN( a7, a7, a8) - EX(s32i, a7, a5, 4, fixup) - EX(l32i, a6, a3, 16, fixup) +EX(10f) s32i a7, a5, 4 +EX(10f) l32i a6, a3, 16 ALIGN( a8, a8, a9) - EX(s32i, a8, a5, 8, fixup) +EX(10f) s32i a8, a5, 8 addi a3, a3, 16 ALIGN( a9, a9, a6) - EX(s32i, a9, a5, 12, fixup) +EX(10f) s32i a9, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS blt a3, a12, .Loop2 @@ -246,39 +238,39 @@ __xtensa_copy_user: .Loop2done: bbci.l a4, 3, .L12 # copy 8 bytes - EX(l32i, a7, a3, 4, fixup) - EX(l32i, a8, a3, 8, fixup) +EX(10f) l32i a7, a3, 4 +EX(10f) l32i a8, a3, 8 ALIGN( a6, a6, a7) - EX(s32i, a6, a5, 0, fixup) +EX(10f) s32i a6, a5, 0 addi a3, a3, 8 ALIGN( a7, a7, a8) - EX(s32i, a7, a5, 4, fixup) +EX(10f) s32i a7, a5, 4 addi a5, a5, 8 mov a6, a8 .L12: bbci.l a4, 2, .L13 # copy 4 bytes - EX(l32i, a7, a3, 4, fixup) +EX(10f) l32i a7, a3, 4 addi a3, a3, 4 ALIGN( a6, a6, a7) - EX(s32i, a6, a5, 0, fixup) +EX(10f) s32i a6, a5, 0 addi a5, a5, 4 mov a6, a7 .L13: add a3, a3, a10 # readjust a3 with correct misalignment bbci.l a4, 1, .L14 # copy 2 bytes - EX(l8ui, a6, a3, 0, fixup) - EX(l8ui, a7, a3, 1, fixup) +EX(10f) l8ui a6, a3, 0 +EX(10f) l8ui a7, a3, 1 addi a3, a3, 2 - EX(s8i, a6, a5, 0, fixup) - EX(s8i, a7, a5, 1, fixup) +EX(10f) s8i a6, a5, 0 +EX(10f) s8i a7, a5, 1 addi a5, a5, 2 .L14: bbci.l a4, 0, .L15 # copy 1 byte - EX(l8ui, a6, a3, 0, fixup) - EX(s8i, a6, a5, 0, fixup) +EX(10f) l8ui a6, a3, 0 +EX(10f) s8i a6, a5, 0 .L15: movi a2, 0 # return success for len bytes copied retw @@ -294,7 +286,7 @@ __xtensa_copy_user: */ -fixup: +10: sub a2, a5, a2 /* a2 <-- bytes copied */ sub a2, a11, a2 /* a2 <-- bytes not copied */ retw -- cgit v1.1 From fbb871e220672a8e9e4e7870da5b206fe05904b2 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sat, 9 Dec 2017 21:21:35 -0800 Subject: xtensa: clean up word alignment macros in assembly code Remove duplicate definitions of ALIGN/src_b/__src_b and SSA8/ssa8/__ssa8 from assembly sources and put single definition into asm/asmmacro.h Signed-off-by: Max Filippov --- arch/xtensa/include/asm/asmmacro.h | 33 +++++++++++++++++++++++++ arch/xtensa/kernel/align.S | 5 +--- arch/xtensa/lib/memcopy.S | 49 +++++++++++++------------------------- arch/xtensa/lib/usercopy.S | 24 +++++++------------ 4 files changed, 59 insertions(+), 52 deletions(-) diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h index d2a4415..7f2ae58 100644 --- a/arch/xtensa/include/asm/asmmacro.h +++ b/arch/xtensa/include/asm/asmmacro.h @@ -158,4 +158,37 @@ .previous \ 97: + +/* + * Extract unaligned word that is split between two registers w0 and w1 + * into r regardless of machine endianness. SAR must be loaded with the + * starting bit of the word (see __ssa8). + */ + + .macro __src_b r, w0, w1 +#ifdef __XTENSA_EB__ + src \r, \w0, \w1 +#else + src \r, \w1, \w0 +#endif + .endm + +/* + * Load 2 lowest address bits of r into SAR for __src_b to extract unaligned + * word starting at r from two registers loaded from consecutive aligned + * addresses covering r regardless of machine endianness. 
+ * + * r 0 1 2 3 + * LE SAR 0 8 16 24 + * BE SAR 32 24 16 8 + */ + + .macro __ssa8 r +#ifdef __XTENSA_EB__ + ssa8b \r +#else + ssa8l \r +#endif + .endm + #endif /* _XTENSA_ASMMACRO_H */ diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index 24b3189..9301452e 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -19,6 +19,7 @@ #include #include #include +#include #include #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION @@ -66,8 +67,6 @@ #define INSN_T 24 #define INSN_OP1 16 -.macro __src_b r, w0, w1; src \r, \w0, \w1; .endm -.macro __ssa8 r; ssa8b \r; .endm .macro __ssa8r r; ssa8l \r; .endm .macro __sh r, s; srl \r, \s; .endm .macro __sl r, s; sll \r, \s; .endm @@ -81,8 +80,6 @@ #define INSN_T 4 #define INSN_OP1 12 -.macro __src_b r, w0, w1; src \r, \w1, \w0; .endm -.macro __ssa8 r; ssa8l \r; .endm .macro __ssa8r r; ssa8b \r; .endm .macro __sh r, s; sll \r, \s; .endm .macro __sl r, s; srl \r, \s; .endm diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S index b1c219a..9bda748 100644 --- a/arch/xtensa/lib/memcopy.S +++ b/arch/xtensa/lib/memcopy.S @@ -10,22 +10,7 @@ */ #include - - .macro src_b r, w0, w1 -#ifdef __XTENSA_EB__ - src \r, \w0, \w1 -#else - src \r, \w1, \w0 -#endif - .endm - - .macro ssa8 r -#ifdef __XTENSA_EB__ - ssa8b \r -#else - ssa8l \r -#endif - .endm +#include /* * void *memcpy(void *dst, const void *src, size_t len); @@ -209,7 +194,7 @@ memcpy: .Lsrcunaligned: _beqz a4, .Ldone # avoid loading anything for zero-length copies # copy 16 bytes per iteration for word-aligned dst and unaligned src - ssa8 a3 # set shift amount from byte offset + __ssa8 a3 # set shift amount from byte offset /* set to 1 when running on ISS (simulator) with the lint or ferret client, or 0 to save a few cycles */ @@ -229,16 +214,16 @@ memcpy: .Loop2: l32i a7, a3, 4 l32i a8, a3, 8 - src_b a6, a6, a7 + __src_b a6, a6, a7 s32i a6, a5, 0 l32i a9, a3, 12 - src_b a7, a7, a8 + __src_b a7, a7, a8 s32i a7, a5, 4 l32i a6, a3, 16 - src_b a8, a8, a9 + __src_b a8, a8, a9 s32i a8, a5, 8 addi a3, a3, 16 - src_b a9, a9, a6 + __src_b a9, a9, a6 s32i a9, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS @@ -249,10 +234,10 @@ memcpy: # copy 8 bytes l32i a7, a3, 4 l32i a8, a3, 8 - src_b a6, a6, a7 + __src_b a6, a6, a7 s32i a6, a5, 0 addi a3, a3, 8 - src_b a7, a7, a8 + __src_b a7, a7, a8 s32i a7, a5, 4 addi a5, a5, 8 mov a6, a8 @@ -261,7 +246,7 @@ memcpy: # copy 4 bytes l32i a7, a3, 4 addi a3, a3, 4 - src_b a6, a6, a7 + __src_b a6, a6, a7 s32i a6, a5, 0 addi a5, a5, 4 mov a6, a7 @@ -485,7 +470,7 @@ memmove: .Lbacksrcunaligned: _beqz a4, .Lbackdone # avoid loading anything for zero-length copies # copy 16 bytes per iteration for word-aligned dst and unaligned src - ssa8 a3 # set shift amount from byte offset + __ssa8 a3 # set shift amount from byte offset #define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with * the lint or ferret client, or 0 * to save a few cycles */ @@ -506,15 +491,15 @@ memmove: l32i a7, a3, 12 l32i a8, a3, 8 addi a5, a5, -16 - src_b a6, a7, a6 + __src_b a6, a7, a6 s32i a6, a5, 12 l32i a9, a3, 4 - src_b a7, a8, a7 + __src_b a7, a8, a7 s32i a7, a5, 8 l32i a6, a3, 0 - src_b a8, a9, a8 + __src_b a8, a9, a8 s32i a8, a5, 4 - src_b a9, a6, a9 + __src_b a9, a6, a9 s32i a9, a5, 0 #if !XCHAL_HAVE_LOOPS bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start @@ -526,9 +511,9 @@ memmove: l32i a7, a3, 4 l32i a8, a3, 0 addi a5, a5, -8 - src_b a6, a7, a6 + __src_b a6, a7, a6 s32i a6, a5, 4 - src_b a7, a8, a7 + __src_b 
a7, a8, a7 s32i a7, a5, 0 mov a6, a8 .Lback12: @@ -537,7 +522,7 @@ memmove: addi a3, a3, -4 l32i a7, a3, 0 addi a5, a5, -4 - src_b a6, a7, a6 + __src_b a6, a7, a6 s32i a6, a5, 0 mov a6, a7 .Lback13: diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S index 4172b73..0959b6e 100644 --- a/arch/xtensa/lib/usercopy.S +++ b/arch/xtensa/lib/usercopy.S @@ -56,14 +56,6 @@ #include #include -#ifdef __XTENSA_EB__ -#define ALIGN(R, W0, W1) src R, W0, W1 -#define SSA8(R) ssa8b R -#else -#define ALIGN(R, W0, W1) src R, W1, W0 -#define SSA8(R) ssa8l R -#endif - .text .align 4 .global __xtensa_copy_user @@ -81,7 +73,7 @@ __xtensa_copy_user: # per iteration movi a8, 3 # if source is also aligned, bnone a3, a8, .Laligned # then use word copy - SSA8( a3) # set shift amount from byte offset + __ssa8 a3 # set shift amount from byte offset bnez a4, .Lsrcunaligned movi a2, 0 # return success for len==0 retw @@ -220,16 +212,16 @@ EX(10f) l32i a6, a3, 0 # load first word .Loop2: EX(10f) l32i a7, a3, 4 EX(10f) l32i a8, a3, 8 - ALIGN( a6, a6, a7) + __src_b a6, a6, a7 EX(10f) s32i a6, a5, 0 EX(10f) l32i a9, a3, 12 - ALIGN( a7, a7, a8) + __src_b a7, a7, a8 EX(10f) s32i a7, a5, 4 EX(10f) l32i a6, a3, 16 - ALIGN( a8, a8, a9) + __src_b a8, a8, a9 EX(10f) s32i a8, a5, 8 addi a3, a3, 16 - ALIGN( a9, a9, a6) + __src_b a9, a9, a6 EX(10f) s32i a9, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS @@ -240,10 +232,10 @@ EX(10f) s32i a9, a5, 12 # copy 8 bytes EX(10f) l32i a7, a3, 4 EX(10f) l32i a8, a3, 8 - ALIGN( a6, a6, a7) + __src_b a6, a6, a7 EX(10f) s32i a6, a5, 0 addi a3, a3, 8 - ALIGN( a7, a7, a8) + __src_b a7, a7, a8 EX(10f) s32i a7, a5, 4 addi a5, a5, 8 mov a6, a8 @@ -252,7 +244,7 @@ EX(10f) s32i a7, a5, 4 # copy 4 bytes EX(10f) l32i a7, a3, 4 addi a3, a3, 4 - ALIGN( a6, a6, a7) + __src_b a6, a6, a7 EX(10f) s32i a6, a5, 0 addi a5, a5, 4 mov a6, a7 -- cgit v1.1 From 5cf97ebd8b40e2b1791136fc1476d17365864b18 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sat, 9 Dec 2017 21:22:37 -0800 Subject: xtensa: clean up functions in assembly code Use ENTRY and ENDPROC throughout arch/xtensa/lib assembly sources. Introduce asm/linkage.h and define xtensa-specific __ALIGN macro there. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/linkage.h | 9 +++++++++ arch/xtensa/lib/memcopy.S | 30 ++++++++++-------------------- arch/xtensa/lib/memset.S | 8 ++++---- arch/xtensa/lib/strncpy_user.S | 8 ++++---- arch/xtensa/lib/strnlen_user.S | 9 +++++---- arch/xtensa/lib/usercopy.S | 8 ++++---- 6 files changed, 36 insertions(+), 36 deletions(-) create mode 100644 arch/xtensa/include/asm/linkage.h diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h new file mode 100644 index 0000000..0ba9973 --- /dev/null +++ b/arch/xtensa/include/asm/linkage.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +#define __ALIGN .align 4 +#define __ALIGN_STR ".align 4" + +#endif diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S index 9bda748..24d6508 100644 --- a/arch/xtensa/lib/memcopy.S +++ b/arch/xtensa/lib/memcopy.S @@ -9,6 +9,7 @@ * Copyright (C) 2002 - 2012 Tensilica Inc. 
*/ +#include #include #include @@ -108,10 +109,7 @@ addi a5, a5, 2 j .Ldstaligned # dst is now aligned, return to main algorithm - .align 4 - .global memcpy - .type memcpy,@function -memcpy: +ENTRY(memcpy) entry sp, 16 # minimal stack frame # a2/ dst, a3/ src, a4/ len @@ -273,14 +271,14 @@ memcpy: s8i a6, a5, 0 retw +ENDPROC(memcpy) /* * void bcopy(const void *src, void *dest, size_t n); */ - .align 4 - .global bcopy - .type bcopy,@function -bcopy: + +ENTRY(bcopy) + entry sp, 16 # minimal stack frame # a2=src, a3=dst, a4=len mov a5, a3 @@ -288,6 +286,8 @@ bcopy: mov a2, a5 j .Lmovecommon # go to common code for memmove+bcopy +ENDPROC(bcopy) + /* * void *memmove(void *dst, const void *src, size_t len); * @@ -376,10 +376,7 @@ bcopy: j .Lbackdstaligned # dst is now aligned, # return to main algorithm - .align 4 - .global memmove - .type memmove,@function -memmove: +ENTRY(memmove) entry sp, 16 # minimal stack frame # a2/ dst, a3/ src, a4/ len @@ -551,11 +548,4 @@ memmove: s8i a6, a5, 0 retw - -/* - * Local Variables: - * mode:fundamental - * comment-start: "# " - * comment-start-skip: "# *" - * End: - */ +ENDPROC(memmove) diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S index 7a724ed..a6cd04b 100644 --- a/arch/xtensa/lib/memset.S +++ b/arch/xtensa/lib/memset.S @@ -11,6 +11,7 @@ * Copyright (C) 2002 Tensilica Inc. */ +#include #include #include @@ -30,10 +31,8 @@ */ .text -.align 4 -.global memset -.type memset,@function -memset: +ENTRY(memset) + entry sp, 16 # minimal stack frame # a2/ dst, a3/ c, a4/ length extui a3, a3, 0, 8 # mask to just 8 bits @@ -141,6 +140,7 @@ EX(10f) s8i a3, a5, 0 .Lbytesetdone: retw +ENDPROC(memset) .section .fixup, "ax" .align 4 diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S index 827e1b3..5fce16b 100644 --- a/arch/xtensa/lib/strncpy_user.S +++ b/arch/xtensa/lib/strncpy_user.S @@ -12,6 +12,7 @@ */ #include +#include #include #include @@ -47,10 +48,8 @@ # a12/ tmp .text -.align 4 -.global __strncpy_user -.type __strncpy_user,@function -__strncpy_user: +ENTRY(__strncpy_user) + entry sp, 16 # minimal stack frame # a2/ dst, a3/ src, a4/ len mov a11, a2 # leave dst in return value register @@ -202,6 +201,7 @@ EX(10f) s8i a9, a11, 0 sub a2, a11, a2 # compute strlen retw +ENDPROC(__strncpy_user) .section .fixup, "ax" .align 4 diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S index 9404ac4..0b956ce 100644 --- a/arch/xtensa/lib/strnlen_user.S +++ b/arch/xtensa/lib/strnlen_user.S @@ -11,6 +11,7 @@ * Copyright (C) 2002 Tensilica Inc. 
 */
 
+#include <linux/linkage.h>
 #include <variant/core.h>
 #include <asm/asmmacro.h>
 
@@ -42,10 +43,8 @@
 # a10/ tmp
 
 .text
-.align	4
-.global	__strnlen_user
-.type	__strnlen_user,@function
-__strnlen_user:
+ENTRY(__strnlen_user)
+
 	entry	sp, 16		# minimal stack frame
 # a2/ s, a3/ len
 	addi	a4, a2, -4	# because we overincrement at the end;
@@ -133,6 +132,8 @@ EX(10f)	l32i	a9, a4, 0	# get word with first two bytes of string
 	sub	a2, a4, a2	# subtract to get length
 	retw
 
+ENDPROC(__strnlen_user)
+
 .section .fixup, "ax"
 .align	4
 10:
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index 0959b6e..64ab197 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -53,14 +53,13 @@
  * a11/ original length
  */
 
+#include <linux/linkage.h>
 #include <variant/core.h>
 #include <asm/asmmacro.h>
 
 .text
-	.align	4
-	.global	__xtensa_copy_user
-	.type	__xtensa_copy_user,@function
-__xtensa_copy_user:
+ENTRY(__xtensa_copy_user)
+
 	entry	sp, 16		# minimal stack frame
 # a2/ dst, a3/ src, a4/ len
 	mov	a5, a2		# copy dst so that a2 is return value
@@ -267,6 +266,7 @@ EX(10f)	s8i	a6, a5, 0
 	movi	a2, 0		# return success for len bytes copied
 	retw
 
+ENDPROC(__xtensa_copy_user)
 
 .section .fixup, "ax"
 .align	4
-- 
cgit v1.1


From f4431396be5b26a9960daf502d129b1b5d126f5e Mon Sep 17 00:00:00 2001
From: Max Filippov <jcmvbkbc@gmail.com>
Date: Mon, 4 Dec 2017 10:47:43 -0800
Subject: xtensa: consolidate kernel stack size related definitions

Define kernel stack size in kmem_layout.h and use it in
current_thread_info, GET_THREAD_INFO, THREAD_SIZE and
THREAD_SIZE_ORDER definitions.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
---
 arch/xtensa/include/asm/current.h     |  4 ++--
 arch/xtensa/include/asm/kmem_layout.h |  3 +++
 arch/xtensa/include/asm/ptrace.h      |  3 +--
 arch/xtensa/include/asm/thread_info.h | 13 +++++++------
 4 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
index 47e46dc..5d98a7a 100644
--- a/arch/xtensa/include/asm/current.h
+++ b/arch/xtensa/include/asm/current.h
@@ -11,6 +11,8 @@
 #ifndef _XTENSA_CURRENT_H
 #define _XTENSA_CURRENT_H
 
+#include <asm/kmem_layout.h>
+
 #ifndef __ASSEMBLY__
 
 #include <linux/thread_info.h>
@@ -26,8 +28,6 @@ static inline struct task_struct *get_current(void)
 
 #else
 
-#define CURRENT_SHIFT 13
-
 #define GET_CURRENT(reg,sp)		\
 	GET_THREAD_INFO(reg,sp);	\
 	l32i reg, reg, TI_TASK		\
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 561f872..28f9260 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -71,4 +71,7 @@
 
 #endif
 
+#define KERNEL_STACK_SHIFT	13
+#define KERNEL_STACK_SIZE	(1 << KERNEL_STACK_SHIFT)
+
 #endif
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index e2d9c5e..05beae3 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -10,6 +10,7 @@
 #ifndef _XTENSA_PTRACE_H
 #define _XTENSA_PTRACE_H
 
+#include <asm/kmem_layout.h>
 #include <uapi/asm/ptrace.h>
 
 /*
@@ -38,8 +39,6 @@
  * +-----------------------+ --------
  */
 
-#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
-
 /* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables).
*/ #define EXC_TABLE_KSTK 0x004 /* Kernel Stack */ diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index 7be2400..71c9865 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -11,7 +11,9 @@ #ifndef _XTENSA_THREAD_INFO_H #define _XTENSA_THREAD_INFO_H -#ifdef __KERNEL__ +#include + +#define CURRENT_SHIFT KERNEL_STACK_SHIFT #ifndef __ASSEMBLY__ # include @@ -84,7 +86,7 @@ struct thread_info { static inline struct thread_info *current_thread_info(void) { struct thread_info *ti; - __asm__("extui %0,a1,0,13\n\t" + __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t" "xor %0, a1, %0" : "=&r" (ti) : ); return ti; } @@ -93,7 +95,7 @@ static inline struct thread_info *current_thread_info(void) /* how to get the thread information struct from ASM */ #define GET_THREAD_INFO(reg,sp) \ - extui reg, sp, 0, 13; \ + extui reg, sp, 0, CURRENT_SHIFT; \ xor reg, sp, reg #endif @@ -130,8 +132,7 @@ static inline struct thread_info *current_thread_info(void) */ #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ -#define THREAD_SIZE 8192 //(2*PAGE_SIZE) -#define THREAD_SIZE_ORDER 1 +#define THREAD_SIZE KERNEL_STACK_SIZE +#define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT) -#endif /* __KERNEL__ */ #endif /* _XTENSA_THREAD_INFO */ -- cgit v1.1 From aa6476f76c1678d5d1087b39d3047601f0139ef0 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 8 Aug 2017 14:06:14 -0700 Subject: xtensa: print hardware config ID on startup Print hardware config ID on startup and config ID recorded in the configuration if it doesn't match one read from the hardware. Signed-off-by: Max Filippov --- arch/xtensa/kernel/setup.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 253a017..3732c91 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -317,6 +317,13 @@ static inline int mem_reserve(unsigned long start, unsigned long end) void __init setup_arch(char **cmdline_p) { + pr_info("config ID: %08x:%08x\n", + get_sr(SREG_EPC), get_sr(SREG_EXCSAVE)); + if (get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 || + get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1) + pr_info("built for config ID: %08x:%08x\n", + XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1); + *cmdline_p = command_line; platform_setup(cmdline_p); strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); @@ -582,12 +589,14 @@ c_show(struct seq_file *f, void *slot) "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" "core ID\t\t: " XCHAL_CORE_ID "\n" "build ID\t: 0x%x\n" + "config ID\t: %08x:%08x\n" "byte order\t: %s\n" "cpu MHz\t\t: %lu.%02lu\n" "bogomips\t: %lu.%02lu\n", num_online_cpus(), cpumask_pr_args(cpu_online_mask), XCHAL_BUILD_UNIQUE_ID, + get_sr(SREG_EPC), get_sr(SREG_EXCSAVE), XCHAL_HAVE_BE ? "big" : "little", ccount_freq/1000000, (ccount_freq/10000) % 100, -- cgit v1.1 From 40d1a07b333ef1f7fce11ee20b8f4281d1a75fb9 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 27 Mar 2017 02:44:47 -0700 Subject: xtensa: enable stack protector The implementation is adopted from the ARM arch. GCC 7.3, 8 or newer is required for building the xtensa kernel with SSP. 
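
To illustrate the mechanism (a hand-written sketch, not code from this
patch: gcc emits the equivalent checks itself when -fstack-protector is
enabled, and the kernel only has to provide the __stack_chk_guard
variable and the __stack_chk_fail() handler):

	extern unsigned long __stack_chk_guard;	/* canary, set up at boot */
	extern void __stack_chk_fail(void);	/* called on stack corruption */

	void protected_function(void)
	{
		/* prologue: stash the canary below the return address */
		unsigned long canary = __stack_chk_guard;
		char buf[64];

		/* ... function body that might overflow buf ... */

		/* epilogue: re-check the canary before returning */
		if (canary != __stack_chk_guard)
			__stack_chk_fail();	/* does not return */
	}

Because gcc on xtensa references a single global __stack_chk_guard
rather than per-thread storage, all tasks share one canary value on SMP;
on non-SMP kernels the _switch_to hunk below reloads the global from the
incoming task's stack_canary so each task can keep its own value.
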
Signed-off-by: Max Filippov --- .../features/debug/stackprotector/arch-support.txt | 2 +- arch/xtensa/Kconfig | 1 + arch/xtensa/boot/lib/Makefile | 4 +++ arch/xtensa/include/asm/stackprotector.h | 40 ++++++++++++++++++++++ arch/xtensa/kernel/asm-offsets.c | 3 ++ arch/xtensa/kernel/entry.S | 6 ++++ arch/xtensa/kernel/process.c | 6 ++++ 7 files changed, 61 insertions(+), 1 deletion(-) create mode 100644 arch/xtensa/include/asm/stackprotector.h diff --git a/Documentation/features/debug/stackprotector/arch-support.txt b/Documentation/features/debug/stackprotector/arch-support.txt index d7acd7b..59a4c9f 100644 --- a/Documentation/features/debug/stackprotector/arch-support.txt +++ b/Documentation/features/debug/stackprotector/arch-support.txt @@ -35,5 +35,5 @@ | um: | TODO | | unicore32: | TODO | | x86: | ok | - | xtensa: | TODO | + | xtensa: | ok | ----------------------- diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index eb1f196..fffe05b 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -15,6 +15,7 @@ config XTENSA select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK + select HAVE_CC_STACKPROTECTOR select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile index d2a7f48..2fe1829 100644 --- a/arch/xtensa/boot/lib/Makefile +++ b/arch/xtensa/boot/lib/Makefile @@ -15,6 +15,10 @@ CFLAGS_REMOVE_inftrees.o = -pg CFLAGS_REMOVE_inffast.o = -pg endif +CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong +CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong +CFLAGS_REMOVE_inftrees.o += -fstack-protector -fstack-protector-strong +CFLAGS_REMOVE_inffast.o += -fstack-protector -fstack-protector-strong quiet_cmd_copy_zlib = COPY $@ cmd_copy_zlib = cat $< > $@ diff --git a/arch/xtensa/include/asm/stackprotector.h b/arch/xtensa/include/asm/stackprotector.h new file mode 100644 index 0000000..e368f94 --- /dev/null +++ b/arch/xtensa/include/asm/stackprotector.h @@ -0,0 +1,40 @@ +/* + * GCC stack protector support. + * + * (This is directly adopted from the ARM implementation) + * + * Stack protector works by putting predefined pattern at the start of + * the stack frame and verifying that it hasn't been overwritten when + * returning from the function. The pattern is called stack canary + * and gcc expects it to be defined by a global variable called + * "__stack_chk_guard" on Xtensa. This unfortunately means that on SMP + * we cannot have a different canary value per task. + */ + +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H 1 + +#include +#include + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. 
*/ + get_random_bytes(&canary, sizeof(canary)); + canary ^= LINUX_VERSION_CODE; + + current->stack_canary = canary; + __stack_chk_guard = current->stack_canary; +} + +#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index bcb5beb..3b119b3 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c @@ -76,6 +76,9 @@ int main(void) DEFINE(TASK_PID, offsetof (struct task_struct, pid)); DEFINE(TASK_THREAD, offsetof (struct task_struct, thread)); DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack)); +#ifdef CONFIG_CC_STACKPROTECTOR + DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); +#endif DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct)); /* offsets in thread_info struct */ diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index a27a9a6..5caff07 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1971,6 +1971,12 @@ ENTRY(_switch_to) s32i a1, a2, THREAD_SP # save stack pointer #endif +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + movi a6, __stack_chk_guard + l32i a8, a3, TASK_STACK_CANARY + s32i a8, a6, 0 +#endif + /* Disable ints while we manipulate the stack pointer. */ irq_save a14, a3 diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index ff4f0ec..8dd0593 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -58,6 +58,12 @@ void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); +#ifdef CONFIG_CC_STACKPROTECTOR +#include +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + #if XTENSA_HAVE_COPROCESSORS void coprocessor_release_all(struct thread_info *ti) -- cgit v1.1 From c130d3be84afb9b5a30ce4f715f88a1c1dcc4114 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 15 Dec 2017 12:00:30 -0800 Subject: xtensa: clean up custom-controlled debug output Replace #ifdef'fed/commented out debug printk statements with pr_debug. Replace printk statements with pr_* equivalents. 
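The conversion is mechanical; using one of the module.c call sites, the before/after shape is (sketch):

	/* before: output gated by a custom compile-time switch */
	#ifdef DEBUG_RELOCATE
		printk("Applying relocate section %u to %u\n", relsec,
		       sechdrs[relsec].sh_info);
	#endif

	/* after: pr_debug compiles out unless DEBUG or dynamic debug is enabled */
		pr_debug("Applying relocate section %u to %u\n", relsec,
			 sechdrs[relsec].sh_info);

The pr_err/pr_warn/pr_info replacements keep the old severity while picking up a subsystem pr_fmt prefix where one is defined.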
Signed-off-by: Max Filippov --- arch/xtensa/kernel/module.c | 19 +++++++--------- arch/xtensa/kernel/pci.c | 30 +++++++++---------------- arch/xtensa/kernel/setup.c | 7 +++--- arch/xtensa/kernel/signal.c | 8 ++----- arch/xtensa/kernel/traps.c | 25 ++++++++++++--------- arch/xtensa/lib/pci-auto.c | 45 ++++++++----------------------------- arch/xtensa/mm/cache.c | 3 --- arch/xtensa/mm/fault.c | 22 ++++++++---------- arch/xtensa/mm/tlb.c | 6 ++--- arch/xtensa/platforms/iss/console.c | 4 ++-- arch/xtensa/platforms/iss/network.c | 14 +++++------- 11 files changed, 66 insertions(+), 117 deletions(-) diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c index b715237..902845d 100644 --- a/arch/xtensa/kernel/module.c +++ b/arch/xtensa/kernel/module.c @@ -22,8 +22,6 @@ #include #include -#undef DEBUG_RELOCATE - static int decode_calln_opcode (unsigned char *location) { @@ -58,10 +56,9 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, unsigned char *location; uint32_t value; -#ifdef DEBUG_RELOCATE - printk("Applying relocate section %u to %u\n", relsec, - sechdrs[relsec].sh_info); -#endif + pr_debug("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rela[i].r_offset; @@ -87,7 +84,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, value -= ((unsigned long)location & -4) + 4; if ((value & 3) != 0 || ((value + (1 << 19)) >> 20) != 0) { - printk("%s: relocation out of range, " + pr_err("%s: relocation out of range, " "section %d reloc %d " "sym '%s'\n", mod->name, relsec, i, @@ -111,7 +108,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, value -= (((unsigned long)location + 3) & -4); if ((value & 3) != 0 || (signed int)value >> 18 != -1) { - printk("%s: relocation out of range, " + pr_err("%s: relocation out of range, " "section %d reloc %d " "sym '%s'\n", mod->name, relsec, i, @@ -156,7 +153,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, case R_XTENSA_SLOT12_OP: case R_XTENSA_SLOT13_OP: case R_XTENSA_SLOT14_OP: - printk("%s: unexpected FLIX relocation: %u\n", + pr_err("%s: unexpected FLIX relocation: %u\n", mod->name, ELF32_R_TYPE(rela[i].r_info)); return -ENOEXEC; @@ -176,13 +173,13 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, case R_XTENSA_SLOT12_ALT: case R_XTENSA_SLOT13_ALT: case R_XTENSA_SLOT14_ALT: - printk("%s: unexpected ALT relocation: %u\n", + pr_err("%s: unexpected ALT relocation: %u\n", mod->name, ELF32_R_TYPE(rela[i].r_info)); return -ENOEXEC; default: - printk("%s: unexpected relocation: %u\n", + pr_err("%s: unexpected relocation: %u\n", mod->name, ELF32_R_TYPE(rela[i].r_info)); return -ENOEXEC; diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index 903963e..d981f01 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c @@ -29,14 +29,6 @@ #include #include -#undef DEBUG - -#ifdef DEBUG -#define DBG(x...) printk(x) -#else -#define DBG(x...) 
-#endif - /* PCI Controller */ @@ -101,8 +93,8 @@ pcibios_enable_resources(struct pci_dev *dev, int mask) for(idx=0; idx<6; idx++) { r = &dev->resource[idx]; if (!r->start && r->end) { - printk (KERN_ERR "PCI: Device %s not available because " - "of resource collisions\n", pci_name(dev)); + pr_err("PCI: Device %s not available because " + "of resource collisions\n", pci_name(dev)); return -EINVAL; } if (r->flags & IORESOURCE_IO) @@ -113,7 +105,7 @@ pcibios_enable_resources(struct pci_dev *dev, int mask) if (dev->resource[PCI_ROM_RESOURCE].start) cmd |= PCI_COMMAND_MEMORY; if (cmd != old_cmd) { - printk("PCI: Enabling device %s (%04x -> %04x)\n", + pr_info("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } @@ -144,8 +136,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl, res = &pci_ctrl->io_resource; if (!res->flags) { if (io_offset) - printk (KERN_ERR "I/O resource not set for host" - " bridge %d\n", pci_ctrl->index); + pr_err("I/O resource not set for host bridge %d\n", + pci_ctrl->index); res->start = 0; res->end = IO_SPACE_LIMIT; res->flags = IORESOURCE_IO; @@ -159,8 +151,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl, if (!res->flags) { if (i > 0) continue; - printk(KERN_ERR "Memory resource not set for " - "host bridge %d\n", pci_ctrl->index); + pr_err("Memory resource not set for host bridge %d\n", + pci_ctrl->index); res->start = 0; res->end = ~0U; res->flags = IORESOURCE_MEM; @@ -176,7 +168,7 @@ static int __init pcibios_init(void) struct pci_bus *bus; int next_busno = 0, ret; - printk("PCI: Probing PCI hardware\n"); + pr_info("PCI: Probing PCI hardware\n"); /* Scan all of the recorded PCI controllers. */ for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) { @@ -232,7 +224,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) for (idx=0; idx<6; idx++) { r = &dev->resource[idx]; if (!r->start && r->end) { - printk(KERN_ERR "PCI: Device %s not available because " + pr_err("PCI: Device %s not available because " "of resource collisions\n", pci_name(dev)); return -EINVAL; } @@ -242,8 +234,8 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) cmd |= PCI_COMMAND_MEMORY; } if (cmd != old_cmd) { - printk("PCI: Enabling device %s (%04x -> %04x)\n", - pci_name(dev), old_cmd, cmd); + pr_info("PCI: Enabling device %s (%04x -> %04x)\n", + pci_name(dev), old_cmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 3732c91..cf7516c 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -156,7 +156,7 @@ static int __init parse_bootparam(const bp_tag_t* tag) /* Boot parameters must start with a BP_TAG_FIRST tag. */ if (tag->id != BP_TAG_FIRST) { - printk(KERN_WARNING "Invalid boot parameters!\n"); + pr_warn("Invalid boot parameters!\n"); return 0; } @@ -165,15 +165,14 @@ static int __init parse_bootparam(const bp_tag_t* tag) /* Parse all tags. 
*/ while (tag != NULL && tag->id != BP_TAG_LAST) { - for (t = &__tagtable_begin; t < &__tagtable_end; t++) { + for (t = &__tagtable_begin; t < &__tagtable_end; t++) { if (tag->id == t->tag) { t->parse(tag); break; } } if (t == &__tagtable_end) - printk(KERN_WARNING "Ignoring tag " - "0x%08x\n", tag->id); + pr_warn("Ignoring tag 0x%08x\n", tag->id); tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size); } diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index d427e78..f88e7a0 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -28,8 +28,6 @@ #include #include -#define DEBUG_SIG 0 - extern struct task_struct *coproc_owners[]; struct rt_sigframe @@ -399,10 +397,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, regs->areg[8] = (unsigned long) &frame->uc; regs->threadptr = tp; -#if DEBUG_SIG - printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n", - current->comm, current->pid, sig, frame, regs->pc); -#endif + pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n", + current->comm, current->pid, sig, frame, regs->pc); return 0; } diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index bae697a..9a1fef9 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -178,13 +179,14 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err) void do_unhandled(struct pt_regs *regs, unsigned long exccause) { __die_if_kernel("Caught unhandled exception - should not happen", - regs, SIGKILL); + regs, SIGKILL); /* If in user mode, send SIGILL signal to current process */ - printk("Caught unhandled exception in '%s' " - "(pid = %d, pc = %#010lx) - should not happen\n" - "\tEXCCAUSE is %ld\n", - current->comm, task_pid_nr(current), regs->pc, exccause); + pr_info_ratelimited("Caught unhandled exception in '%s' " + "(pid = %d, pc = %#010lx) - should not happen\n" + "\tEXCCAUSE is %ld\n", + current->comm, task_pid_nr(current), regs->pc, + exccause); force_sig(SIGILL, current); } @@ -305,8 +307,8 @@ do_illegal_instruction(struct pt_regs *regs) /* If in user mode, send SIGILL signal to current process. */ - printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n", - current->comm, task_pid_nr(current), regs->pc); + pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n", + current->comm, task_pid_nr(current), regs->pc); force_sig(SIGILL, current); } @@ -325,13 +327,14 @@ do_unaligned_user (struct pt_regs *regs) siginfo_t info; __die_if_kernel("Unhandled unaligned exception in kernel", - regs, SIGKILL); + regs, SIGKILL); current->thread.bad_vaddr = regs->excvaddr; current->thread.error_code = -3; - printk("Unaligned memory access to %08lx in '%s' " - "(pid = %d, pc = %#010lx)\n", - regs->excvaddr, current->comm, task_pid_nr(current), regs->pc); + pr_info_ratelimited("Unaligned memory access to %08lx in '%s' " + "(pid = %d, pc = %#010lx)\n", + regs->excvaddr, current->comm, + task_pid_nr(current), regs->pc); info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; diff --git a/arch/xtensa/lib/pci-auto.c b/arch/xtensa/lib/pci-auto.c index 34d05ab..a2b5581 100644 --- a/arch/xtensa/lib/pci-auto.c +++ b/arch/xtensa/lib/pci-auto.c @@ -49,17 +49,6 @@ * */ - -/* define DEBUG to print some debugging messages. */ - -#undef DEBUG - -#ifdef DEBUG -# define DBG(x...) printk(x) -#else -# define DBG(x...) 
-#endif - static int pciauto_upper_iospc; static int pciauto_upper_memspc; @@ -97,7 +86,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit) { bar_size &= PCI_BASE_ADDRESS_IO_MASK; upper_limit = &pciauto_upper_iospc; - DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr); + pr_debug("PCI Autoconfig: BAR %d, I/O, ", bar_nr); } else { @@ -107,7 +96,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit) bar_size &= PCI_BASE_ADDRESS_MEM_MASK; upper_limit = &pciauto_upper_memspc; - DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr); + pr_debug("PCI Autoconfig: BAR %d, Mem, ", bar_nr); } /* Allocate a base address (bar_size is negative!) */ @@ -125,7 +114,8 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit) if (found_mem64) pci_write_config_dword(dev, (bar+=4), 0x00000000); - DBG("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit); + pr_debug("size=0x%x, address=0x%x\n", + ~bar_size + 1, *upper_limit); } } @@ -150,7 +140,7 @@ pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn) if (irq == -1) irq = 0; - DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin); + pr_debug("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin); pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); } @@ -289,8 +279,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) int iosave, memsave; - DBG("PCI Autoconfig: Found P2P bridge, device %d\n", - PCI_SLOT(pci_devfn)); + pr_debug("PCI Autoconfig: Found P2P bridge, device %d\n", + PCI_SLOT(pci_devfn)); /* Allocate PCI I/O and/or memory space */ pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1); @@ -306,23 +296,6 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) } - -#if 0 - /* Skip legacy mode IDE controller */ - - if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) { - - unsigned char prg_iface; - pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface); - - if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) { - DBG("PCI Autoconfig: Skipping legacy mode " - "IDE controller\n"); - continue; - } - } -#endif - /* * Found a peripheral, enable some standard * settings @@ -337,8 +310,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80); /* Allocate PCI I/O and/or memory space */ - DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n", - current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) ); + pr_debug("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n", + current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn)); pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5); pciauto_setup_irq(pci_ctrl, dev, pci_devfn); diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 3c75c4e..57dc231 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c @@ -33,9 +33,6 @@ #include #include -//#define printd(x...) printk(x) -#define printd(x...) do { } while(0) - /* * Note: * The kernel provides one architecture bit PG_arch_1 in the page flags that diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index a14df5a..8b9b6f4 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -25,8 +25,6 @@ DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST; void bad_page_fault(struct pt_regs*, unsigned long, int); -#undef DEBUG_PAGE_FAULT - /* * This routine handles page faults. 
It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -68,10 +66,10 @@ void do_page_fault(struct pt_regs *regs) exccause == EXCCAUSE_ITLB_MISS || exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0; -#ifdef DEBUG_PAGE_FAULT - printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid, - address, exccause, regs->pc, is_write? "w":"", is_exec? "x":""); -#endif + pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n", + current->comm, current->pid, + address, exccause, regs->pc, + is_write ? "w" : "", is_exec ? "x" : ""); if (user_mode(regs)) flags |= FAULT_FLAG_USER; @@ -247,10 +245,8 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) /* Are we prepared to handle this kernel fault? */ if ((entry = search_exception_tables(regs->pc)) != NULL) { -#ifdef DEBUG_PAGE_FAULT - printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n", - current->comm, regs->pc, entry->fixup); -#endif + pr_debug("%s: Exception at pc=%#010lx (%lx)\n", + current->comm, regs->pc, entry->fixup); current->thread.bad_uaddr = address; regs->pc = entry->fixup; return; @@ -259,9 +255,9 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) /* Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ - printk(KERN_ALERT "Unable to handle kernel paging request at virtual " - "address %08lx\n pc = %08lx, ra = %08lx\n", - address, regs->pc, regs->areg[0]); + pr_alert("Unable to handle kernel paging request at virtual " + "address %08lx\n pc = %08lx, ra = %08lx\n", + address, regs->pc, regs->areg[0]); die("Oops", regs, sig); do_exit(sig); } diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index 35c8222..59153d0 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c @@ -95,10 +95,8 @@ void local_flush_tlb_range(struct vm_area_struct *vma, if (mm->context.asid[cpu] == NO_CONTEXT) return; -#if 0 - printk("[tlbrange<%02lx,%08lx,%08lx>]\n", - (unsigned long)mm->context.asid[cpu], start, end); -#endif + pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n", + (unsigned long)mm->context.asid[cpu], start, end); local_irq_save(flags); if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c index 0140a22..6fc0a94 100644 --- a/arch/xtensa/platforms/iss/console.c +++ b/arch/xtensa/platforms/iss/console.c @@ -186,7 +186,7 @@ int __init rs_init(void) serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES); - printk ("%s %s\n", serial_name, serial_version); + pr_info("%s %s\n", serial_name, serial_version); /* Initialize the tty_driver structure */ @@ -215,7 +215,7 @@ static __exit void rs_exit(void) int error; if ((error = tty_unregister_driver(serial_driver))) - printk("ISS_SERIAL: failed to unregister serial driver (%d)\n", + pr_err("ISS_SERIAL: failed to unregister serial driver (%d)\n", error); put_tty_driver(serial_driver); tty_port_destroy(&serial_port); diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 66a5d15..538d17e 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -16,6 +16,8 @@ * */ +#define pr_fmt(fmt) "%s: " fmt, __func__ + #include #include #include @@ -609,8 +611,6 @@ struct iss_net_init { * those fields. They will be later initialized in iss_net_init. 
*/ -#define ERR KERN_ERR "iss_net_setup: " - static int __init iss_net_setup(char *str) { struct iss_net_private *device = NULL; @@ -622,14 +622,14 @@ static int __init iss_net_setup(char *str) end = strchr(str, '='); if (!end) { - printk(ERR "Expected '=' after device number\n"); + pr_err("Expected '=' after device number\n"); return 1; } *end = 0; rc = kstrtouint(str, 0, &n); *end = '='; if (rc < 0) { - printk(ERR "Failed to parse '%s'\n", str); + pr_err("Failed to parse '%s'\n", str); return 1; } str = end; @@ -645,13 +645,13 @@ static int __init iss_net_setup(char *str) spin_unlock(&devices_lock); if (device && device->index == n) { - printk(ERR "Device %u already configured\n", n); + pr_err("Device %u already configured\n", n); return 1; } new = alloc_bootmem(sizeof(*new)); if (new == NULL) { - printk(ERR "Alloc_bootmem failed\n"); + pr_err("Alloc_bootmem failed\n"); return 1; } @@ -663,8 +663,6 @@ static int __init iss_net_setup(char *str) return 1; } -#undef ERR - __setup("eth", iss_net_setup); /* -- cgit v1.1 From f21a79cab3773bc17aa845b7738c7f200778a260 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 15 Dec 2017 16:08:16 -0800 Subject: xtensa: clean up exception handling structure Instead of using flat array of longs use normal C structure and generate EXC_TABLE_* constants in the asm-offsets.c Signed-off-by: Max Filippov --- arch/xtensa/include/asm/ptrace.h | 12 ------------ arch/xtensa/include/asm/regs.h | 1 + arch/xtensa/include/asm/traps.h | 23 +++++++++++++++++++++++ arch/xtensa/kernel/asm-offsets.c | 13 +++++++++++++ arch/xtensa/kernel/traps.c | 39 +++++++++++++++++++-------------------- 5 files changed, 56 insertions(+), 32 deletions(-) diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h index 05beae3..3a5c591 100644 --- a/arch/xtensa/include/asm/ptrace.h +++ b/arch/xtensa/include/asm/ptrace.h @@ -39,18 +39,6 @@ * +-----------------------+ -------- */ -/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */ - -#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */ -#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */ -#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */ -#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */ -#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */ -#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */ -#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */ -#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */ -#define EXC_TABLE_SIZE 0x400 - #ifndef __ASSEMBLY__ #include diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h index 881a113..477594e 100644 --- a/arch/xtensa/include/asm/regs.h +++ b/arch/xtensa/include/asm/regs.h @@ -76,6 +76,7 @@ #define EXCCAUSE_COPROCESSOR5_DISABLED 37 #define EXCCAUSE_COPROCESSOR6_DISABLED 38 #define EXCCAUSE_COPROCESSOR7_DISABLED 39 +#define EXCCAUSE_N 64 /* PS register fields. */ diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h index 2e69aa4..5bd19709 100644 --- a/arch/xtensa/include/asm/traps.h +++ b/arch/xtensa/include/asm/traps.h @@ -13,6 +13,29 @@ #include /* + * Per-CPU exception handling data structure. + * EXCSAVE1 points to it. 
+ */ +struct exc_table { + /* Kernel Stack */ + void *kstk; + /* Double exception save area for a0 */ + unsigned long double_save; + /* Fixup handler */ + void *fixup; + /* For passing a parameter to fixup */ + void *fixup_param; + /* For fast syscall handler */ + unsigned long syscall_save; + /* Fast user exception handlers */ + void *fast_user_handler[EXCCAUSE_N]; + /* Fast kernel exception handlers */ + void *fast_kernel_handler[EXCCAUSE_N]; + /* Default C-Handlers */ + void *default_handler[EXCCAUSE_N]; +}; + +/* * handler must be either of the following: * void (*)(struct pt_regs *regs); * void (*)(struct pt_regs *regs, unsigned long exccause); diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index 3b119b3..022cf91 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c @@ -132,5 +132,18 @@ int main(void) offsetof(struct debug_table, icount_level_save)); #endif + /* struct exc_table */ + DEFINE(EXC_TABLE_KSTK, offsetof(struct exc_table, kstk)); + DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save)); + DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup)); + DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param)); + DEFINE(EXC_TABLE_SYSCALL_SAVE, + offsetof(struct exc_table, syscall_save)); + DEFINE(EXC_TABLE_FAST_USER, + offsetof(struct exc_table, fast_user_handler)); + DEFINE(EXC_TABLE_FAST_KERNEL, + offsetof(struct exc_table, fast_kernel_handler)); + DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler)); + return 0; } diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 9a1fef9..32c5207 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -159,8 +159,7 @@ COPROCESSOR(7), * 2. it is a temporary memory buffer for the exception handlers. */ -DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]); - +DEFINE_PER_CPU(struct exc_table, exc_table); DEFINE_PER_CPU(struct debug_table, debug_table); void die(const char*, struct pt_regs*, long); @@ -368,28 +367,28 @@ do_debug(struct pt_regs *regs) } -static void set_handler(int idx, void *handler) -{ - unsigned int cpu; - - for_each_possible_cpu(cpu) - per_cpu(exc_table, cpu)[idx] = (unsigned long)handler; -} +#define set_handler(type, cause, handler) \ + do { \ + unsigned int cpu; \ + \ + for_each_possible_cpu(cpu) \ + per_cpu(exc_table, cpu).type[cause] = (handler);\ + } while (0) /* Set exception C handler - for temporary use when probing exceptions */ void * __init trap_set_handler(int cause, void *handler) { - void *previous = (void *)per_cpu(exc_table, 0)[ - EXC_TABLE_DEFAULT / 4 + cause]; - set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler); + void *previous = per_cpu(exc_table, 0).default_handler[cause]; + + set_handler(default_handler, cause, handler); return previous; } static void trap_init_excsave(void) { - unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table); + unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table); __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1)); } @@ -421,10 +420,10 @@ void __init trap_init(void) /* Setup default vectors. 
*/ - for(i = 0; i < 64; i++) { - set_handler(EXC_TABLE_FAST_USER/4 + i, user_exception); - set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception); - set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled); + for (i = 0; i < EXCCAUSE_N; i++) { + set_handler(fast_user_handler, i, user_exception); + set_handler(fast_kernel_handler, i, kernel_exception); + set_handler(default_handler, i, do_unhandled); } /* Setup specific handlers. */ @@ -436,11 +435,11 @@ void __init trap_init(void) void *handler = dispatch_init_table[i].handler; if (fast == 0) - set_handler (EXC_TABLE_DEFAULT/4 + cause, handler); + set_handler(default_handler, cause, handler); if (fast && fast & USER) - set_handler (EXC_TABLE_FAST_USER/4 + cause, handler); + set_handler(fast_user_handler, cause, handler); if (fast && fast & KRNL) - set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler); + set_handler(fast_kernel_handler, cause, handler); } /* Initialize EXCSAVE_1 to hold the address of the exception table. */ -- cgit v1.1 From 501c26e82df8d253851b80082778eeb37e4bab5c Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 15 Dec 2017 16:20:56 -0800 Subject: xtensa: implement early_trap_init Paging on the xtensa architecture requires functioning exception handling because the hardware cannot transparently access page tables that are not currently mapped by the TLB. Exception handling is set up late in the initialization process, but working paging is needed for KASAN. Provide early_trap_init that sets up minimal exception handling sufficient for KASAN to work. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/traps.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h index 5bd19709..f5cd7a7 100644 --- a/arch/xtensa/include/asm/traps.h +++ b/arch/xtensa/include/asm/traps.h @@ -42,6 +42,18 @@ struct exc_table { */ extern void * __init trap_set_handler(int cause, void *handler); extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); +void fast_second_level_miss(void); + +/* Initialize minimal exc_table structure sufficient for basic paging */ +static inline void __init early_trap_init(void) +{ + static struct exc_table exc_table __initdata = { + .fast_kernel_handler[EXCCAUSE_DTLB_MISS] = + fast_second_level_miss, + }; + __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table)); +} + void secondary_trap_init(void); static inline void spill_registers(void) -- cgit v1.1 From c2edb35ae342fedb5a39312c0fa676b74973887a Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 15 Dec 2017 20:45:35 -0800 Subject: xtensa: extract init_kio KIO region placement may be specified in the device tree; that's why it's initialized with the rest of the MMU after early_init_devtree. In order to support KASAN the MMU must be initialized earlier. Separate KIO initialization from the rest of MMU initialization. Reinitialize KIO if its location is specified in the device tree. 
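The resulting boot flow, sketched roughly from the hunks below (details of the device-tree scan elided):

	/*
	 *   init_arch()
	 *     init_mmu()              now runs first and ends by calling
	 *                             init_kio(), mapping KIO at the default
	 *                             xtensa_kio_paddr
	 *     ...boot-parameter/device-tree parsing...
	 *       xtensa_dt_io_area()   may update xtensa_kio_paddr from the DT
	 *         init_kio()          remaps KIO at the new address
	 */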
Signed-off-by: Max Filippov --- arch/xtensa/include/asm/mmu_context.h | 1 + arch/xtensa/include/asm/nommu_context.h | 4 ++++ arch/xtensa/kernel/setup.c | 10 ++++++---- arch/xtensa/mm/mmu.c | 30 +++++++++++++++++------------- 4 files changed, 28 insertions(+), 17 deletions(-) diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h index f7e186d..de5e6cb 100644 --- a/arch/xtensa/include/asm/mmu_context.h +++ b/arch/xtensa/include/asm/mmu_context.h @@ -52,6 +52,7 @@ DECLARE_PER_CPU(unsigned long, asid_cache); #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8)) void init_mmu(void); +void init_kio(void); static inline void set_rasid_register (unsigned long val) { diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h index 2cebdbb..37251b2 100644 --- a/arch/xtensa/include/asm/nommu_context.h +++ b/arch/xtensa/include/asm/nommu_context.h @@ -3,6 +3,10 @@ static inline void init_mmu(void) { } +static inline void init_kio(void) +{ +} + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index cf7516c..960212e 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -207,6 +207,8 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname, /* round down to nearest 256MB boundary */ xtensa_kio_paddr &= 0xf0000000; + init_kio(); + return 1; } #else @@ -245,6 +247,10 @@ void __init early_init_devtree(void *params) void __init init_arch(bp_tag_t *bp_start) { + /* Initialize MMU. */ + + init_mmu(); + /* Parse boot parameters */ if (bp_start) @@ -262,10 +268,6 @@ void __init init_arch(bp_tag_t *bp_start) /* Early hook for platforms */ platform_init(bp_start); - - /* Initialize MMU. */ - - init_mmu(); } /* diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 358d748..54c01e3 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -82,6 +82,23 @@ void init_mmu(void) set_itlbcfg_register(0); set_dtlbcfg_register(0); #endif + init_kio(); + local_flush_tlb_all(); + + /* Set rasid register to a known value. */ + + set_rasid_register(ASID_INSERT(ASID_USER_FIRST)); + + /* Set PTEVADDR special register to the start of the page + * table, which is in kernel mappable space (ie. not + * statically mapped). This register's value is undefined on + * reset. + */ + set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR); +} + +void init_kio(void) +{ #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) /* * Update the IO area mapping in case xtensa_kio_paddr has changed @@ -95,17 +112,4 @@ void init_mmu(void) write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS), XCHAL_KIO_BYPASS_VADDR + 6); #endif - - local_flush_tlb_all(); - - /* Set rasid register to a known value. */ - - set_rasid_register(ASID_INSERT(ASID_USER_FIRST)); - - /* Set PTEVADDR special register to the start of the page - * table, which is in kernel mappable space (ie. not - * statically mapped). This register's value is undefined on - * reset. - */ - set_ptevaddr_register(PGTABLE_START); } -- cgit v1.1 From d4e337fe822354895334dbaded61f08206dcac25 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 15 Dec 2017 20:46:55 -0800 Subject: xtensa: don't clear swapper_pg_dir in paging_init swapper_pg_dir is located in the .bss, so it's zero-initialized anyway. With KASAN enabled, paging_init will be called after KASAN initialization, so it must not erase the page directory entries set up for the KASAN shadow map. 
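The ordering constraint, as a sketch of the KASAN boot sequence described in the neighbouring patches:

	/*
	 *   init_mmu();
	 *   kasan_early_init();   <- installs shadow-map entries in swapper_pg_dir
	 *   ...
	 *   paging_init();        <- now runs after KASAN init, so the old
	 *                            memset(swapper_pg_dir, 0, PAGE_SIZE) would
	 *                            have wiped the shadow mappings
	 */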
Signed-off-by: Max Filippov --- arch/xtensa/mm/mmu.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 54c01e3..9d1ecfc 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -56,7 +56,6 @@ static void __init fixedrange_init(void) void __init paging_init(void) { - memset(swapper_pg_dir, 0, PAGE_SIZE); #ifdef CONFIG_HIGHMEM fixedrange_init(); pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP); -- cgit v1.1 From 1af1e8a39dc0fab5e50f10462c636da8c1e0cfbb Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 3 Dec 2017 19:09:41 -0800 Subject: xtensa: move fixmap and kmap just above the KSEG The virtual address space between the page table and the VMALLOC region is big enough to host KASAN shadow map and there's enough space between the VMALLOC area and KSEG for the fixmap and kmap. Move fixmap and kmap to the gap between VMALLOC area and KSEG, just above the KSEG. Reorder entries in the kernel memory layout printing code. Drop duplicate PGTABLE_START definition, use XCHAL_PAGE_TABLE_VADDR instead. Signed-off-by: Max Filippov --- Documentation/xtensa/mmu.txt | 72 +++++++++++++++++++-------------------- arch/xtensa/include/asm/fixmap.h | 4 +-- arch/xtensa/include/asm/highmem.h | 2 +- arch/xtensa/include/asm/page.h | 2 -- arch/xtensa/mm/init.c | 12 +++---- 5 files changed, 45 insertions(+), 47 deletions(-) diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt index 5de8715..1692139 100644 --- a/Documentation/xtensa/mmu.txt +++ b/Documentation/xtensa/mmu.txt @@ -69,19 +69,8 @@ Default MMUv2-compatible layout. | Userspace | 0x00000000 TASK_SIZE +------------------+ 0x40000000 +------------------+ -| Page table | 0x80000000 -+------------------+ 0x80400000 +| Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE +------------------+ -| KMAP area | PKMAP_BASE PTRS_PER_PTE * -| | DCACHE_N_COLORS * -| | PAGE_SIZE -| | (4MB * DCACHE_N_COLORS) -+------------------+ -| Atomic KMAP area | FIXADDR_START KM_TYPE_NR * -| | NR_CPUS * -| | DCACHE_N_COLORS * -| | PAGE_SIZE -+------------------+ FIXADDR_TOP 0xbffff000 +------------------+ | VMALLOC area | VMALLOC_START 0xc0000000 128MB - 64KB +------------------+ VMALLOC_END @@ -92,6 +81,17 @@ Default MMUv2-compatible layout. | remap area 2 | +------------------+ +------------------+ +| KMAP area | PKMAP_BASE PTRS_PER_PTE * +| | DCACHE_N_COLORS * +| | PAGE_SIZE +| | (4MB * DCACHE_N_COLORS) ++------------------+ +| Atomic KMAP area | FIXADDR_START KM_TYPE_NR * +| | NR_CPUS * +| | DCACHE_N_COLORS * +| | PAGE_SIZE ++------------------+ FIXADDR_TOP 0xcffff000 ++------------------+ | Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xd0000000 128MB +------------------+ | Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xd8000000 128MB @@ -109,19 +109,8 @@ Default MMUv2-compatible layout. | Userspace | 0x00000000 TASK_SIZE +------------------+ 0x40000000 +------------------+ -| Page table | 0x80000000 -+------------------+ 0x80400000 +| Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE +------------------+ -| KMAP area | PKMAP_BASE PTRS_PER_PTE * -| | DCACHE_N_COLORS * -| | PAGE_SIZE -| | (4MB * DCACHE_N_COLORS) -+------------------+ -| Atomic KMAP area | FIXADDR_START KM_TYPE_NR * -| | NR_CPUS * -| | DCACHE_N_COLORS * -| | PAGE_SIZE -+------------------+ FIXADDR_TOP 0x9ffff000 +------------------+ | VMALLOC area | VMALLOC_START 0xa0000000 128MB - 64KB +------------------+ VMALLOC_END @@ -132,6 +121,17 @@ Default MMUv2-compatible layout. 
| remap area 2 | +------------------+ +------------------+ +| KMAP area | PKMAP_BASE PTRS_PER_PTE * +| | DCACHE_N_COLORS * +| | PAGE_SIZE +| | (4MB * DCACHE_N_COLORS) ++------------------+ +| Atomic KMAP area | FIXADDR_START KM_TYPE_NR * +| | NR_CPUS * +| | DCACHE_N_COLORS * +| | PAGE_SIZE ++------------------+ FIXADDR_TOP 0xaffff000 ++------------------+ | Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xb0000000 256MB +------------------+ | Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xc0000000 256MB @@ -150,19 +150,8 @@ Default MMUv2-compatible layout. | Userspace | 0x00000000 TASK_SIZE +------------------+ 0x40000000 +------------------+ -| Page table | 0x80000000 -+------------------+ 0x80400000 +| Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE +------------------+ -| KMAP area | PKMAP_BASE PTRS_PER_PTE * -| | DCACHE_N_COLORS * -| | PAGE_SIZE -| | (4MB * DCACHE_N_COLORS) -+------------------+ -| Atomic KMAP area | FIXADDR_START KM_TYPE_NR * -| | NR_CPUS * -| | DCACHE_N_COLORS * -| | PAGE_SIZE -+------------------+ FIXADDR_TOP 0x8ffff000 +------------------+ | VMALLOC area | VMALLOC_START 0x90000000 128MB - 64KB +------------------+ VMALLOC_END @@ -173,6 +162,17 @@ Default MMUv2-compatible layout. | remap area 2 | +------------------+ +------------------+ +| KMAP area | PKMAP_BASE PTRS_PER_PTE * +| | DCACHE_N_COLORS * +| | PAGE_SIZE +| | (4MB * DCACHE_N_COLORS) ++------------------+ +| Atomic KMAP area | FIXADDR_START KM_TYPE_NR * +| | NR_CPUS * +| | DCACHE_N_COLORS * +| | PAGE_SIZE ++------------------+ FIXADDR_TOP 0x9ffff000 ++------------------+ | Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xa0000000 512MB +------------------+ | Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xc0000000 512MB diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h index 0d30403..7e25c1b 100644 --- a/arch/xtensa/include/asm/fixmap.h +++ b/arch/xtensa/include/asm/fixmap.h @@ -44,7 +44,7 @@ enum fixed_addresses { __end_of_fixed_addresses }; -#define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE) +#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE) #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) @@ -63,7 +63,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx) * table. */ BUILD_BUG_ON(FIXADDR_START < - XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE); + TLBTEMP_BASE_1 + TLBTEMP_SIZE); BUILD_BUG_ON(idx >= __end_of_fixed_addresses); return __fix_to_virt(idx); } diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h index 6e070db..04e9340 100644 --- a/arch/xtensa/include/asm/highmem.h +++ b/arch/xtensa/include/asm/highmem.h @@ -72,7 +72,7 @@ static inline void *kmap(struct page *page) * page table. 
*/ BUILD_BUG_ON(PKMAP_BASE < - XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE); + TLBTEMP_BASE_1 + TLBTEMP_SIZE); BUG_ON(in_interrupt()); if (!PageHighMem(page)) return page_address(page); diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 4ddbfd5..5d69c11 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -36,8 +36,6 @@ #define MAX_LOW_PFN PHYS_PFN(0xfffffffful) #endif -#define PGTABLE_START 0x80000000 - /* * Cache aliasing: * diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 720fe4e..6fc1cb0 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -100,23 +100,23 @@ void __init mem_init(void) mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" +#ifdef CONFIG_MMU + " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n" +#endif #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n" " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" #endif -#ifdef CONFIG_MMU - " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n" -#endif " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n", +#ifdef CONFIG_MMU + VMALLOC_START, VMALLOC_END, + (VMALLOC_END - VMALLOC_START) >> 20, #ifdef CONFIG_HIGHMEM PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, (LAST_PKMAP*PAGE_SIZE) >> 10, FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10, #endif -#ifdef CONFIG_MMU - VMALLOC_START, VMALLOC_END, - (VMALLOC_END - VMALLOC_START) >> 20, PAGE_OFFSET, PAGE_OFFSET + (max_low_pfn - min_low_pfn) * PAGE_SIZE, #else -- cgit v1.1 From c633544a6154146a210cf158157a1ae7c55473b6 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 3 Dec 2017 13:28:52 -0800 Subject: xtensa: add support for KASAN Cover kernel addresses above 0x90000000 by the shadow map. Enable HAVE_ARCH_KASAN when MMU is enabled. Provide kasan_early_init that fills shadow map with writable copies of kasan_zero_page. Call kasan_early_init right after mmu initialization in the setup_arch. Provide kasan_init that allocates proper shadow map pages from the memblock and puts these pages into the shadow map for addresses from VMALLOC area to the end of KSEG. Call kasan_init right after memblock initialization. Don't use KASAN for the boot code, MMU and KASAN initialization and page fault handler. Make kernel stack size 4 times larger when KASAN is enabled to avoid stack overflows. GCC 7.3, 8 or newer is required to build the xtensa kernel with KASAN. 
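For orientation, the shadow translation these constants feed is the generic KASAN helper, which is essentially:

	/* each shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT == 8 bytes */
	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}

Plugging in the constants below checks out: (0x90000000 >> 3) + 0x6e400000 = 0x80400000, i.e. KASAN_START_VADDR maps exactly to KASAN_SHADOW_START right after the page table, which is what the BUILD_BUG_ON in kasan_init.c verifies.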
Signed-off-by: Max Filippov --- .../features/debug/KASAN/arch-support.txt | 2 +- Documentation/xtensa/mmu.txt | 6 ++ arch/xtensa/Kconfig | 5 ++ arch/xtensa/boot/lib/Makefile | 2 + arch/xtensa/include/asm/kasan.h | 37 +++++++++ arch/xtensa/include/asm/kmem_layout.h | 4 + arch/xtensa/include/asm/pgtable.h | 3 +- arch/xtensa/include/asm/string.h | 19 +++++ arch/xtensa/kernel/setup.c | 7 +- arch/xtensa/kernel/xtensa_ksyms.c | 3 + arch/xtensa/lib/memcopy.S | 10 ++- arch/xtensa/lib/memset.S | 5 +- arch/xtensa/mm/Makefile | 5 ++ arch/xtensa/mm/init.c | 7 ++ arch/xtensa/mm/kasan_init.c | 95 ++++++++++++++++++++++ 15 files changed, 201 insertions(+), 9 deletions(-) create mode 100644 arch/xtensa/include/asm/kasan.h create mode 100644 arch/xtensa/mm/kasan_init.c diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt index 76bbd7f..8abb013 100644 --- a/Documentation/features/debug/KASAN/arch-support.txt +++ b/Documentation/features/debug/KASAN/arch-support.txt @@ -35,5 +35,5 @@ | um: | TODO | | unicore32: | TODO | | x86: | ok | - | xtensa: | TODO | + | xtensa: | ok | ----------------------- diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt index 1692139..318114d 100644 --- a/Documentation/xtensa/mmu.txt +++ b/Documentation/xtensa/mmu.txt @@ -71,6 +71,8 @@ Default MMUv2-compatible layout. +------------------+ | Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE +------------------+ +| KASAN shadow map | KASAN_SHADOW_START 0x80400000 KASAN_SHADOW_SIZE ++------------------+ 0x8e400000 +------------------+ | VMALLOC area | VMALLOC_START 0xc0000000 128MB - 64KB +------------------+ VMALLOC_END @@ -111,6 +113,8 @@ Default MMUv2-compatible layout. +------------------+ | Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE +------------------+ +| KASAN shadow map | KASAN_SHADOW_START 0x80400000 KASAN_SHADOW_SIZE ++------------------+ 0x8e400000 +------------------+ | VMALLOC area | VMALLOC_START 0xa0000000 128MB - 64KB +------------------+ VMALLOC_END @@ -152,6 +156,8 @@ Default MMUv2-compatible layout. 
+------------------+ | Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE +------------------+ +| KASAN shadow map | KASAN_SHADOW_START 0x80400000 KASAN_SHADOW_SIZE ++------------------+ 0x8e400000 +------------------+ | VMALLOC area | VMALLOC_START 0x90000000 128MB - 64KB +------------------+ VMALLOC_END diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index fffe05b..f9f95d6 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -15,6 +15,7 @@ config XTENSA select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK + select HAVE_ARCH_KASAN if MMU select HAVE_CC_STACKPROTECTOR select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG @@ -80,6 +81,10 @@ config VARIANT_IRQ_SWITCH config HAVE_XTENSA_GPIO32 def_bool n +config KASAN_SHADOW_OFFSET + hex + default 0x6e400000 + menu "Processor type and features" choice diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile index 2fe1829..355127f 100644 --- a/arch/xtensa/boot/lib/Makefile +++ b/arch/xtensa/boot/lib/Makefile @@ -15,6 +15,8 @@ CFLAGS_REMOVE_inftrees.o = -pg CFLAGS_REMOVE_inffast.o = -pg endif +KASAN_SANITIZE := n + CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong CFLAGS_REMOVE_inftrees.o += -fstack-protector -fstack-protector-strong diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h new file mode 100644 index 0000000..54be808 --- /dev/null +++ b/arch/xtensa/include/asm/kasan.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_KASAN_H +#define __ASM_KASAN_H + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_KASAN + +#include +#include +#include + +/* Start of area covered by KASAN */ +#define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000) +/* Start of the shadow map */ +#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE) +/* Size of the shadow map */ +#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT) +/* Offset for mem to shadow address transformation */ +#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET) + +void __init kasan_early_init(void); +void __init kasan_init(void); + +#else + +static inline void kasan_early_init(void) +{ +} + +static inline void kasan_init(void) +{ +} + +#endif +#endif +#endif diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h index 28f9260..2317c83 100644 --- a/arch/xtensa/include/asm/kmem_layout.h +++ b/arch/xtensa/include/asm/kmem_layout.h @@ -71,7 +71,11 @@ #endif +#ifndef CONFIG_KASAN #define KERNEL_STACK_SHIFT 13 +#else +#define KERNEL_STACK_SHIFT 15 +#endif #define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT) #endif diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 30dd5b2..3880225 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -12,9 +12,9 @@ #define _XTENSA_PGTABLE_H #define __ARCH_USE_5LEVEL_HACK -#include #include #include +#include /* * We only use two ring levels, user and kernel space. 
@@ -170,6 +170,7 @@ #define PAGE_SHARED_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC) #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE) +#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT) #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC) #if (DCACHE_WAY_SIZE > PAGE_SIZE) diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h index 8d5d9df..586bad9fe 100644 --- a/arch/xtensa/include/asm/string.h +++ b/arch/xtensa/include/asm/string.h @@ -108,14 +108,33 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n) #define __HAVE_ARCH_MEMSET extern void *memset(void *__s, int __c, size_t __count); +extern void *__memset(void *__s, int __c, size_t __count); #define __HAVE_ARCH_MEMCPY extern void *memcpy(void *__to, __const__ void *__from, size_t __n); +extern void *__memcpy(void *__to, __const__ void *__from, size_t __n); #define __HAVE_ARCH_MEMMOVE extern void *memmove(void *__dest, __const__ void *__src, size_t __n); +extern void *__memmove(void *__dest, __const__ void *__src, size_t __n); /* Don't build bcopy at all ... */ #define __HAVE_ARCH_BCOPY +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) + +/* + * For files that are not instrumented (e.g. mm/slub.c) we + * should use not instrumented version of mem* functions. + */ + +#define memcpy(dst, src, len) __memcpy(dst, src, len) +#define memmove(dst, src, len) __memmove(dst, src, len) +#define memset(s, c, n) __memset(s, c, n) + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ +#endif +#endif + #endif /* _XTENSA_STRING_H */ diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 960212e..a931af9 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -36,6 +36,7 @@ #endif #include +#include #include #include #include @@ -251,6 +252,10 @@ void __init init_arch(bp_tag_t *bp_start) init_mmu(); + /* Initialize initial KASAN shadow map */ + + kasan_early_init(); + /* Parse boot parameters */ if (bp_start) @@ -388,7 +393,7 @@ void __init setup_arch(char **cmdline_p) #endif parse_early_param(); bootmem_init(); - + kasan_init(); unflatten_and_copy_device_tree(); #ifdef CONFIG_SMP diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 6723910..3a443f8 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -41,6 +41,9 @@ EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(__memset); +EXPORT_SYMBOL(__memcpy); +EXPORT_SYMBOL(__memmove); EXPORT_SYMBOL(__strncpy_user); EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S index 24d6508..c0f6981 100644 --- a/arch/xtensa/lib/memcopy.S +++ b/arch/xtensa/lib/memcopy.S @@ -109,7 +109,8 @@ addi a5, a5, 2 j .Ldstaligned # dst is now aligned, return to main algorithm -ENTRY(memcpy) +ENTRY(__memcpy) +WEAK(memcpy) entry sp, 16 # minimal stack frame # a2/ dst, a3/ src, a4/ len @@ -271,7 +272,7 @@ ENTRY(memcpy) s8i a6, a5, 0 retw -ENDPROC(memcpy) +ENDPROC(__memcpy) /* * void bcopy(const void *src, void *dest, size_t n); @@ -376,7 +377,8 @@ ENDPROC(bcopy) j .Lbackdstaligned # dst is now aligned, # return to main algorithm -ENTRY(memmove) +ENTRY(__memmove) +WEAK(memmove) entry sp, 16 # minimal stack frame # a2/ dst, a3/ src, a4/ len @@ -548,4 +550,4 @@ ENTRY(memmove) s8i a6, a5, 0 retw -ENDPROC(memmove) +ENDPROC(__memmove) diff --git 
a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S index a6cd04b..276747d 100644 --- a/arch/xtensa/lib/memset.S +++ b/arch/xtensa/lib/memset.S @@ -31,7 +31,8 @@ */ .text -ENTRY(memset) +ENTRY(__memset) +WEAK(memset) entry sp, 16 # minimal stack frame # a2/ dst, a3/ c, a4/ length @@ -140,7 +141,7 @@ EX(10f) s8i a3, a5, 0 .Lbytesetdone: retw -ENDPROC(memset) +ENDPROC(__memset) .section .fixup, "ax" .align 4 diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile index 0b3d296..734888a 100644 --- a/arch/xtensa/mm/Makefile +++ b/arch/xtensa/mm/Makefile @@ -5,3 +5,8 @@ obj-y := init.o misc.o obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o obj-$(CONFIG_HIGHMEM) += highmem.o +obj-$(CONFIG_KASAN) += kasan_init.o + +KASAN_SANITIZE_fault.o := n +KASAN_SANITIZE_kasan_init.o := n +KASAN_SANITIZE_mmu.o := n diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 6fc1cb0..0d980f0 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -100,6 +100,9 @@ void __init mem_init(void) mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" +#ifdef CONFIG_KASAN + " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n" +#endif #ifdef CONFIG_MMU " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n" #endif @@ -108,6 +111,10 @@ void __init mem_init(void) " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" #endif " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n", +#ifdef CONFIG_KASAN + KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE, + KASAN_SHADOW_SIZE >> 20, +#endif #ifdef CONFIG_MMU VMALLOC_START, VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20, diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c new file mode 100644 index 0000000..6b532b6 --- /dev/null +++ b/arch/xtensa/mm/kasan_init.c @@ -0,0 +1,95 @@ +/* + * Xtensa KASAN shadow map initialization + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2017 Cadence Design Systems Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +void __init kasan_early_init(void) +{ + unsigned long vaddr = KASAN_SHADOW_START; + pgd_t *pgd = pgd_offset_k(vaddr); + pmd_t *pmd = pmd_offset(pgd, vaddr); + int i; + + for (i = 0; i < PTRS_PER_PTE; ++i) + set_pte(kasan_zero_pte + i, + mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL)); + + for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) { + BUG_ON(!pmd_none(*pmd)); + set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte)); + } + early_trap_init(); +} + +static void __init populate(void *start, void *end) +{ + unsigned long n_pages = (end - start) / PAGE_SIZE; + unsigned long n_pmds = n_pages / PTRS_PER_PTE; + unsigned long i, j; + unsigned long vaddr = (unsigned long)start; + pgd_t *pgd = pgd_offset_k(vaddr); + pmd_t *pmd = pmd_offset(pgd, vaddr); + pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); + + pr_debug("%s: %p - %p\n", __func__, start, end); + + for (i = j = 0; i < n_pmds; ++i) { + int k; + + for (k = 0; k < PTRS_PER_PTE; ++k, ++j) { + phys_addr_t phys = + memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, + MEMBLOCK_ALLOC_ANYWHERE); + + set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL)); + } + } + + for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE) + set_pmd(pmd + i, __pmd((unsigned long)pte)); + + local_flush_tlb_all(); + memset(start, 0, end - start); +} + +void __init kasan_init(void) +{ + int i; + + BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START - + (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)); + BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR); + + /* + * Replace shadow map pages that cover addresses from VMALLOC area + * start to the end of KSEG with clean writable pages. + */ + populate(kasan_mem_to_shadow((void *)VMALLOC_START), + kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR)); + + /* Write protect kasan_zero_page and zero-initialize it again. */ + for (i = 0; i < PTRS_PER_PTE; ++i) + set_pte(kasan_zero_pte + i, + mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO)); + + local_flush_tlb_all(); + memset(kasan_zero_page, 0, PAGE_SIZE); + + /* At this point kasan is fully initialized. Enable error messages. */ + current->kasan_depth = 0; + pr_info("KernelAddressSanitizer initialized\n"); +} -- cgit v1.1 From e0baa01438d3fa3979f94f98be19ca3df88e0b7c Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 4 Dec 2017 21:20:33 -0800 Subject: xtensa: use __memset in __xtensa_clear_user memset on xtensa is capable of accessing user memory, but KASAN checks if memset function is actually used for that and reports it as an error: ================================================================== BUG: KASAN: user-memory-access in padzero+0x4d/0x58 Write of size 519 at addr 0049ddf9 by task init/1 Call Trace: [] kasan_report+0x160/0x238 [] check_memory_region+0xf8/0x100 [] memset+0x20/0x34 [] padzero+0x4d/0x58 ================================================================== Use __memset in __xtensa_clear_user to avoid that. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/uaccess.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index b8f152b..18bbe1c 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -261,7 +261,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) static inline unsigned long __xtensa_clear_user(void *addr, unsigned long size) { - if ( ! 
memset(addr, 0, size) ) + if (!__memset(addr, 0, size)) return size; return 0; } -- cgit v1.1 From 57358ba9564a0520f870dc14a0f91e7dacc18236 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 17 Dec 2017 14:43:15 -0800 Subject: xtensa: use generic strncpy_from_user with KASAN This enables KASAN check of the destination buffer. Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 1 + arch/xtensa/include/asm/uaccess.h | 7 +++++++ arch/xtensa/kernel/xtensa_ksyms.c | 2 ++ 3 files changed, 10 insertions(+) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index f9f95d6..e2afffb 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -15,6 +15,7 @@ config XTENSA select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK + select GENERIC_STRNCPY_FROM_USER if KASAN select HAVE_ARCH_KASAN if MMU select HAVE_CC_STACKPROTECTOR select HAVE_DEBUG_KMEMLEAK diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 18bbe1c..f1158b4 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -44,6 +44,8 @@ #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) +#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) + /* * These are the main single-value transfer routines. They * automatically use the right size if we just have the right pointer @@ -277,6 +279,8 @@ clear_user(void *addr, unsigned long size) #define __clear_user __xtensa_clear_user +#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER + extern long __strncpy_user(char *, const char *, long); static inline long @@ -286,6 +290,9 @@ strncpy_from_user(char *dst, const char *src, long count) return __strncpy_user(dst, src, count); return -EFAULT; } +#else +long strncpy_from_user(char *dst, const char *src, long count); +#endif /* * Return the size of a string (including the ending 0!) diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 3a443f8..04f19de 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -44,7 +44,9 @@ EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(__memset); EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(__memmove); +#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER EXPORT_SYMBOL(__strncpy_user); +#endif EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); -- cgit v1.1 From fed566ca44ce99ff4604e3af941049f9a6bba405 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 10 Dec 2017 20:15:37 -0800 Subject: xtensa: print kernel sections info in mem_init Output virtual addresses and sizes occupied by the main kernel sections: .text, .rodata, .data, .init and .bss. 
Signed-off-by: Max Filippov --- arch/xtensa/mm/init.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 0d980f0..d776ec0 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -110,7 +110,12 @@ void __init mem_init(void) " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n" " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" #endif - " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n", + " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n" + " .text : 0x%08lx - 0x%08lx (%5lu kB)\n" + " .rodata : 0x%08lx - 0x%08lx (%5lu kB)\n" + " .data : 0x%08lx - 0x%08lx (%5lu kB)\n" + " .init : 0x%08lx - 0x%08lx (%5lu kB)\n" + " .bss : 0x%08lx - 0x%08lx (%5lu kB)\n", #ifdef CONFIG_KASAN KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE, KASAN_SHADOW_SIZE >> 20, @@ -129,7 +134,17 @@ void __init mem_init(void) #else min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE, #endif - ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20); + ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20, + (unsigned long)_text, (unsigned long)_etext, + (unsigned long)(_etext - _text) >> 10, + (unsigned long)__start_rodata, (unsigned long)_sdata, + (unsigned long)(_sdata - __start_rodata) >> 10, + (unsigned long)_sdata, (unsigned long)_edata, + (unsigned long)(_edata - _sdata) >> 10, + (unsigned long)__init_begin, (unsigned long)__init_end, + (unsigned long)(__init_end - __init_begin) >> 10, + (unsigned long)__bss_start, (unsigned long)__bss_stop, + (unsigned long)(__bss_stop - __bss_start) >> 10); } #ifdef CONFIG_BLK_DEV_INITRD -- cgit v1.1 From 32bb954dbf6db98562cb4477608dc546421caaf6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 2 Jan 2018 12:00:22 +0100 Subject: xtensa: shut up gcc-8 warnings Many uses of strncpy() on xtensa cause a warning like arch/xtensa/include/asm/string.h:56:42: warning: array subscript is above array bounds [-Warray-bounds] : "0" (__dest), "1" (__src), "r" (__src+__n) This avoids the warning by turning the pointer arithmetic into an integer operation that does not get checked the same way. Signed-off-by: Arnd Bergmann Signed-off-by: Max Filippov --- arch/xtensa/include/asm/string.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h index 586bad9fe..89b51a0 100644 --- a/arch/xtensa/include/asm/string.h +++ b/arch/xtensa/include/asm/string.h @@ -53,7 +53,7 @@ static inline char *strncpy(char *__dest, const char *__src, size_t __n) "bne %1, %5, 1b\n" "2:" : "=r" (__dest), "=r" (__src), "=&r" (__dummy) - : "0" (__dest), "1" (__src), "r" (__src+__n) + : "0" (__dest), "1" (__src), "r" ((uintptr_t)__src+__n) : "memory"); return __xdest; @@ -101,7 +101,7 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n) "2:\n\t" "sub %2, %2, %3" : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy) - : "0" (__cs), "1" (__ct), "r" (__cs+__n)); + : "0" (__cs), "1" (__ct), "r" ((uintptr_t)__cs+__n)); return __res; } -- cgit v1.1 From ca47480921587ae30417dd234a9f79af188e3666 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 5 Jan 2018 14:27:58 -0800 Subject: xtensa: fix futex_atomic_cmpxchg_inatomic Return 0 if the operation was successful, not the userspace memory value. Check that the userspace value equals the passed oldval, not itself. Don't update *uval if the value wasn't read from userspace memory. This fixes a process hang due to an infinite loop in futex_lock_pi. It also fixes a bunch of glibc tests nptl/tst-mutexpi*.
Cc: stable@vger.kernel.org Signed-off-by: Max Filippov --- arch/xtensa/include/asm/futex.h | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h index eaaf1eb..5bfbc1c 100644 --- a/arch/xtensa/include/asm/futex.h +++ b/arch/xtensa/include/asm/futex.h @@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { int ret = 0; - u32 prev; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; @@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, __asm__ __volatile__ ( " # futex_atomic_cmpxchg_inatomic\n" - "1: l32i %1, %3, 0\n" - " mov %0, %5\n" - " wsr %1, scompare1\n" - "2: s32c1i %0, %3, 0\n" - "3:\n" + " wsr %5, scompare1\n" + "1: s32c1i %1, %4, 0\n" + " s32i %1, %6, 0\n" + "2:\n" " .section .fixup,\"ax\"\n" " .align 4\n" - "4: .long 3b\n" - "5: l32r %1, 4b\n" - " movi %0, %6\n" + "3: .long 2b\n" + "4: l32r %1, 3b\n" + " movi %0, %7\n" " jx %1\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .long 1b,5b,2b,5b\n" + " .long 1b,4b\n" " .previous\n" - : "+r" (ret), "=&r" (prev), "+m" (*uaddr) - : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) + : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval) + : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT) : "memory"); - *uval = prev; return ret; } -- cgit v1.1
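A note on the memset/__memset split used by the memset.S and __xtensa_clear_user patches above: with KASAN the architecture keeps __memset as the real, uninstrumented implementation and defines memset only as a weak symbol, so a checking definition can take over and validate every write, while code that legitimately touches user memory calls __memset directly. A minimal C sketch of the pattern, with an illustrative check_memory_region() stub standing in for the kernel's actual KASAN hook:

    #include <stddef.h>

    /* The real, uninstrumented implementation (ENTRY(__memset) in the
     * assembly above); writes n copies of c starting at s. */
    void *__memset(void *s, int c, size_t n)
    {
            unsigned char *p = s;

            while (n--)
                    *p++ = (unsigned char)c;
            return s;
    }

    /* Stand-in for KASAN's shadow check; the real hook is what reported
     * the padzero() write in the log above. */
    static void check_memory_region(const void *addr, size_t n)
    {
            (void)addr;
            (void)n;
    }

    /* The checking wrapper ordinary code reaches; in the kernel a strong
     * definition like this overrides the WEAK(memset) assembly entry. */
    void *memset(void *s, int c, size_t n)
    {
            check_memory_region(s, n);
            return __memset(s, c, n);
    }

Callers that must bypass the check, like __xtensa_clear_user() after the fix, use __memset(addr, 0, size) and rely on the separate uaccess checks instead.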
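On the gcc-8 warning fix: -Warray-bounds fires because the compiler tracks __src+__n as pointer arithmetic against whatever object it can prove sits behind __src. Casting the base pointer to uintptr_t turns the computation into plain integer arithmetic, which yields the same value but falls outside the pointer-bounds analysis. A standalone illustration of the idiom (the helper name is made up for the example):

    #include <stdint.h>
    #include <stddef.h>

    static inline const char *bound_of(const char *src, size_t n)
    {
            /*
             * src + n can trip -Warray-bounds when the compiler believes
             * the object behind src is smaller than n; the integer-domain
             * form computes the same address without that bookkeeping.
             */
            return (const char *)((uintptr_t)src + n);
    }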
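On the futex fix: futex_atomic_cmpxchg_inatomic() must return 0 or -EFAULT (success means "no fault", not "the exchange happened"), compare the value read from user memory against the caller's oldval, and store the value it read into *uval only when the read succeeded; the old code confused these, which is what made futex_lock_pi() spin forever. A behavioural sketch in plain C, deliberately ignoring the atomicity and exception-table fixups that the real s32c1i sequence provides:

    static int cmpxchg_inatomic_sketch(unsigned int *uval, unsigned int *uaddr,
                                       unsigned int oldval, unsigned int newval)
    {
            /* In the real code this read can fault and yield -EFAULT. */
            unsigned int cur = *uaddr;

            if (cur == oldval)              /* compare against oldval, not cur */
                    *uaddr = newval;        /* done atomically by s32c1i */

            *uval = cur;    /* written only after the user value was read */
            return 0;       /* reports "no fault", not the old memory value */
    }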