Diffstat (limited to 'arch')
54 files changed, 785 insertions(+), 625 deletions(-)
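Before the hunks, a note on the headline change: this series wires up KASAN for xtensa, and much of the rest (the shadow region carved out of the virtual address space, the GENERIC_STRNCPY_FROM_USER switch, the uninstrumented __mem* aliases) hangs off the usual generic mem-to-shadow transform. A minimal sketch, assuming the generic KASAN_SHADOW_SCALE_SHIFT of 3 and the KASAN_SHADOW_OFFSET the Kconfig hunk below sets; this mirrors generic kasan_mem_to_shadow() and is not code from the patch itself:

    /* One shadow byte describes 8 bytes of memory: map an address to its
     * shadow byte with a shift plus a constant offset. */
    static inline void *mem_to_shadow(const void *addr)
    {
            return (void *)(((unsigned long)addr >> 3)  /* KASAN_SHADOW_SCALE_SHIFT */
                            + 0x6e400000UL);            /* CONFIG_KASAN_SHADOW_OFFSET */
    }

Plugging in the KASAN_START_VADDR of 0x90000000 from the new asm/kasan.h gives 0x80400000, i.e. the shadow sits immediately after the page table, which is exactly what the KASAN_SHADOW_START definition below encodes.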
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 8bc52f7..c921e8b 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -15,6 +15,9 @@ config XTENSA select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK + select GENERIC_STRNCPY_FROM_USER if KASAN + select HAVE_ARCH_KASAN if MMU + select HAVE_CC_STACKPROTECTOR select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS @@ -79,6 +82,10 @@ config VARIANT_IRQ_SWITCH config HAVE_XTENSA_GPIO32 def_bool n +config KASAN_SHADOW_OFFSET + hex + default 0x6e400000 + menu "Processor type and features" choice diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index 7ee02fe..3a934b7 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile @@ -42,10 +42,11 @@ export PLATFORM # temporarily until string.h is fixed KBUILD_CFLAGS += -ffreestanding -D__linux__ - -KBUILD_CFLAGS += -pipe -mlongcalls - +KBUILD_CFLAGS += -pipe -mlongcalls -mtext-section-literals KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,) +KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,) + +KBUILD_AFLAGS += -mlongcalls -mtext-section-literals ifneq ($(CONFIG_LD_NO_RELAX),) LDFLAGS := --no-relax diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S index bf7fabe..bbf3b4b 100644 --- a/arch/xtensa/boot/boot-redboot/bootstrap.S +++ b/arch/xtensa/boot/boot-redboot/bootstrap.S @@ -42,6 +42,7 @@ __start_a0: .align 4 .section .text, "ax" + .literal_position .begin literal_prefix .text /* put literals in here! */ diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile index d2a7f48..355127f 100644 --- a/arch/xtensa/boot/lib/Makefile +++ b/arch/xtensa/boot/lib/Makefile @@ -15,6 +15,12 @@ CFLAGS_REMOVE_inftrees.o = -pg CFLAGS_REMOVE_inffast.o = -pg endif +KASAN_SANITIZE := n + +CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong +CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong +CFLAGS_REMOVE_inftrees.o += -fstack-protector -fstack-protector-strong +CFLAGS_REMOVE_inffast.o += -fstack-protector -fstack-protector-strong quiet_cmd_copy_zlib = COPY $@ cmd_copy_zlib = cat $< > $@ diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h index 746dcc8..7f2ae58 100644 --- a/arch/xtensa/include/asm/asmmacro.h +++ b/arch/xtensa/include/asm/asmmacro.h @@ -150,5 +150,45 @@ __endl \ar \as .endm +/* Load or store instructions that may cause exceptions use the EX macro. */ + +#define EX(handler) \ + .section __ex_table, "a"; \ + .word 97f, handler; \ + .previous \ +97: + + +/* + * Extract unaligned word that is split between two registers w0 and w1 + * into r regardless of machine endianness. SAR must be loaded with the + * starting bit of the word (see __ssa8). + */ + + .macro __src_b r, w0, w1 +#ifdef __XTENSA_EB__ + src \r, \w0, \w1 +#else + src \r, \w1, \w0 +#endif + .endm + +/* + * Load 2 lowest address bits of r into SAR for __src_b to extract unaligned + * word starting at r from two registers loaded from consecutive aligned + * addresses covering r regardless of machine endianness. 
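(An aside on these two macros, not part of the patch: together they implement a funnel shift for unaligned loads. A hypothetical little-endian C model, with SAR = 8 * (addr & 3) as in the table that follows:

    #include <stdint.h>

    /* Model of __ssa8 + __src_b on a little-endian core: extract the
     * 32-bit word at unaligned address a from the two aligned words
     * that cover it. */
    static uint32_t unaligned_load_le(uintptr_t a)
    {
            const uint32_t *p = (const uint32_t *)(a & ~(uintptr_t)3);
            unsigned int sar = (a & 3) * 8;        /* what ssa8l computes */
            uint32_t w0 = p[0], w1 = p[1];

            /* src concatenates w1:w0 and shifts right by SAR bits */
            return sar ? (w0 >> sar) | (w1 << (32 - sar)) : w0;
    }

The big-endian variants simply swap the operand order, which is why the macros exist at all.)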
+ * + * r 0 1 2 3 + * LE SAR 0 8 16 24 + * BE SAR 32 24 16 8 + */ + + .macro __ssa8 r +#ifdef __XTENSA_EB__ + ssa8b \r +#else + ssa8l \r +#endif + .endm #endif /* _XTENSA_ASMMACRO_H */ diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h index 47e46dc..5d98a7a 100644 --- a/arch/xtensa/include/asm/current.h +++ b/arch/xtensa/include/asm/current.h @@ -11,6 +11,8 @@ #ifndef _XTENSA_CURRENT_H #define _XTENSA_CURRENT_H +#include <asm/thread_info.h> + #ifndef __ASSEMBLY__ #include <linux/thread_info.h> @@ -26,8 +28,6 @@ static inline struct task_struct *get_current(void) #else -#define CURRENT_SHIFT 13 - #define GET_CURRENT(reg,sp) \ GET_THREAD_INFO(reg,sp); \ l32i reg, reg, TI_TASK \ diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h index 0d30403..7e25c1b 100644 --- a/arch/xtensa/include/asm/fixmap.h +++ b/arch/xtensa/include/asm/fixmap.h @@ -44,7 +44,7 @@ enum fixed_addresses { __end_of_fixed_addresses }; -#define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE) +#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE) #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) @@ -63,7 +63,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx) * table. */ BUILD_BUG_ON(FIXADDR_START < - XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE); + TLBTEMP_BASE_1 + TLBTEMP_SIZE); BUILD_BUG_ON(idx >= __end_of_fixed_addresses); return __fix_to_virt(idx); } diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h index eaaf1eb..5bfbc1c 100644 --- a/arch/xtensa/include/asm/futex.h +++ b/arch/xtensa/include/asm/futex.h @@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { int ret = 0; - u32 prev; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; @@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, __asm__ __volatile__ ( " # futex_atomic_cmpxchg_inatomic\n" - "1: l32i %1, %3, 0\n" - " mov %0, %5\n" - " wsr %1, scompare1\n" - "2: s32c1i %0, %3, 0\n" - "3:\n" + " wsr %5, scompare1\n" + "1: s32c1i %1, %4, 0\n" + " s32i %1, %6, 0\n" + "2:\n" " .section .fixup,\"ax\"\n" " .align 4\n" - "4: .long 3b\n" - "5: l32r %1, 4b\n" - " movi %0, %6\n" + "3: .long 2b\n" + "4: l32r %1, 3b\n" + " movi %0, %7\n" " jx %1\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .long 1b,5b,2b,5b\n" + " .long 1b,4b\n" " .previous\n" - : "+r" (ret), "=&r" (prev), "+m" (*uaddr) - : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) + : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval) + : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT) : "memory"); - *uval = prev; return ret; } diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h index 6e070db..04e9340 100644 --- a/arch/xtensa/include/asm/highmem.h +++ b/arch/xtensa/include/asm/highmem.h @@ -72,7 +72,7 @@ static inline void *kmap(struct page *page) * page table. 
*/ BUILD_BUG_ON(PKMAP_BASE < - XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE); + TLBTEMP_BASE_1 + TLBTEMP_SIZE); BUG_ON(in_interrupt()); if (!PageHighMem(page)) return page_address(page); diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h new file mode 100644 index 0000000..54be808 --- /dev/null +++ b/arch/xtensa/include/asm/kasan.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_KASAN_H +#define __ASM_KASAN_H + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_KASAN + +#include <linux/kernel.h> +#include <linux/sizes.h> +#include <asm/kmem_layout.h> + +/* Start of area covered by KASAN */ +#define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000) +/* Start of the shadow map */ +#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE) +/* Size of the shadow map */ +#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT) +/* Offset for mem to shadow address transformation */ +#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET) + +void __init kasan_early_init(void); +void __init kasan_init(void); + +#else + +static inline void kasan_early_init(void) +{ +} + +static inline void kasan_init(void) +{ +} + +#endif +#endif +#endif diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h index 561f872..2317c83 100644 --- a/arch/xtensa/include/asm/kmem_layout.h +++ b/arch/xtensa/include/asm/kmem_layout.h @@ -71,4 +71,11 @@ #endif +#ifndef CONFIG_KASAN +#define KERNEL_STACK_SHIFT 13 +#else +#define KERNEL_STACK_SHIFT 15 +#endif +#define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT) + #endif diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h new file mode 100644 index 0000000..0ba9973 --- /dev/null +++ b/arch/xtensa/include/asm/linkage.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +#define __ALIGN .align 4 +#define __ALIGN_STR ".align 4" + +#endif diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h index f7e186d..de5e6cb 100644 --- a/arch/xtensa/include/asm/mmu_context.h +++ b/arch/xtensa/include/asm/mmu_context.h @@ -52,6 +52,7 @@ DECLARE_PER_CPU(unsigned long, asid_cache); #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8)) void init_mmu(void); +void init_kio(void); static inline void set_rasid_register (unsigned long val) { diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h index 2cebdbb..37251b2 100644 --- a/arch/xtensa/include/asm/nommu_context.h +++ b/arch/xtensa/include/asm/nommu_context.h @@ -3,6 +3,10 @@ static inline void init_mmu(void) { } +static inline void init_kio(void) +{ +} + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 4ddbfd5..5d69c11 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -36,8 +36,6 @@ #define MAX_LOW_PFN PHYS_PFN(0xfffffffful) #endif -#define PGTABLE_START 0x80000000 - /* * Cache aliasing: * diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 30dd5b2..3880225 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -12,9 +12,9 @@ #define _XTENSA_PGTABLE_H #define __ARCH_USE_5LEVEL_HACK -#include <asm-generic/pgtable-nopmd.h> #include <asm/page.h> #include <asm/kmem_layout.h> +#include <asm-generic/pgtable-nopmd.h> /* * We only 
use two ring levels, user and kernel space. @@ -170,6 +170,7 @@ #define PAGE_SHARED_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC) #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE) +#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT) #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC) #if (DCACHE_WAY_SIZE > PAGE_SIZE) diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h index e2d9c5e..3a5c591 100644 --- a/arch/xtensa/include/asm/ptrace.h +++ b/arch/xtensa/include/asm/ptrace.h @@ -10,6 +10,7 @@ #ifndef _XTENSA_PTRACE_H #define _XTENSA_PTRACE_H +#include <asm/kmem_layout.h> #include <uapi/asm/ptrace.h> /* @@ -38,20 +39,6 @@ * +-----------------------+ -------- */ -#define KERNEL_STACK_SIZE (2 * PAGE_SIZE) - -/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */ - -#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */ -#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */ -#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */ -#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */ -#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */ -#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */ -#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */ -#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */ -#define EXC_TABLE_SIZE 0x400 - #ifndef __ASSEMBLY__ #include <asm/coprocessor.h> diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h index 881a113..477594e 100644 --- a/arch/xtensa/include/asm/regs.h +++ b/arch/xtensa/include/asm/regs.h @@ -76,6 +76,7 @@ #define EXCCAUSE_COPROCESSOR5_DISABLED 37 #define EXCCAUSE_COPROCESSOR6_DISABLED 38 #define EXCCAUSE_COPROCESSOR7_DISABLED 39 +#define EXCCAUSE_N 64 /* PS register fields. */ diff --git a/arch/xtensa/include/asm/stackprotector.h b/arch/xtensa/include/asm/stackprotector.h new file mode 100644 index 0000000..e368f94 --- /dev/null +++ b/arch/xtensa/include/asm/stackprotector.h @@ -0,0 +1,40 @@ +/* + * GCC stack protector support. + * + * (This is directly adopted from the ARM implementation) + * + * Stack protector works by putting predefined pattern at the start of + * the stack frame and verifying that it hasn't been overwritten when + * returning from the function. The pattern is called stack canary + * and gcc expects it to be defined by a global variable called + * "__stack_chk_guard" on Xtensa. This unfortunately means that on SMP + * we cannot have a different canary value per task. + */ + +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H 1 + +#include <linux/random.h> +#include <linux/version.h> + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. 
*/ + get_random_bytes(&canary, sizeof(canary)); + canary ^= LINUX_VERSION_CODE; + + current->stack_canary = canary; + __stack_chk_guard = current->stack_canary; +} + +#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h index 8d5d9df..89b51a0 100644 --- a/arch/xtensa/include/asm/string.h +++ b/arch/xtensa/include/asm/string.h @@ -53,7 +53,7 @@ static inline char *strncpy(char *__dest, const char *__src, size_t __n) "bne %1, %5, 1b\n" "2:" : "=r" (__dest), "=r" (__src), "=&r" (__dummy) - : "0" (__dest), "1" (__src), "r" (__src+__n) + : "0" (__dest), "1" (__src), "r" ((uintptr_t)__src+__n) : "memory"); return __xdest; @@ -101,21 +101,40 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n) "2:\n\t" "sub %2, %2, %3" : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy) - : "0" (__cs), "1" (__ct), "r" (__cs+__n)); + : "0" (__cs), "1" (__ct), "r" ((uintptr_t)__cs+__n)); return __res; } #define __HAVE_ARCH_MEMSET extern void *memset(void *__s, int __c, size_t __count); +extern void *__memset(void *__s, int __c, size_t __count); #define __HAVE_ARCH_MEMCPY extern void *memcpy(void *__to, __const__ void *__from, size_t __n); +extern void *__memcpy(void *__to, __const__ void *__from, size_t __n); #define __HAVE_ARCH_MEMMOVE extern void *memmove(void *__dest, __const__ void *__src, size_t __n); +extern void *__memmove(void *__dest, __const__ void *__src, size_t __n); /* Don't build bcopy at all ... */ #define __HAVE_ARCH_BCOPY +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) + +/* + * For files that are not instrumented (e.g. mm/slub.c) we + * should use not instrumented version of mem* functions. + */ + +#define memcpy(dst, src, len) __memcpy(dst, src, len) +#define memmove(dst, src, len) __memmove(dst, src, len) +#define memset(s, c, n) __memset(s, c, n) + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. 
*/ +#endif +#endif + #endif /* _XTENSA_STRING_H */ diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index 2ccd375..2bd19ae 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -11,7 +11,9 @@ #ifndef _XTENSA_THREAD_INFO_H #define _XTENSA_THREAD_INFO_H -#ifdef __KERNEL__ +#include <asm/kmem_layout.h> + +#define CURRENT_SHIFT KERNEL_STACK_SHIFT #ifndef __ASSEMBLY__ # include <asm/processor.h> @@ -81,7 +83,7 @@ struct thread_info { static inline struct thread_info *current_thread_info(void) { struct thread_info *ti; - __asm__("extui %0,a1,0,13\n\t" + __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t" "xor %0, a1, %0" : "=&r" (ti) : ); return ti; } @@ -90,7 +92,7 @@ static inline struct thread_info *current_thread_info(void) /* how to get the thread information struct from ASM */ #define GET_THREAD_INFO(reg,sp) \ - extui reg, sp, 0, 13; \ + extui reg, sp, 0, CURRENT_SHIFT; \ xor reg, sp, reg #endif @@ -127,8 +129,7 @@ static inline struct thread_info *current_thread_info(void) */ #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ -#define THREAD_SIZE 8192 //(2*PAGE_SIZE) -#define THREAD_SIZE_ORDER 1 +#define THREAD_SIZE KERNEL_STACK_SIZE +#define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT) -#endif /* __KERNEL__ */ #endif /* _XTENSA_THREAD_INFO */ diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h index 2e69aa4..f5cd7a7 100644 --- a/arch/xtensa/include/asm/traps.h +++ b/arch/xtensa/include/asm/traps.h @@ -13,12 +13,47 @@ #include <asm/ptrace.h> /* + * Per-CPU exception handling data structure. + * EXCSAVE1 points to it. + */ +struct exc_table { + /* Kernel Stack */ + void *kstk; + /* Double exception save area for a0 */ + unsigned long double_save; + /* Fixup handler */ + void *fixup; + /* For passing a parameter to fixup */ + void *fixup_param; + /* For fast syscall handler */ + unsigned long syscall_save; + /* Fast user exception handlers */ + void *fast_user_handler[EXCCAUSE_N]; + /* Fast kernel exception handlers */ + void *fast_kernel_handler[EXCCAUSE_N]; + /* Default C-Handlers */ + void *default_handler[EXCCAUSE_N]; +}; + +/* * handler must be either of the following: * void (*)(struct pt_regs *regs); * void (*)(struct pt_regs *regs, unsigned long exccause); */ extern void * __init trap_set_handler(int cause, void *handler); extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); +void fast_second_level_miss(void); + +/* Initialize minimal exc_table structure sufficient for basic paging */ +static inline void __init early_trap_init(void) +{ + static struct exc_table exc_table __initdata = { + .fast_kernel_handler[EXCCAUSE_DTLB_MISS] = + fast_second_level_miss, + }; + __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table)); +} + void secondary_trap_init(void); static inline void spill_registers(void) diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index b8f152b..f1158b4 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -44,6 +44,8 @@ #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) +#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) + /* * These are the main single-value transfer routines. 
They * automatically use the right size if we just have the right pointer @@ -261,7 +263,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) static inline unsigned long __xtensa_clear_user(void *addr, unsigned long size) { - if ( ! memset(addr, 0, size) ) + if (!__memset(addr, 0, size)) return size; return 0; } @@ -277,6 +279,8 @@ clear_user(void *addr, unsigned long size) #define __clear_user __xtensa_clear_user +#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER + extern long __strncpy_user(char *, const char *, long); static inline long @@ -286,6 +290,9 @@ strncpy_from_user(char *dst, const char *src, long count) return __strncpy_user(dst, src, count); return -EFAULT; } +#else +long strncpy_from_user(char *dst, const char *src, long count); +#endif /* * Return the size of a string (including the ending 0!) diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile index bb8d557..9190759 100644 --- a/arch/xtensa/kernel/Makefile +++ b/arch/xtensa/kernel/Makefile @@ -17,9 +17,6 @@ obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o -AFLAGS_head.o += -mtext-section-literals -AFLAGS_mxhead.o += -mtext-section-literals - # In the Xtensa architecture, assembly generates literals which must always # precede the L32R instruction with a relative offset less than 256 kB. # Therefore, the .text and .literal section must be combined in parenthesis diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index 890004a..9301452e 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -19,6 +19,7 @@ #include <linux/linkage.h> #include <asm/current.h> #include <asm/asm-offsets.h> +#include <asm/asmmacro.h> #include <asm/processor.h> #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION @@ -66,8 +67,6 @@ #define INSN_T 24 #define INSN_OP1 16 -.macro __src_b r, w0, w1; src \r, \w0, \w1; .endm -.macro __ssa8 r; ssa8b \r; .endm .macro __ssa8r r; ssa8l \r; .endm .macro __sh r, s; srl \r, \s; .endm .macro __sl r, s; sll \r, \s; .endm @@ -81,8 +80,6 @@ #define INSN_T 4 #define INSN_OP1 12 -.macro __src_b r, w0, w1; src \r, \w1, \w0; .endm -.macro __ssa8 r; ssa8l \r; .endm .macro __ssa8r r; ssa8b \r; .endm .macro __sh r, s; sll \r, \s; .endm .macro __sl r, s; srl \r, \s; .endm @@ -155,7 +152,7 @@ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ - + .literal_position ENTRY(fast_unaligned) /* Note: We don't expect the address to be aligned on a word diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index bcb5beb..022cf91 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c @@ -76,6 +76,9 @@ int main(void) DEFINE(TASK_PID, offsetof (struct task_struct, pid)); DEFINE(TASK_THREAD, offsetof (struct task_struct, thread)); DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack)); +#ifdef CONFIG_CC_STACKPROTECTOR + DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); +#endif DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct)); /* offsets in thread_info struct */ @@ -129,5 +132,18 @@ int main(void) offsetof(struct debug_table, icount_level_save)); #endif + /* struct exc_table */ + DEFINE(EXC_TABLE_KSTK, offsetof(struct exc_table, kstk)); + DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save)); + DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup)); + DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param)); + 
DEFINE(EXC_TABLE_SYSCALL_SAVE, + offsetof(struct exc_table, syscall_save)); + DEFINE(EXC_TABLE_FAST_USER, + offsetof(struct exc_table, fast_user_handler)); + DEFINE(EXC_TABLE_FAST_KERNEL, + offsetof(struct exc_table, fast_kernel_handler)); + DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler)); + return 0; } diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S index 3a98503..4f8b52d 100644 --- a/arch/xtensa/kernel/coprocessor.S +++ b/arch/xtensa/kernel/coprocessor.S @@ -212,8 +212,7 @@ ENDPROC(coprocessor_restore) ENTRY(fast_coprocessor_double) wsr a0, excsave1 - movi a0, unrecoverable_exception - callx0 a0 + call0 unrecoverable_exception ENDPROC(fast_coprocessor_double) diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 37a2395..5caff07 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -14,6 +14,7 @@ #include <linux/linkage.h> #include <asm/asm-offsets.h> +#include <asm/asmmacro.h> #include <asm/processor.h> #include <asm/coprocessor.h> #include <asm/thread_info.h> @@ -125,6 +126,7 @@ * * Note: _user_exception might be at an odd address. Don't use call0..call12 */ + .literal_position ENTRY(user_exception) @@ -475,8 +477,7 @@ common_exception_return: 1: irq_save a2, a3 #ifdef CONFIG_TRACE_IRQFLAGS - movi a4, trace_hardirqs_off - callx4 a4 + call4 trace_hardirqs_off #endif /* Jump if we are returning from kernel exceptions. */ @@ -503,24 +504,20 @@ common_exception_return: /* Call do_signal() */ #ifdef CONFIG_TRACE_IRQFLAGS - movi a4, trace_hardirqs_on - callx4 a4 + call4 trace_hardirqs_on #endif rsil a2, 0 - movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*) mov a6, a1 - callx4 a4 + call4 do_notify_resume # int do_notify_resume(struct pt_regs*) j 1b 3: /* Reschedule */ #ifdef CONFIG_TRACE_IRQFLAGS - movi a4, trace_hardirqs_on - callx4 a4 + call4 trace_hardirqs_on #endif rsil a2, 0 - movi a4, schedule # void schedule (void) - callx4 a4 + call4 schedule # void schedule (void) j 1b #ifdef CONFIG_PREEMPT @@ -531,8 +528,7 @@ common_exception_return: l32i a4, a2, TI_PRE_COUNT bnez a4, 4f - movi a4, preempt_schedule_irq - callx4 a4 + call4 preempt_schedule_irq j 1b #endif @@ -545,23 +541,20 @@ common_exception_return: 5: #ifdef CONFIG_HAVE_HW_BREAKPOINT _bbci.l a4, TIF_DB_DISABLED, 7f - movi a4, restore_dbreak - callx4 a4 + call4 restore_dbreak 7: #endif #ifdef CONFIG_DEBUG_TLB_SANITY l32i a4, a1, PT_DEPC bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f - movi a4, check_tlb_sanity - callx4 a4 + call4 check_tlb_sanity #endif 6: 4: #ifdef CONFIG_TRACE_IRQFLAGS extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH bgei a4, LOCKLEVEL, 1f - movi a4, trace_hardirqs_on - callx4 a4 + call4 trace_hardirqs_on 1: #endif /* Restore optional registers. 
*/ @@ -777,6 +770,8 @@ ENDPROC(kernel_exception) * When we get here, a0 is trashed and saved to excsave[debuglevel] */ + .literal_position + ENTRY(debug_exception) rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL @@ -916,6 +911,8 @@ ENDPROC(debug_exception) unrecoverable_text: .ascii "Unrecoverable error in exception handler\0" + .literal_position + ENTRY(unrecoverable_exception) movi a0, 1 @@ -933,10 +930,8 @@ ENTRY(unrecoverable_exception) movi a0, 0 addi a1, a1, PT_REGS_OFFSET - movi a4, panic movi a6, unrecoverable_text - - callx4 a4 + call4 panic 1: j 1b @@ -1073,8 +1068,7 @@ ENTRY(fast_syscall_unrecoverable) xsr a2, depc # restore a2, depc wsr a0, excsave1 - movi a0, unrecoverable_exception - callx0 a0 + call0 unrecoverable_exception ENDPROC(fast_syscall_unrecoverable) @@ -1101,32 +1095,11 @@ ENDPROC(fast_syscall_unrecoverable) * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception * * Note: we don't have to save a2; a2 holds the return value - * - * We use the two macros TRY and CATCH: - * - * TRY adds an entry to the __ex_table fixup table for the immediately - * following instruction. - * - * CATCH catches any exception that occurred at one of the preceding TRY - * statements and continues from there - * - * Usage TRY l32i a0, a1, 0 - * <other code> - * done: rfe - * CATCH <set return code> - * j done */ -#ifdef CONFIG_FAST_SYSCALL_XTENSA - -#define TRY \ - .section __ex_table, "a"; \ - .word 66f, 67f; \ - .text; \ -66: + .literal_position -#define CATCH \ -67: +#ifdef CONFIG_FAST_SYSCALL_XTENSA ENTRY(fast_syscall_xtensa) @@ -1141,9 +1114,9 @@ ENTRY(fast_syscall_xtensa) .Lswp: /* Atomic compare and swap */ -TRY l32i a0, a3, 0 # read old value +EX(.Leac) l32i a0, a3, 0 # read old value bne a0, a4, 1f # same as old value? jump -TRY s32i a5, a3, 0 # different, modify value +EX(.Leac) s32i a5, a3, 0 # different, modify value l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 1 # and return 1 @@ -1156,12 +1129,12 @@ TRY s32i a5, a3, 0 # different, modify value .Lnswp: /* Atomic set, add, and exg_add. 
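(An aside on the TRY/CATCH to EX() conversion in this hunk: the EX() macro from asm/asmmacro.h above records a (faulting instruction, fixup) pair in __ex_table, and the fault handler consults that table before doing anything else. A sketch of the lookup side, assuming the two-word absolute-address entry layout used here; not the kernel's exact code:

    struct exception_table_entry {
            unsigned long insn;   /* address of the EX()-tagged instruction */
            unsigned long fixup;  /* label to resume at, e.g. .Leac below */
    };

    /* On a kernel-mode fault, resume at the fixup if one is registered. */
    static int fixup_exception(struct pt_regs *regs,
                               const struct exception_table_entry *tbl,
                               unsigned long entries)
    {
            unsigned long i;

            for (i = 0; i < entries; i++) {
                    if (tbl[i].insn == regs->pc) {
                            regs->pc = tbl[i].fixup;
                            return 1;
                    }
            }
            return 0;
    }

So a faulting l32i/s32i below lands at .Leac, which restores registers and returns -EFAULT.)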
*/ -TRY l32i a7, a3, 0 # orig +EX(.Leac) l32i a7, a3, 0 # orig addi a6, a6, -SYS_XTENSA_ATOMIC_SET add a0, a4, a7 # + arg moveqz a0, a4, a6 # set addi a6, a6, SYS_XTENSA_ATOMIC_SET -TRY s32i a0, a3, 0 # write new value +EX(.Leac) s32i a0, a3, 0 # write new value mov a0, a2 mov a2, a7 @@ -1169,7 +1142,6 @@ TRY s32i a0, a3, 0 # write new value l32i a0, a0, PT_AREG0 # restore a0 rfe -CATCH .Leac: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, -EFAULT @@ -1411,14 +1383,12 @@ ENTRY(fast_syscall_spill_registers) rsync movi a6, SIGSEGV - movi a4, do_exit - callx4 a4 + call4 do_exit /* shouldn't return, so panic */ wsr a0, excsave1 - movi a0, unrecoverable_exception - callx0 a0 # should not return + call0 unrecoverable_exception # should not return 1: j 1b @@ -1564,8 +1534,8 @@ ENDPROC(fast_syscall_spill_registers) ENTRY(fast_second_level_miss_double_kernel) -1: movi a0, unrecoverable_exception - callx0 a0 # should not return +1: + call0 unrecoverable_exception # should not return 1: j 1b ENDPROC(fast_second_level_miss_double_kernel) @@ -1887,6 +1857,7 @@ ENDPROC(fast_store_prohibited) * void system_call (struct pt_regs* regs, int exccause) * a2 a3 */ + .literal_position ENTRY(system_call) @@ -1896,9 +1867,8 @@ ENTRY(system_call) l32i a3, a2, PT_AREG2 mov a6, a2 - movi a4, do_syscall_trace_enter s32i a3, a2, PT_SYSCALL - callx4 a4 + call4 do_syscall_trace_enter mov a3, a6 /* syscall = sys_call_table[syscall_nr] */ @@ -1930,9 +1900,8 @@ ENTRY(system_call) 1: /* regs->areg[2] = return_value */ s32i a6, a2, PT_AREG2 - movi a4, do_syscall_trace_leave mov a6, a2 - callx4 a4 + call4 do_syscall_trace_leave retw ENDPROC(system_call) @@ -2002,6 +1971,12 @@ ENTRY(_switch_to) s32i a1, a2, THREAD_SP # save stack pointer #endif +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + movi a6, __stack_chk_guard + l32i a8, a3, TASK_STACK_CANARY + s32i a8, a6, 0 +#endif + /* Disable ints while we manipulate the stack pointer. 
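(A note on the three instructions just added to _switch_to: they propagate the incoming task's canary into the global guard. This is UP-only because, as the new asm/stackprotector.h comment explains, gcc reads a single global __stack_chk_guard on xtensa, so SMP cannot have a per-task value. A hypothetical C rendering of the same hand-off, with 'next' as in the C-level switch_to():

    static inline void switch_canary(struct task_struct *next)
    {
    #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
            __stack_chk_guard = next->stack_canary;
    #endif
    }

TASK_STACK_CANARY, generated by the asm-offsets.c hunk earlier, is what lets the assembly reach next->stack_canary without hardcoding the offset.)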
*/ irq_save a14, a3 @@ -2048,12 +2023,10 @@ ENTRY(ret_from_fork) /* void schedule_tail (struct task_struct *prev) * Note: prev is still in a6 (return value from fake call4 frame) */ - movi a4, schedule_tail - callx4 a4 + call4 schedule_tail - movi a4, do_syscall_trace_leave mov a6, a1 - callx4 a4 + call4 do_syscall_trace_leave j common_exception_return diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index 23ce62e..9c4e943 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -264,11 +264,8 @@ ENTRY(_startup) /* init_arch kick-starts the linux kernel */ - movi a4, init_arch - callx4 a4 - - movi a4, start_kernel - callx4 a4 + call4 init_arch + call4 start_kernel should_never_return: j should_never_return @@ -294,8 +291,7 @@ should_never_return: movi a6, 0 wsr a6, excsave1 - movi a4, secondary_start_kernel - callx4 a4 + call4 secondary_start_kernel j should_never_return #endif /* CONFIG_SMP */ diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c index b715237..902845d 100644 --- a/arch/xtensa/kernel/module.c +++ b/arch/xtensa/kernel/module.c @@ -22,8 +22,6 @@ #include <linux/kernel.h> #include <linux/cache.h> -#undef DEBUG_RELOCATE - static int decode_calln_opcode (unsigned char *location) { @@ -58,10 +56,9 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, unsigned char *location; uint32_t value; -#ifdef DEBUG_RELOCATE - printk("Applying relocate section %u to %u\n", relsec, - sechdrs[relsec].sh_info); -#endif + pr_debug("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rela[i].r_offset; @@ -87,7 +84,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, value -= ((unsigned long)location & -4) + 4; if ((value & 3) != 0 || ((value + (1 << 19)) >> 20) != 0) { - printk("%s: relocation out of range, " + pr_err("%s: relocation out of range, " "section %d reloc %d " "sym '%s'\n", mod->name, relsec, i, @@ -111,7 +108,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, value -= (((unsigned long)location + 3) & -4); if ((value & 3) != 0 || (signed int)value >> 18 != -1) { - printk("%s: relocation out of range, " + pr_err("%s: relocation out of range, " "section %d reloc %d " "sym '%s'\n", mod->name, relsec, i, @@ -156,7 +153,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, case R_XTENSA_SLOT12_OP: case R_XTENSA_SLOT13_OP: case R_XTENSA_SLOT14_OP: - printk("%s: unexpected FLIX relocation: %u\n", + pr_err("%s: unexpected FLIX relocation: %u\n", mod->name, ELF32_R_TYPE(rela[i].r_info)); return -ENOEXEC; @@ -176,13 +173,13 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, case R_XTENSA_SLOT12_ALT: case R_XTENSA_SLOT13_ALT: case R_XTENSA_SLOT14_ALT: - printk("%s: unexpected ALT relocation: %u\n", + pr_err("%s: unexpected ALT relocation: %u\n", mod->name, ELF32_R_TYPE(rela[i].r_info)); return -ENOEXEC; default: - printk("%s: unexpected relocation: %u\n", + pr_err("%s: unexpected relocation: %u\n", mod->name, ELF32_R_TYPE(rela[i].r_info)); return -ENOEXEC; diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index 903963e..d981f01 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c @@ -29,14 +29,6 @@ #include <asm/pci-bridge.h> #include <asm/platform.h> -#undef DEBUG - -#ifdef DEBUG -#define DBG(x...) printk(x) -#else -#define DBG(x...) 
-#endif - /* PCI Controller */ @@ -101,8 +93,8 @@ pcibios_enable_resources(struct pci_dev *dev, int mask) for(idx=0; idx<6; idx++) { r = &dev->resource[idx]; if (!r->start && r->end) { - printk (KERN_ERR "PCI: Device %s not available because " - "of resource collisions\n", pci_name(dev)); + pr_err("PCI: Device %s not available because " + "of resource collisions\n", pci_name(dev)); return -EINVAL; } if (r->flags & IORESOURCE_IO) @@ -113,7 +105,7 @@ pcibios_enable_resources(struct pci_dev *dev, int mask) if (dev->resource[PCI_ROM_RESOURCE].start) cmd |= PCI_COMMAND_MEMORY; if (cmd != old_cmd) { - printk("PCI: Enabling device %s (%04x -> %04x)\n", + pr_info("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } @@ -144,8 +136,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl, res = &pci_ctrl->io_resource; if (!res->flags) { if (io_offset) - printk (KERN_ERR "I/O resource not set for host" - " bridge %d\n", pci_ctrl->index); + pr_err("I/O resource not set for host bridge %d\n", + pci_ctrl->index); res->start = 0; res->end = IO_SPACE_LIMIT; res->flags = IORESOURCE_IO; @@ -159,8 +151,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl, if (!res->flags) { if (i > 0) continue; - printk(KERN_ERR "Memory resource not set for " - "host bridge %d\n", pci_ctrl->index); + pr_err("Memory resource not set for host bridge %d\n", + pci_ctrl->index); res->start = 0; res->end = ~0U; res->flags = IORESOURCE_MEM; @@ -176,7 +168,7 @@ static int __init pcibios_init(void) struct pci_bus *bus; int next_busno = 0, ret; - printk("PCI: Probing PCI hardware\n"); + pr_info("PCI: Probing PCI hardware\n"); /* Scan all of the recorded PCI controllers. */ for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) { @@ -232,7 +224,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) for (idx=0; idx<6; idx++) { r = &dev->resource[idx]; if (!r->start && r->end) { - printk(KERN_ERR "PCI: Device %s not available because " + pr_err("PCI: Device %s not available because " "of resource collisions\n", pci_name(dev)); return -EINVAL; } @@ -242,8 +234,8 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) cmd |= PCI_COMMAND_MEMORY; } if (cmd != old_cmd) { - printk("PCI: Enabling device %s (%04x -> %04x)\n", - pci_name(dev), old_cmd, cmd); + pr_info("PCI: Enabling device %s (%04x -> %04x)\n", + pci_name(dev), old_cmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index ff4f0ec..8dd0593 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -58,6 +58,12 @@ void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); +#ifdef CONFIG_CC_STACKPROTECTOR +#include <linux/stackprotector.h> +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + #if XTENSA_HAVE_COPROCESSORS void coprocessor_release_all(struct thread_info *ti) diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 08175df..a931af9 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -36,6 +36,7 @@ #endif #include <asm/bootparam.h> +#include <asm/kasan.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/processor.h> @@ -156,7 +157,7 @@ static int __init parse_bootparam(const bp_tag_t* tag) /* Boot parameters must start with a BP_TAG_FIRST tag. 
*/ if (tag->id != BP_TAG_FIRST) { - printk(KERN_WARNING "Invalid boot parameters!\n"); + pr_warn("Invalid boot parameters!\n"); return 0; } @@ -165,15 +166,14 @@ static int __init parse_bootparam(const bp_tag_t* tag) /* Parse all tags. */ while (tag != NULL && tag->id != BP_TAG_LAST) { - for (t = &__tagtable_begin; t < &__tagtable_end; t++) { + for (t = &__tagtable_begin; t < &__tagtable_end; t++) { if (tag->id == t->tag) { t->parse(tag); break; } } if (t == &__tagtable_end) - printk(KERN_WARNING "Ignoring tag " - "0x%08x\n", tag->id); + pr_warn("Ignoring tag 0x%08x\n", tag->id); tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size); } @@ -208,6 +208,8 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname, /* round down to nearest 256MB boundary */ xtensa_kio_paddr &= 0xf0000000; + init_kio(); + return 1; } #else @@ -246,6 +248,14 @@ void __init early_init_devtree(void *params) void __init init_arch(bp_tag_t *bp_start) { + /* Initialize MMU. */ + + init_mmu(); + + /* Initialize initial KASAN shadow map */ + + kasan_early_init(); + /* Parse boot parameters */ if (bp_start) @@ -263,10 +273,6 @@ void __init init_arch(bp_tag_t *bp_start) /* Early hook for platforms */ platform_init(bp_start); - - /* Initialize MMU. */ - - init_mmu(); } /* @@ -277,13 +283,13 @@ extern char _end[]; extern char _stext[]; extern char _WindowVectors_text_start; extern char _WindowVectors_text_end; -extern char _DebugInterruptVector_literal_start; +extern char _DebugInterruptVector_text_start; extern char _DebugInterruptVector_text_end; -extern char _KernelExceptionVector_literal_start; +extern char _KernelExceptionVector_text_start; extern char _KernelExceptionVector_text_end; -extern char _UserExceptionVector_literal_start; +extern char _UserExceptionVector_text_start; extern char _UserExceptionVector_text_end; -extern char _DoubleExceptionVector_literal_start; +extern char _DoubleExceptionVector_text_start; extern char _DoubleExceptionVector_text_end; #if XCHAL_EXCM_LEVEL >= 2 extern char _Level2InterruptVector_text_start; @@ -317,6 +323,13 @@ static inline int mem_reserve(unsigned long start, unsigned long end) void __init setup_arch(char **cmdline_p) { + pr_info("config ID: %08x:%08x\n", + get_sr(SREG_EPC), get_sr(SREG_EXCSAVE)); + if (get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 || + get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1) + pr_info("built for config ID: %08x:%08x\n", + XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1); + *cmdline_p = command_line; platform_setup(cmdline_p); strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); @@ -339,16 +352,16 @@ void __init setup_arch(char **cmdline_p) mem_reserve(__pa(&_WindowVectors_text_start), __pa(&_WindowVectors_text_end)); - mem_reserve(__pa(&_DebugInterruptVector_literal_start), + mem_reserve(__pa(&_DebugInterruptVector_text_start), __pa(&_DebugInterruptVector_text_end)); - mem_reserve(__pa(&_KernelExceptionVector_literal_start), + mem_reserve(__pa(&_KernelExceptionVector_text_start), __pa(&_KernelExceptionVector_text_end)); - mem_reserve(__pa(&_UserExceptionVector_literal_start), + mem_reserve(__pa(&_UserExceptionVector_text_start), __pa(&_UserExceptionVector_text_end)); - mem_reserve(__pa(&_DoubleExceptionVector_literal_start), + mem_reserve(__pa(&_DoubleExceptionVector_text_start), __pa(&_DoubleExceptionVector_text_end)); #if XCHAL_EXCM_LEVEL >= 2 @@ -380,7 +393,7 @@ void __init setup_arch(char **cmdline_p) #endif parse_early_param(); bootmem_init(); - + kasan_init(); unflatten_and_copy_device_tree(); #ifdef CONFIG_SMP @@ -582,12 +595,14 @@ 
c_show(struct seq_file *f, void *slot) "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" "core ID\t\t: " XCHAL_CORE_ID "\n" "build ID\t: 0x%x\n" + "config ID\t: %08x:%08x\n" "byte order\t: %s\n" "cpu MHz\t\t: %lu.%02lu\n" "bogomips\t: %lu.%02lu\n", num_online_cpus(), cpumask_pr_args(cpu_online_mask), XCHAL_BUILD_UNIQUE_ID, + get_sr(SREG_EPC), get_sr(SREG_EXCSAVE), XCHAL_HAVE_BE ? "big" : "little", ccount_freq/1000000, (ccount_freq/10000) % 100, diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index d427e78..f88e7a0 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -28,8 +28,6 @@ #include <asm/coprocessor.h> #include <asm/unistd.h> -#define DEBUG_SIG 0 - extern struct task_struct *coproc_owners[]; struct rt_sigframe @@ -399,10 +397,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, regs->areg[8] = (unsigned long) &frame->uc; regs->threadptr = tp; -#if DEBUG_SIG - printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n", - current->comm, current->pid, sig, frame, regs->pc); -#endif + pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n", + current->comm, current->pid, sig, frame, regs->pc); return 0; } diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index bae697a..32c5207 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -33,6 +33,7 @@ #include <linux/kallsyms.h> #include <linux/delay.h> #include <linux/hardirq.h> +#include <linux/ratelimit.h> #include <asm/stacktrace.h> #include <asm/ptrace.h> @@ -158,8 +159,7 @@ COPROCESSOR(7), * 2. it is a temporary memory buffer for the exception handlers. */ -DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]); - +DEFINE_PER_CPU(struct exc_table, exc_table); DEFINE_PER_CPU(struct debug_table, debug_table); void die(const char*, struct pt_regs*, long); @@ -178,13 +178,14 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err) void do_unhandled(struct pt_regs *regs, unsigned long exccause) { __die_if_kernel("Caught unhandled exception - should not happen", - regs, SIGKILL); + regs, SIGKILL); /* If in user mode, send SIGILL signal to current process */ - printk("Caught unhandled exception in '%s' " - "(pid = %d, pc = %#010lx) - should not happen\n" - "\tEXCCAUSE is %ld\n", - current->comm, task_pid_nr(current), regs->pc, exccause); + pr_info_ratelimited("Caught unhandled exception in '%s' " + "(pid = %d, pc = %#010lx) - should not happen\n" + "\tEXCCAUSE is %ld\n", + current->comm, task_pid_nr(current), regs->pc, + exccause); force_sig(SIGILL, current); } @@ -305,8 +306,8 @@ do_illegal_instruction(struct pt_regs *regs) /* If in user mode, send SIGILL signal to current process. 
*/ - printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n", - current->comm, task_pid_nr(current), regs->pc); + pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n", + current->comm, task_pid_nr(current), regs->pc); force_sig(SIGILL, current); } @@ -325,13 +326,14 @@ do_unaligned_user (struct pt_regs *regs) siginfo_t info; __die_if_kernel("Unhandled unaligned exception in kernel", - regs, SIGKILL); + regs, SIGKILL); current->thread.bad_vaddr = regs->excvaddr; current->thread.error_code = -3; - printk("Unaligned memory access to %08lx in '%s' " - "(pid = %d, pc = %#010lx)\n", - regs->excvaddr, current->comm, task_pid_nr(current), regs->pc); + pr_info_ratelimited("Unaligned memory access to %08lx in '%s' " + "(pid = %d, pc = %#010lx)\n", + regs->excvaddr, current->comm, + task_pid_nr(current), regs->pc); info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; @@ -365,28 +367,28 @@ do_debug(struct pt_regs *regs) } -static void set_handler(int idx, void *handler) -{ - unsigned int cpu; - - for_each_possible_cpu(cpu) - per_cpu(exc_table, cpu)[idx] = (unsigned long)handler; -} +#define set_handler(type, cause, handler) \ + do { \ + unsigned int cpu; \ + \ + for_each_possible_cpu(cpu) \ + per_cpu(exc_table, cpu).type[cause] = (handler);\ + } while (0) /* Set exception C handler - for temporary use when probing exceptions */ void * __init trap_set_handler(int cause, void *handler) { - void *previous = (void *)per_cpu(exc_table, 0)[ - EXC_TABLE_DEFAULT / 4 + cause]; - set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler); + void *previous = per_cpu(exc_table, 0).default_handler[cause]; + + set_handler(default_handler, cause, handler); return previous; } static void trap_init_excsave(void) { - unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table); + unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table); __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1)); } @@ -418,10 +420,10 @@ void __init trap_init(void) /* Setup default vectors. */ - for(i = 0; i < 64; i++) { - set_handler(EXC_TABLE_FAST_USER/4 + i, user_exception); - set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception); - set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled); + for (i = 0; i < EXCCAUSE_N; i++) { + set_handler(fast_user_handler, i, user_exception); + set_handler(fast_kernel_handler, i, kernel_exception); + set_handler(default_handler, i, do_unhandled); } /* Setup specific handlers. */ @@ -433,11 +435,11 @@ void __init trap_init(void) void *handler = dispatch_init_table[i].handler; if (fast == 0) - set_handler (EXC_TABLE_DEFAULT/4 + cause, handler); + set_handler(default_handler, cause, handler); if (fast && fast & USER) - set_handler (EXC_TABLE_FAST_USER/4 + cause, handler); + set_handler(fast_user_handler, cause, handler); if (fast && fast & KRNL) - set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler); + set_handler(fast_kernel_handler, cause, handler); } /* Initialize EXCSAVE_1 to hold the address of the exception table. 
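(To make the exc_table rework concrete: EXCSAVE1 now holds a struct exc_table * instead of a flat array of longs, and the low-level vectors index its per-cause handler arrays. A hypothetical C model of that dispatch, not literal kernel code:

    typedef void (*exc_handler_t)(struct pt_regs *regs, unsigned long cause);

    static void dispatch(struct exc_table *tbl, struct pt_regs *regs,
                         unsigned long cause, int user)
    {
            exc_handler_t h = (exc_handler_t)(user ?
                    tbl->fast_user_handler[cause] :
                    tbl->fast_kernel_handler[cause]);

            h(regs, cause);  /* slow paths end up in default_handler[cause] */
    }

The payoff is that asm-offsets.c derives EXC_TABLE_* from offsetof(), so the table layout lives in one C struct instead of hand-maintained hex offsets.)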
*/ diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 332e9d6..841503d 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -205,9 +205,6 @@ ENDPROC(_KernelExceptionVector) */ .section .DoubleExceptionVector.text, "ax" - .begin literal_prefix .DoubleExceptionVector - .globl _DoubleExceptionVector_WindowUnderflow - .globl _DoubleExceptionVector_WindowOverflow ENTRY(_DoubleExceptionVector) @@ -217,8 +214,12 @@ ENTRY(_DoubleExceptionVector) /* Check for kernel double exception (usually fatal). */ rsr a2, ps - _bbci.l a2, PS_UM_BIT, .Lksp + _bbsi.l a2, PS_UM_BIT, 1f + j .Lksp + .align 4 + .literal_position +1: /* Check if we are currently handling a window exception. */ /* Note: We don't need to indicate that we enter a critical section. */ @@ -304,8 +305,7 @@ _DoubleExceptionVector_WindowUnderflow: .Lunrecoverable: rsr a3, excsave1 wsr a0, excsave1 - movi a0, unrecoverable_exception - callx0 a0 + call0 unrecoverable_exception .Lfixup:/* Check for a fixup handler or if we were in a critical section. */ @@ -475,11 +475,8 @@ _DoubleExceptionVector_handle_exception: rotw -3 j 1b - ENDPROC(_DoubleExceptionVector) - .end literal_prefix - .text /* * Fixup handler for TLB miss in double exception handler for window owerflow. @@ -508,6 +505,8 @@ ENDPROC(_DoubleExceptionVector) * a3: exctable, original value in excsave1 */ + .literal_position + ENTRY(window_overflow_restore_a0_fixup) rsr a0, ps diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 162c77e..70b731e 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -45,24 +45,16 @@ jiffies = jiffies_64; LONG(sym ## _end); \ LONG(LOADADDR(section)) -/* Macro to define a section for a vector. - * - * Use of the MIN function catches the types of errors illustrated in - * the following example: - * - * Assume the section .DoubleExceptionVector.literal is completely - * full. Then a programmer adds code to .DoubleExceptionVector.text - * that produces another literal. The final literal position will - * overlay onto the first word of the adjacent code section - * .DoubleExceptionVector.text. (In practice, the literals will - * overwrite the code, and the first few instructions will be - * garbage.) +/* + * Macro to define a section for a vector. When CONFIG_VECTORS_OFFSET is + * defined code for every vector is located with other init data. At startup + * time head.S copies code for every vector to its final position according + * to description recorded in the corresponding RELOCATE_ENTRY. */ #ifdef CONFIG_VECTORS_OFFSET -#define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec) \ - section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size, \ - LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \ +#define SECTION_VECTOR(sym, section, addr, prevsec) \ + section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \ { \ . 
= ALIGN(4); \ sym ## _start = ABSOLUTE(.); \ @@ -112,26 +104,19 @@ SECTIONS #if XCHAL_EXCM_LEVEL >= 6 SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR) #endif - SECTION_VECTOR (.DebugInterruptVector.literal, DEBUG_VECTOR_VADDR - 4) SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR) - SECTION_VECTOR (.KernelExceptionVector.literal, KERNEL_VECTOR_VADDR - 4) SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) - SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) - SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20) SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) #endif + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + ENTRY_TEXT TEXT_TEXT - VMLINUX_SYMBOL(__sched_text_start) = .; - *(.sched.literal .sched.text) - VMLINUX_SYMBOL(__sched_text_end) = .; - VMLINUX_SYMBOL(__cpuidle_text_start) = .; - *(.cpuidle.literal .cpuidle.text) - VMLINUX_SYMBOL(__cpuidle_text_end) = .; - VMLINUX_SYMBOL(__lock_text_start) = .; - *(.spinlock.literal .spinlock.text) - VMLINUX_SYMBOL(__lock_text_end) = .; + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT } _etext = .; @@ -196,8 +181,6 @@ SECTIONS .KernelExceptionVector.text); RELOCATE_ENTRY(_UserExceptionVector_text, .UserExceptionVector.text); - RELOCATE_ENTRY(_DoubleExceptionVector_literal, - .DoubleExceptionVector.literal); RELOCATE_ENTRY(_DoubleExceptionVector_text, .DoubleExceptionVector.text); RELOCATE_ENTRY(_DebugInterruptVector_text, @@ -230,25 +213,19 @@ SECTIONS SECTION_VECTOR (_WindowVectors_text, .WindowVectors.text, - WINDOW_VECTORS_VADDR, 4, + WINDOW_VECTORS_VADDR, .dummy) - SECTION_VECTOR (_DebugInterruptVector_literal, - .DebugInterruptVector.literal, - DEBUG_VECTOR_VADDR - 4, - SIZEOF(.WindowVectors.text), - .WindowVectors.text) SECTION_VECTOR (_DebugInterruptVector_text, .DebugInterruptVector.text, DEBUG_VECTOR_VADDR, - 4, - .DebugInterruptVector.literal) + .WindowVectors.text) #undef LAST #define LAST .DebugInterruptVector.text #if XCHAL_EXCM_LEVEL >= 2 SECTION_VECTOR (_Level2InterruptVector_text, .Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR, - SIZEOF(LAST), LAST) + LAST) # undef LAST # define LAST .Level2InterruptVector.text #endif @@ -256,7 +233,7 @@ SECTIONS SECTION_VECTOR (_Level3InterruptVector_text, .Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR, - SIZEOF(LAST), LAST) + LAST) # undef LAST # define LAST .Level3InterruptVector.text #endif @@ -264,7 +241,7 @@ SECTIONS SECTION_VECTOR (_Level4InterruptVector_text, .Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR, - SIZEOF(LAST), LAST) + LAST) # undef LAST # define LAST .Level4InterruptVector.text #endif @@ -272,7 +249,7 @@ SECTIONS SECTION_VECTOR (_Level5InterruptVector_text, .Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR, - SIZEOF(LAST), LAST) + LAST) # undef LAST # define LAST .Level5InterruptVector.text #endif @@ -280,40 +257,23 @@ SECTIONS SECTION_VECTOR (_Level6InterruptVector_text, .Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR, - SIZEOF(LAST), LAST) + LAST) # undef LAST # define LAST .Level6InterruptVector.text #endif - SECTION_VECTOR (_KernelExceptionVector_literal, - .KernelExceptionVector.literal, - KERNEL_VECTOR_VADDR - 4, - SIZEOF(LAST), LAST) -#undef LAST SECTION_VECTOR (_KernelExceptionVector_text, .KernelExceptionVector.text, KERNEL_VECTOR_VADDR, - 4, - .KernelExceptionVector.literal) - SECTION_VECTOR (_UserExceptionVector_literal, - .UserExceptionVector.literal, - USER_VECTOR_VADDR - 4, - 
SIZEOF(.KernelExceptionVector.text), - .KernelExceptionVector.text) + LAST) +#undef LAST SECTION_VECTOR (_UserExceptionVector_text, .UserExceptionVector.text, USER_VECTOR_VADDR, - 4, - .UserExceptionVector.literal) - SECTION_VECTOR (_DoubleExceptionVector_literal, - .DoubleExceptionVector.literal, - DOUBLEEXC_VECTOR_VADDR - 20, - SIZEOF(.UserExceptionVector.text), - .UserExceptionVector.text) + .KernelExceptionVector.text) SECTION_VECTOR (_DoubleExceptionVector_text, .DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR, - 20, - .DoubleExceptionVector.literal) + .UserExceptionVector.text) . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; @@ -323,7 +283,6 @@ SECTIONS SECTION_VECTOR (_SecondaryResetVector_text, .SecondaryResetVector.text, RESET_VECTOR1_VADDR, - SIZEOF(.DoubleExceptionVector.text), .DoubleExceptionVector.text) . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text); @@ -373,5 +332,4 @@ SECTIONS /* Sections to be discarded */ DISCARDS - /DISCARD/ : { *(.exit.literal) } } diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 6723910..04f19de 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -41,7 +41,12 @@ EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(__memset); +EXPORT_SYMBOL(__memcpy); +EXPORT_SYMBOL(__memmove); +#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER EXPORT_SYMBOL(__strncpy_user); +#endif EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S index 4eb573d..528fe0d 100644 --- a/arch/xtensa/lib/checksum.S +++ b/arch/xtensa/lib/checksum.S @@ -14,9 +14,10 @@ * 2 of the License, or (at your option) any later version. */ -#include <asm/errno.h> +#include <linux/errno.h> #include <linux/linkage.h> #include <variant/core.h> +#include <asm/asmmacro.h> /* * computes a partial checksum, e.g. for TCP/UDP fragments @@ -175,23 +176,8 @@ ENDPROC(csum_partial) /* * Copy from ds while checksumming, otherwise like csum_partial - * - * The macros SRC and DST specify the type of access for the instruction. - * thus we can call a custom exception handler for each access type. */ -#define SRC(y...) \ - 9999: y; \ - .section __ex_table, "a"; \ - .long 9999b, 6001f ; \ - .previous - -#define DST(y...) 
\ - 9999: y; \ - .section __ex_table, "a"; \ - .long 9999b, 6002f ; \ - .previous - /* unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, int sum, int *src_err_ptr, int *dst_err_ptr) @@ -244,28 +230,28 @@ ENTRY(csum_partial_copy_generic) add a10, a10, a2 /* a10 = end of last 32-byte src chunk */ .Loop5: #endif -SRC( l32i a9, a2, 0 ) -SRC( l32i a8, a2, 4 ) -DST( s32i a9, a3, 0 ) -DST( s32i a8, a3, 4 ) +EX(10f) l32i a9, a2, 0 +EX(10f) l32i a8, a2, 4 +EX(11f) s32i a9, a3, 0 +EX(11f) s32i a8, a3, 4 ONES_ADD(a5, a9) ONES_ADD(a5, a8) -SRC( l32i a9, a2, 8 ) -SRC( l32i a8, a2, 12 ) -DST( s32i a9, a3, 8 ) -DST( s32i a8, a3, 12 ) +EX(10f) l32i a9, a2, 8 +EX(10f) l32i a8, a2, 12 +EX(11f) s32i a9, a3, 8 +EX(11f) s32i a8, a3, 12 ONES_ADD(a5, a9) ONES_ADD(a5, a8) -SRC( l32i a9, a2, 16 ) -SRC( l32i a8, a2, 20 ) -DST( s32i a9, a3, 16 ) -DST( s32i a8, a3, 20 ) +EX(10f) l32i a9, a2, 16 +EX(10f) l32i a8, a2, 20 +EX(11f) s32i a9, a3, 16 +EX(11f) s32i a8, a3, 20 ONES_ADD(a5, a9) ONES_ADD(a5, a8) -SRC( l32i a9, a2, 24 ) -SRC( l32i a8, a2, 28 ) -DST( s32i a9, a3, 24 ) -DST( s32i a8, a3, 28 ) +EX(10f) l32i a9, a2, 24 +EX(10f) l32i a8, a2, 28 +EX(11f) s32i a9, a3, 24 +EX(11f) s32i a8, a3, 28 ONES_ADD(a5, a9) ONES_ADD(a5, a8) addi a2, a2, 32 @@ -284,8 +270,8 @@ DST( s32i a8, a3, 28 ) add a10, a10, a2 /* a10 = end of last 4-byte src chunk */ .Loop6: #endif -SRC( l32i a9, a2, 0 ) -DST( s32i a9, a3, 0 ) +EX(10f) l32i a9, a2, 0 +EX(11f) s32i a9, a3, 0 ONES_ADD(a5, a9) addi a2, a2, 4 addi a3, a3, 4 @@ -315,8 +301,8 @@ DST( s32i a9, a3, 0 ) add a10, a10, a2 /* a10 = end of last 2-byte src chunk */ .Loop7: #endif -SRC( l16ui a9, a2, 0 ) -DST( s16i a9, a3, 0 ) +EX(10f) l16ui a9, a2, 0 +EX(11f) s16i a9, a3, 0 ONES_ADD(a5, a9) addi a2, a2, 2 addi a3, a3, 2 @@ -326,8 +312,8 @@ DST( s16i a9, a3, 0 ) 4: /* This section processes a possible trailing odd byte. */ _bbci.l a4, 0, 8f /* 1-byte chunk */ -SRC( l8ui a9, a2, 0 ) -DST( s8i a9, a3, 0 ) +EX(10f) l8ui a9, a2, 0 +EX(11f) s8i a9, a3, 0 #ifdef __XTENSA_EB__ slli a9, a9, 8 /* shift byte to bits 8..15 */ #endif @@ -350,10 +336,10 @@ DST( s8i a9, a3, 0 ) add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */ .Loop8: #endif -SRC( l8ui a9, a2, 0 ) -SRC( l8ui a8, a2, 1 ) -DST( s8i a9, a3, 0 ) -DST( s8i a8, a3, 1 ) +EX(10f) l8ui a9, a2, 0 +EX(10f) l8ui a8, a2, 1 +EX(11f) s8i a9, a3, 0 +EX(11f) s8i a8, a3, 1 #ifdef __XTENSA_EB__ slli a9, a9, 8 /* combine into a single 16-bit value */ #else /* for checksum computation */ @@ -381,7 +367,7 @@ ENDPROC(csum_partial_copy_generic) a12 = original dst for exception handling */ -6001: +10: _movi a2, -EFAULT s32i a2, a6, 0 /* src_err_ptr */ @@ -403,7 +389,7 @@ ENDPROC(csum_partial_copy_generic) 2: retw -6002: +11: movi a2, -EFAULT s32i a2, a7, 0 /* dst_err_ptr */ movi a2, 0 diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S index b1c219a..c0f6981 100644 --- a/arch/xtensa/lib/memcopy.S +++ b/arch/xtensa/lib/memcopy.S @@ -9,23 +9,9 @@ * Copyright (C) 2002 - 2012 Tensilica Inc. 
*/ +#include <linux/linkage.h> #include <variant/core.h> - - .macro src_b r, w0, w1 -#ifdef __XTENSA_EB__ - src \r, \w0, \w1 -#else - src \r, \w1, \w0 -#endif - .endm - - .macro ssa8 r -#ifdef __XTENSA_EB__ - ssa8b \r -#else - ssa8l \r -#endif - .endm +#include <asm/asmmacro.h> /* * void *memcpy(void *dst, const void *src, size_t len); @@ -123,10 +109,8 @@ addi a5, a5, 2 j .Ldstaligned # dst is now aligned, return to main algorithm - .align 4 - .global memcpy - .type memcpy,@function -memcpy: +ENTRY(__memcpy) +WEAK(memcpy) entry sp, 16 # minimal stack frame # a2/ dst, a3/ src, a4/ len @@ -209,7 +193,7 @@ memcpy: .Lsrcunaligned: _beqz a4, .Ldone # avoid loading anything for zero-length copies # copy 16 bytes per iteration for word-aligned dst and unaligned src - ssa8 a3 # set shift amount from byte offset + __ssa8 a3 # set shift amount from byte offset /* set to 1 when running on ISS (simulator) with the lint or ferret client, or 0 to save a few cycles */ @@ -229,16 +213,16 @@ memcpy: .Loop2: l32i a7, a3, 4 l32i a8, a3, 8 - src_b a6, a6, a7 + __src_b a6, a6, a7 s32i a6, a5, 0 l32i a9, a3, 12 - src_b a7, a7, a8 + __src_b a7, a7, a8 s32i a7, a5, 4 l32i a6, a3, 16 - src_b a8, a8, a9 + __src_b a8, a8, a9 s32i a8, a5, 8 addi a3, a3, 16 - src_b a9, a9, a6 + __src_b a9, a9, a6 s32i a9, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS @@ -249,10 +233,10 @@ memcpy: # copy 8 bytes l32i a7, a3, 4 l32i a8, a3, 8 - src_b a6, a6, a7 + __src_b a6, a6, a7 s32i a6, a5, 0 addi a3, a3, 8 - src_b a7, a7, a8 + __src_b a7, a7, a8 s32i a7, a5, 4 addi a5, a5, 8 mov a6, a8 @@ -261,7 +245,7 @@ memcpy: # copy 4 bytes l32i a7, a3, 4 addi a3, a3, 4 - src_b a6, a6, a7 + __src_b a6, a6, a7 s32i a6, a5, 0 addi a5, a5, 4 mov a6, a7 @@ -288,14 +272,14 @@ memcpy: s8i a6, a5, 0 retw +ENDPROC(__memcpy) /* * void bcopy(const void *src, void *dest, size_t n); */ - .align 4 - .global bcopy - .type bcopy,@function -bcopy: + +ENTRY(bcopy) + entry sp, 16 # minimal stack frame # a2=src, a3=dst, a4=len mov a5, a3 @@ -303,6 +287,8 @@ bcopy: mov a2, a5 j .Lmovecommon # go to common code for memmove+bcopy +ENDPROC(bcopy) + /* * void *memmove(void *dst, const void *src, size_t len); * @@ -391,10 +377,8 @@ bcopy: j .Lbackdstaligned # dst is now aligned, # return to main algorithm - .align 4 - .global memmove - .type memmove,@function -memmove: +ENTRY(__memmove) +WEAK(memmove) entry sp, 16 # minimal stack frame # a2/ dst, a3/ src, a4/ len @@ -485,7 +469,7 @@ memmove: .Lbacksrcunaligned: _beqz a4, .Lbackdone # avoid loading anything for zero-length copies # copy 16 bytes per iteration for word-aligned dst and unaligned src - ssa8 a3 # set shift amount from byte offset + __ssa8 a3 # set shift amount from byte offset #define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with * the lint or ferret client, or 0 * to save a few cycles */ @@ -506,15 +490,15 @@ memmove: l32i a7, a3, 12 l32i a8, a3, 8 addi a5, a5, -16 - src_b a6, a7, a6 + __src_b a6, a7, a6 s32i a6, a5, 12 l32i a9, a3, 4 - src_b a7, a8, a7 + __src_b a7, a8, a7 s32i a7, a5, 8 l32i a6, a3, 0 - src_b a8, a9, a8 + __src_b a8, a9, a8 s32i a8, a5, 4 - src_b a9, a6, a9 + __src_b a9, a6, a9 s32i a9, a5, 0 #if !XCHAL_HAVE_LOOPS bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start @@ -526,9 +510,9 @@ memmove: l32i a7, a3, 4 l32i a8, a3, 0 addi a5, a5, -8 - src_b a6, a7, a6 + __src_b a6, a7, a6 s32i a6, a5, 4 - src_b a7, a8, a7 + __src_b a7, a8, a7 s32i a7, a5, 0 mov a6, a8 .Lback12: @@ -537,7 +521,7 @@ memmove: addi a3, a3, -4 l32i a7, a3, 0 addi a5, a5, -4 - src_b a6, a7, 
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
index 10b8c40..276747d 100644
--- a/arch/xtensa/lib/memset.S
+++ b/arch/xtensa/lib/memset.S
@@ -11,7 +11,9 @@
  * Copyright (C) 2002 Tensilica Inc.
  */
 
+#include <linux/linkage.h>
 #include <variant/core.h>
+#include <asm/asmmacro.h>
 
 /*
  * void *memset(void *dst, int c, size_t length)
@@ -28,20 +30,10 @@
  * the alignment labels).
  */
 
-/* Load or store instructions that may cause exceptions use the EX macro. */
-
-#define EX(insn,reg1,reg2,offset,handler)	\
-9:	insn	reg1, reg2, offset;		\
-	.section __ex_table, "a";		\
-	.word	9b, handler;			\
-	.previous
-
 .text
-.align	4
-.global	memset
-.type	memset,@function
-memset:
+ENTRY(__memset)
+WEAK(memset)
+
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ c, a4/ length
 	extui	a3, a3, 0, 8	# mask to just 8 bits
@@ -73,10 +65,10 @@ memset:
 	add	a6, a6, a5	# a6 = end of last 16B chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop1:
-	EX(s32i, a3, a5, 0, memset_fixup)
-	EX(s32i, a3, a5, 4, memset_fixup)
-	EX(s32i, a3, a5, 8, memset_fixup)
-	EX(s32i, a3, a5, 12, memset_fixup)
+EX(10f)	s32i	a3, a5, 0
+EX(10f)	s32i	a3, a5, 4
+EX(10f)	s32i	a3, a5, 8
+EX(10f)	s32i	a3, a5, 12
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a5, a6, .Loop1
@@ -84,23 +76,23 @@ memset:
 .Loop1done:
 	bbci.l	a4, 3, .L2
 	# set 8 bytes
-	EX(s32i, a3, a5, 0, memset_fixup)
-	EX(s32i, a3, a5, 4, memset_fixup)
+EX(10f)	s32i	a3, a5, 0
+EX(10f)	s32i	a3, a5, 4
 	addi	a5, a5, 8
 .L2:
 	bbci.l	a4, 2, .L3
 	# set 4 bytes
-	EX(s32i, a3, a5, 0, memset_fixup)
+EX(10f)	s32i	a3, a5, 0
 	addi	a5, a5, 4
 .L3:
 	bbci.l	a4, 1, .L4
 	# set 2 bytes
-	EX(s16i, a3, a5, 0, memset_fixup)
+EX(10f)	s16i	a3, a5, 0
 	addi	a5, a5, 2
 .L4:
 	bbci.l	a4, 0, .L5
 	# set 1 byte
-	EX(s8i, a3, a5, 0, memset_fixup)
+EX(10f)	s8i	a3, a5, 0
 .L5:
 .Lret1:
 	retw
@@ -114,7 +106,7 @@ memset:
 	bbci.l	a5, 0, .L20	# branch if dst alignment half-aligned
 	# dst is only byte aligned
 	# set 1 byte
-	EX(s8i, a3, a5, 0, memset_fixup)
+EX(10f)	s8i	a3, a5, 0
 	addi	a5, a5, 1
 	addi	a4, a4, -1
 	# now retest if dst aligned
@@ -122,7 +114,7 @@ memset:
 .L20:
 	# dst half-aligned
 	# set 2 bytes
-	EX(s16i, a3, a5, 0, memset_fixup)
+EX(10f)	s16i	a3, a5, 0
 	addi	a5, a5, 2
 	addi	a4, a4, -2
 	j	.L0		# dst is now aligned, return to main algorithm
@@ -141,7 +133,7 @@ memset:
 	add	a6, a5, a4	# a6 = ending address
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lbyteloop:
-	EX(s8i, a3, a5, 0, memset_fixup)
+EX(10f)	s8i	a3, a5, 0
 	addi	a5, a5, 1
 #if !XCHAL_HAVE_LOOPS
 	blt	a5, a6, .Lbyteloop
@@ -149,12 +141,13 @@ memset:
 .Lbytesetdone:
 	retw
 
+ENDPROC(__memset)
 
 	.section .fixup, "ax"
 	.align	4
 
 /* We return zero if a failure occurred. */
 
-memset_fixup:
+10:
 	movi	a2, 0
 	retw
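Each EX(10f) above records the address of the marked store together with the
10: fixup label in __ex_table. A simplified C view of how that table is
consumed when one of those stores faults (compare the bad_page_fault() hunk
further down; this is a sketch, not the exact xtensa code):

	#include <linux/extable.h>	/* search_exception_tables() */
	#include <asm/ptrace.h>

	/* If the faulting PC is covered by an __ex_table entry, resume at
	 * its fixup stub (which returns 0 for __memset) instead of oopsing. */
	static bool fixup_kernel_fault(struct pt_regs *regs)
	{
		const struct exception_table_entry *e;

		e = search_exception_tables(regs->pc);
		if (!e)
			return false;	/* unhandled: let the kernel oops */
		regs->pc = e->fixup;
		return true;
	}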
diff --git a/arch/xtensa/lib/pci-auto.c b/arch/xtensa/lib/pci-auto.c
index 34d05ab..a2b5581 100644
--- a/arch/xtensa/lib/pci-auto.c
+++ b/arch/xtensa/lib/pci-auto.c
@@ -49,17 +49,6 @@
  *
  */
 
-
-/* define DEBUG to print some debugging messages. */
-
-#undef DEBUG
-
-#ifdef DEBUG
-# define DBG(x...) printk(x)
-#else
-# define DBG(x...)
-#endif
-
 static int pciauto_upper_iospc;
 static int pciauto_upper_memspc;
 
@@ -97,7 +86,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
 		{
 			bar_size &= PCI_BASE_ADDRESS_IO_MASK;
 			upper_limit = &pciauto_upper_iospc;
-			DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
+			pr_debug("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
 		}
 		else
 		{
@@ -107,7 +96,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
 
 			bar_size &= PCI_BASE_ADDRESS_MEM_MASK;
 			upper_limit = &pciauto_upper_memspc;
-			DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
+			pr_debug("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
 		}
 
 		/* Allocate a base address (bar_size is negative!) */
@@ -125,7 +114,8 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
 		if (found_mem64)
 			pci_write_config_dword(dev, (bar+=4), 0x00000000);
 
-		DBG("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit);
+		pr_debug("size=0x%x, address=0x%x\n",
+			 ~bar_size + 1, *upper_limit);
 	}
 }
 
@@ -150,7 +140,7 @@ pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn)
 	if (irq == -1)
 		irq = 0;
 
-	DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);
+	pr_debug("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);
 
 	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
 }
@@ -289,8 +279,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
 
 			int iosave, memsave;
 
-			DBG("PCI Autoconfig: Found P2P bridge, device %d\n",
-			    PCI_SLOT(pci_devfn));
+			pr_debug("PCI Autoconfig: Found P2P bridge, device %d\n",
+				 PCI_SLOT(pci_devfn));
 
 			/* Allocate PCI I/O and/or memory space */
 			pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1);
@@ -306,23 +296,6 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
 		}
 
-
-#if 0
-		/* Skip legacy mode IDE controller */
-
-		if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) {
-
-			unsigned char prg_iface;
-			pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface);
-
-			if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) {
-				DBG("PCI Autoconfig: Skipping legacy mode "
-				    "IDE controller\n");
-				continue;
-			}
-		}
-#endif
-
 		/*
 		 * Found a peripheral, enable some standard
 		 * settings
@@ -337,8 +310,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
 		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
 
 		/* Allocate PCI I/O and/or memory space */
-		DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
-			current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) );
+		pr_debug("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
+			 current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn));
 
 		pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5);
 		pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
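Unlike the removed ad-hoc DBG() macro, pr_debug() needs no per-file plumbing:
it compiles to nothing unless the translation unit defines DEBUG or
CONFIG_DYNAMIC_DEBUG enables the call site at runtime. A minimal opt-in
(illustrative file, not part of this patch) looks like:

	#define DEBUG			/* enable pr_debug() for this file only */
	#include <linux/printk.h>

	static void show_bar(int bar_nr)
	{
		/* Without DEBUG or dynamic debug, this call compiles away. */
		pr_debug("PCI Autoconfig: BAR %d\n", bar_nr);
	}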
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
index 1ad0ecf..5fce16b 100644
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -11,16 +11,10 @@
  * Copyright (C) 2002 Tensilica Inc.
  */
 
-#include <variant/core.h>
 #include <linux/errno.h>
-
-/* Load or store instructions that may cause exceptions use the EX macro. */
-
-#define EX(insn,reg1,reg2,offset,handler)	\
-9:	insn	reg1, reg2, offset;		\
-	.section __ex_table, "a";		\
-	.word	9b, handler;			\
-	.previous
+#include <linux/linkage.h>
+#include <variant/core.h>
+#include <asm/asmmacro.h>
 
 /*
  * char *__strncpy_user(char *dst, const char *src, size_t len)
@@ -54,10 +48,8 @@
 # a12/ tmp
 
 .text
-.align	4
-.global	__strncpy_user
-.type	__strncpy_user,@function
-__strncpy_user:
+ENTRY(__strncpy_user)
+
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
 	mov	a11, a2		# leave dst in return value register
@@ -75,9 +67,9 @@ __strncpy_user:
 	j	.Ldstunaligned
 
 .Lsrc1mod2:	# src address is odd
-	EX(l8ui, a9, a3, 0, fixup_l)	# get byte 0
+EX(11f)	l8ui	a9, a3, 0		# get byte 0
 	addi	a3, a3, 1		# advance src pointer
-	EX(s8i, a9, a11, 0, fixup_s)	# store byte 0
+EX(10f)	s8i	a9, a11, 0		# store byte 0
 	beqz	a9, .Lret		# if byte 0 is zero
 	addi	a11, a11, 1		# advance dst pointer
 	addi	a4, a4, -1		# decrement len
@@ -85,16 +77,16 @@ __strncpy_user:
 	bbci.l	a3, 1, .Lsrcaligned	# if src is now word-aligned
 
 .Lsrc2mod4:	# src address is 2 mod 4
-	EX(l8ui, a9, a3, 0, fixup_l)	# get byte 0
+EX(11f)	l8ui	a9, a3, 0		# get byte 0
 	/* 1-cycle interlock */
-	EX(s8i, a9, a11, 0, fixup_s)	# store byte 0
+EX(10f)	s8i	a9, a11, 0		# store byte 0
 	beqz	a9, .Lret		# if byte 0 is zero
 	addi	a11, a11, 1		# advance dst pointer
 	addi	a4, a4, -1		# decrement len
 	beqz	a4, .Lret		# if len is zero
-	EX(l8ui, a9, a3, 1, fixup_l)	# get byte 0
+EX(11f)	l8ui	a9, a3, 1		# get byte 0
 	addi	a3, a3, 2		# advance src pointer
-	EX(s8i, a9, a11, 0, fixup_s)	# store byte 0
+EX(10f)	s8i	a9, a11, 0		# store byte 0
 	beqz	a9, .Lret		# if byte 0 is zero
 	addi	a11, a11, 1		# advance dst pointer
 	addi	a4, a4, -1		# decrement len
@@ -117,12 +109,12 @@ __strncpy_user:
 	add	a12, a12, a11	# a12 = end of last 4B chunk
 #endif
 .Loop1:
-	EX(l32i, a9, a3, 0, fixup_l)	# get word from src
+EX(11f)	l32i	a9, a3, 0		# get word from src
 	addi	a3, a3, 4		# advance src pointer
 	bnone	a9, a5, .Lz0		# if byte 0 is zero
 	bnone	a9, a6, .Lz1		# if byte 1 is zero
 	bnone	a9, a7, .Lz2		# if byte 2 is zero
-	EX(s32i, a9, a11, 0, fixup_s)	# store word to dst
+EX(10f)	s32i	a9, a11, 0		# store word to dst
 	bnone	a9, a8, .Lz3		# if byte 3 is zero
 	addi	a11, a11, 4		# advance dst pointer
 #if !XCHAL_HAVE_LOOPS
@@ -132,7 +124,7 @@ __strncpy_user:
 .Loop1done:
 	bbci.l	a4, 1, .L100
 	# copy 2 bytes
-	EX(l16ui, a9, a3, 0, fixup_l)
+EX(11f)	l16ui	a9, a3, 0
 	addi	a3, a3, 2		# advance src pointer
 #ifdef __XTENSA_EB__
 	bnone	a9, a7, .Lz0		# if byte 2 is zero
 	bnone	a9, a8, .Lz1		# if byte 3 is zero
 #else
 	bnone	a9, a5, .Lz0		# if byte 0 is zero
 	bnone	a9, a6, .Lz1		# if byte 1 is zero
 #endif
-	EX(s16i, a9, a11, 0, fixup_s)
+EX(10f)	s16i	a9, a11, 0
 	addi	a11, a11, 2		# advance dst pointer
 .L100:
 	bbci.l	a4, 0, .Lret
-	EX(l8ui, a9, a3, 0, fixup_l)
+EX(11f)	l8ui	a9, a3, 0
 	/* slot */
-	EX(s8i, a9, a11, 0, fixup_s)
+EX(10f)	s8i	a9, a11, 0
 	beqz	a9, .Lret		# if byte is zero
 	addi	a11, a11, 1-3		# advance dst ptr 1, but also cancel
 					# the effect of adding 3 in .Lz3 code
@@ -161,14 +153,14 @@ __strncpy_user:
 #ifdef __XTENSA_EB__
 	movi	a9, 0
 #endif /* __XTENSA_EB__ */
-	EX(s8i, a9, a11, 0, fixup_s)
+EX(10f)	s8i	a9, a11, 0
 	sub	a2, a11, a2		# compute strlen
 	retw
 .Lz1:	# byte 1 is zero
 #ifdef __XTENSA_EB__
 	extui	a9, a9, 16, 16
 #endif /* __XTENSA_EB__ */
-	EX(s16i, a9, a11, 0, fixup_s)
+EX(10f)	s16i	a9, a11, 0
 	addi	a11, a11, 1		# advance dst pointer
 	sub	a2, a11, a2		# compute strlen
 	retw
@@ -176,9 +168,9 @@
 #ifdef __XTENSA_EB__
 	extui	a9, a9, 16, 16
 #endif /* __XTENSA_EB__ */
-	EX(s16i, a9, a11, 0, fixup_s)
+EX(10f)	s16i	a9, a11, 0
 	movi	a9, 0
-	EX(s8i, a9, a11, 2, fixup_s)
+EX(10f)	s8i	a9, a11, 2
 	addi	a11, a11, 2		# advance dst pointer
 	sub	a2, a11, a2		# compute strlen
 	retw
@@ -196,9 +188,9 @@ __strncpy_user:
 	add	a12, a11, a4		# a12 = ending address
 #endif /* XCHAL_HAVE_LOOPS */
 .Lnextbyte:
-	EX(l8ui, a9, a3, 0, fixup_l)
+EX(11f)	l8ui	a9, a3, 0
 	addi	a3, a3, 1
-	EX(s8i, a9, a11, 0, fixup_s)
+EX(10f)	s8i	a9, a11, 0
 	beqz	a9, .Lunalignedend
 	addi	a11, a11, 1
 #if !XCHAL_HAVE_LOOPS
@@ -209,6 +201,7 @@ __strncpy_user:
 	sub	a2, a11, a2		# compute strlen
 	retw
 
+ENDPROC(__strncpy_user)
 
 	.section .fixup, "ax"
 	.align	4
@@ -218,8 +211,7 @@ __strncpy_user:
  * implementation in memset(). Thus, we differentiate between
  * load/store fixups.
  */
-fixup_s:
-fixup_l:
+10:
+11:
 	movi	a2, -EFAULT
 	retw
-
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
index 4c03b1e..0b956ce 100644
--- a/arch/xtensa/lib/strnlen_user.S
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -11,15 +11,9 @@
  * Copyright (C) 2002 Tensilica Inc.
  */
 
+#include <linux/linkage.h>
 #include <variant/core.h>
-
-/* Load or store instructions that may cause exceptions use the EX macro. */
-
-#define EX(insn,reg1,reg2,offset,handler)	\
-9:	insn	reg1, reg2, offset;		\
-	.section __ex_table, "a";		\
-	.word	9b, handler;			\
-	.previous
+#include <asm/asmmacro.h>
 
 /*
  * size_t __strnlen_user(const char *s, size_t len)
@@ -49,10 +43,8 @@
 # a10/ tmp
 
 .text
-.align	4
-.global	__strnlen_user
-.type	__strnlen_user,@function
-__strnlen_user:
+ENTRY(__strnlen_user)
+
 	entry	sp, 16		# minimal stack frame
 	# a2/ s, a3/ len
 	addi	a4, a2, -4	# because we overincrement at the end;
@@ -77,7 +69,7 @@ __strnlen_user:
 	add	a10, a10, a4	# a10 = end of last 4B chunk
 #endif /* XCHAL_HAVE_LOOPS */
 .Loop:
-	EX(l32i, a9, a4, 4, lenfixup)	# get next word of string
+EX(10f)	l32i	a9, a4, 4		# get next word of string
 	addi	a4, a4, 4		# advance string pointer
 	bnone	a9, a5, .Lz0		# if byte 0 is zero
 	bnone	a9, a6, .Lz1		# if byte 1 is zero
@@ -88,7 +80,7 @@ __strnlen_user:
 #endif
 
 .Ldone:
-	EX(l32i, a9, a4, 4, lenfixup)	# load 4 bytes for remaining checks
+EX(10f)	l32i	a9, a4, 4	# load 4 bytes for remaining checks
 
 	bbci.l	a3, 1, .L100
 	# check two more bytes (bytes 0, 1 of word)
@@ -125,14 +117,14 @@ __strnlen_user:
 	retw
 
 .L1mod2:	# address is odd
-	EX(l8ui, a9, a4, 4, lenfixup)	# get byte 0
+EX(10f)	l8ui	a9, a4, 4		# get byte 0
 	addi	a4, a4, 1		# advance string pointer
 	beqz	a9, .Lz3		# if byte 0 is zero
 	bbci.l	a4, 1, .Laligned	# if string pointer is now word-aligned
 
 .L2mod4:	# address is 2 mod 4
 	addi	a4, a4, 2	# advance ptr for aligned access
-	EX(l32i, a9, a4, 0, lenfixup)	# get word with first two bytes of string
+EX(10f)	l32i	a9, a4, 0	# get word with first two bytes of string
 	bnone	a9, a7, .Lz2	# if byte 2 (of word, not string) is zero
 	bany	a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero
 	# byte 3 is zero
@@ -140,8 +132,10 @@ __strnlen_user:
 	sub	a2, a4, a2	# subtract to get length
 	retw
 
+ENDPROC(__strnlen_user)
+
 	.section .fixup, "ax"
 	.align	4
-lenfixup:
+10:
 	movi	a2, 0
 	retw
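Both string routines above scan a word at a time, using bnone against
per-byte masks (held in a5..a8) to spot a terminating NUL without a byte
loop. The same test as a C sketch for one little-endian byte lane (mask
values assumed; the assembly swaps lanes under __XTENSA_EB__):

	#include <stdint.h>
	#include <stdbool.h>

	/* Does byte lane 'n' (0 = lowest address on little-endian) of a
	 * 32-bit word contain 0? Mirrors "bnone a9, mask, .LzN" above. */
	static inline bool byte_lane_is_zero(uint32_t word, unsigned int n)
	{
		uint32_t mask = 0xffu << (8 * n);

		return (word & mask) == 0;
	}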
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index d9cd766..64ab197 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -53,30 +53,13 @@
  *	a11/ original length
  */
 
+#include <linux/linkage.h>
 #include <variant/core.h>
-
-#ifdef __XTENSA_EB__
-#define ALIGN(R, W0, W1) src	R, W0, W1
-#define SSA8(R)	ssa8b R
-#else
-#define ALIGN(R, W0, W1) src	R, W1, W0
-#define SSA8(R)	ssa8l R
-#endif
-
-/* Load or store instructions that may cause exceptions use the EX macro. */
-
-#define EX(insn,reg1,reg2,offset,handler)	\
-9:	insn	reg1, reg2, offset;		\
-	.section __ex_table, "a";		\
-	.word	9b, handler;			\
-	.previous
-
+#include <asm/asmmacro.h>
 
 	.text
-	.align	4
-	.global	__xtensa_copy_user
-	.type	__xtensa_copy_user,@function
-__xtensa_copy_user:
+ENTRY(__xtensa_copy_user)
+
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
 	mov	a5, a2		# copy dst so that a2 is return value
@@ -89,7 +72,7 @@ __xtensa_copy_user:
 	# per iteration
 	movi	a8, 3		# if source is also aligned,
 	bnone	a3, a8, .Laligned # then use word copy
-	SSA8(	a3)		# set shift amount from byte offset
+	__ssa8	a3		# set shift amount from byte offset
 	bnez	a4, .Lsrcunaligned
 	movi	a2, 0		# return success for len==0
 	retw
@@ -102,9 +85,9 @@ __xtensa_copy_user:
 	bltui	a4, 7, .Lbytecopy	# do short copies byte by byte
 
 	# copy 1 byte
-	EX(l8ui, a6, a3, 0, fixup)
+EX(10f)	l8ui	a6, a3, 0
 	addi	a3, a3, 1
-	EX(s8i, a6, a5, 0, fixup)
+EX(10f)	s8i	a6, a5, 0
 	addi	a5, a5, 1
 	addi	a4, a4, -1
 	bbci.l	a5, 1, .Ldstaligned	# if dst is now aligned, then
@@ -112,11 +95,11 @@ __xtensa_copy_user:
 .Ldst2mod4:	# dst 16-bit aligned
 	# copy 2 bytes
 	bltui	a4, 6, .Lbytecopy	# do short copies byte by byte
-	EX(l8ui, a6, a3, 0, fixup)
-	EX(l8ui, a7, a3, 1, fixup)
+EX(10f)	l8ui	a6, a3, 0
+EX(10f)	l8ui	a7, a3, 1
 	addi	a3, a3, 2
-	EX(s8i, a6, a5, 0, fixup)
-	EX(s8i, a7, a5, 1, fixup)
+EX(10f)	s8i	a6, a5, 0
+EX(10f)	s8i	a7, a5, 1
 	addi	a5, a5, 2
 	addi	a4, a4, -2
 	j	.Ldstaligned	# dst is now aligned, return to main algorithm
@@ -135,9 +118,9 @@ __xtensa_copy_user:
 	add	a7, a3, a4	# a7 = end address for source
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lnextbyte:
-	EX(l8ui, a6, a3, 0, fixup)
+EX(10f)	l8ui	a6, a3, 0
 	addi	a3, a3, 1
-	EX(s8i, a6, a5, 0, fixup)
+EX(10f)	s8i	a6, a5, 0
 	addi	a5, a5, 1
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a7, .Lnextbyte
@@ -161,15 +144,15 @@ __xtensa_copy_user:
 	add	a8, a8, a3	# a8 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop1:
-	EX(l32i, a6, a3, 0, fixup)
-	EX(l32i, a7, a3, 4, fixup)
-	EX(s32i, a6, a5, 0, fixup)
-	EX(l32i, a6, a3, 8, fixup)
-	EX(s32i, a7, a5, 4, fixup)
-	EX(l32i, a7, a3, 12, fixup)
-	EX(s32i, a6, a5, 8, fixup)
+EX(10f)	l32i	a6, a3, 0
+EX(10f)	l32i	a7, a3, 4
+EX(10f)	s32i	a6, a5, 0
+EX(10f)	l32i	a6, a3, 8
+EX(10f)	s32i	a7, a5, 4
+EX(10f)	l32i	a7, a3, 12
+EX(10f)	s32i	a6, a5, 8
 	addi	a3, a3, 16
-	EX(s32i, a7, a5, 12, fixup)
+EX(10f)	s32i	a7, a5, 12
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a8, .Loop1
@@ -177,31 +160,31 @@ __xtensa_copy_user:
 .Loop1done:
 	bbci.l	a4, 3, .L2
 	# copy 8 bytes
-	EX(l32i, a6, a3, 0, fixup)
-	EX(l32i, a7, a3, 4, fixup)
+EX(10f)	l32i	a6, a3, 0
+EX(10f)	l32i	a7, a3, 4
 	addi	a3, a3, 8
-	EX(s32i, a6, a5, 0, fixup)
-	EX(s32i, a7, a5, 4, fixup)
+EX(10f)	s32i	a6, a5, 0
+EX(10f)	s32i	a7, a5, 4
 	addi	a5, a5, 8
 .L2:
 	bbci.l	a4, 2, .L3
 	# copy 4 bytes
-	EX(l32i, a6, a3, 0, fixup)
+EX(10f)	l32i	a6, a3, 0
 	addi	a3, a3, 4
-	EX(s32i, a6, a5, 0, fixup)
+EX(10f)	s32i	a6, a5, 0
 	addi	a5, a5, 4
 .L3:
 	bbci.l	a4, 1, .L4
 	# copy 2 bytes
-	EX(l16ui, a6, a3, 0, fixup)
+EX(10f)	l16ui	a6, a3, 0
 	addi	a3, a3, 2
-	EX(s16i, a6, a5, 0, fixup)
+EX(10f)	s16i	a6, a5, 0
 	addi	a5, a5, 2
.L4:
 	bbci.l	a4, 0, .L5
 	# copy 1 byte
-	EX(l8ui, a6, a3, 0, fixup)
-	EX(s8i, a6, a5, 0, fixup)
+EX(10f)	l8ui	a6, a3, 0
+EX(10f)	s8i	a6, a5, 0
 .L5:
 	movi	a2, 0		# return success for len bytes copied
 	retw
@@ -217,7 +200,7 @@ __xtensa_copy_user:
 	# copy 16 bytes per iteration for word-aligned dst and unaligned src
 	and	a10, a3, a8	# save unalignment offset for below
 	sub	a3, a3, a10	# align a3 (to avoid sim warnings only; not needed for hardware)
-	EX(l32i, a6, a3, 0, fixup)	# load first word
+EX(10f)	l32i	a6, a3, 0	# load first word
 #if XCHAL_HAVE_LOOPS
 	loopnez	a7, .Loop2done
 #else /* !XCHAL_HAVE_LOOPS */
@@ -226,19 +209,19 @@ __xtensa_copy_user:
 	add	a12, a12, a3	# a12 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop2:
-	EX(l32i, a7, a3, 4, fixup)
-	EX(l32i, a8, a3, 8, fixup)
-	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5, 0, fixup)
-	EX(l32i, a9, a3, 12, fixup)
-	ALIGN(	a7, a7, a8)
-	EX(s32i, a7, a5, 4, fixup)
-	EX(l32i, a6, a3, 16, fixup)
-	ALIGN(	a8, a8, a9)
-	EX(s32i, a8, a5, 8, fixup)
+EX(10f)	l32i	a7, a3, 4
+EX(10f)	l32i	a8, a3, 8
+	__src_b	a6, a6, a7
+EX(10f)	s32i	a6, a5, 0
+EX(10f)	l32i	a9, a3, 12
+	__src_b	a7, a7, a8
+EX(10f)	s32i	a7, a5, 4
+EX(10f)	l32i	a6, a3, 16
+	__src_b	a8, a8, a9
+EX(10f)	s32i	a8, a5, 8
 	addi	a3, a3, 16
-	ALIGN(	a9, a9, a6)
-	EX(s32i, a9, a5, 12, fixup)
+	__src_b	a9, a9, a6
+EX(10f)	s32i	a9, a5, 12
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a12, .Loop2
@@ -246,43 +229,44 @@ __xtensa_copy_user:
 .Loop2done:
 	bbci.l	a4, 3, .L12
 	# copy 8 bytes
-	EX(l32i, a7, a3, 4, fixup)
-	EX(l32i, a8, a3, 8, fixup)
-	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5, 0, fixup)
+EX(10f)	l32i	a7, a3, 4
+EX(10f)	l32i	a8, a3, 8
+	__src_b	a6, a6, a7
+EX(10f)	s32i	a6, a5, 0
 	addi	a3, a3, 8
-	ALIGN(	a7, a7, a8)
-	EX(s32i, a7, a5, 4, fixup)
+	__src_b	a7, a7, a8
+EX(10f)	s32i	a7, a5, 4
 	addi	a5, a5, 8
 	mov	a6, a8
 .L12:
 	bbci.l	a4, 2, .L13
 	# copy 4 bytes
-	EX(l32i, a7, a3, 4, fixup)
+EX(10f)	l32i	a7, a3, 4
 	addi	a3, a3, 4
-	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5, 0, fixup)
+	__src_b	a6, a6, a7
+EX(10f)	s32i	a6, a5, 0
 	addi	a5, a5, 4
 	mov	a6, a7
 .L13:
 	add	a3, a3, a10	# readjust a3 with correct misalignment
 	bbci.l	a4, 1, .L14
 	# copy 2 bytes
-	EX(l8ui, a6, a3, 0, fixup)
-	EX(l8ui, a7, a3, 1, fixup)
+EX(10f)	l8ui	a6, a3, 0
+EX(10f)	l8ui	a7, a3, 1
 	addi	a3, a3, 2
-	EX(s8i, a6, a5, 0, fixup)
-	EX(s8i, a7, a5, 1, fixup)
+EX(10f)	s8i	a6, a5, 0
+EX(10f)	s8i	a7, a5, 1
 	addi	a5, a5, 2
 .L14:
 	bbci.l	a4, 0, .L15
 	# copy 1 byte
-	EX(l8ui, a6, a3, 0, fixup)
-	EX(s8i, a6, a5, 0, fixup)
+EX(10f)	l8ui	a6, a3, 0
+EX(10f)	s8i	a6, a5, 0
 .L15:
 	movi	a2, 0		# return success for len bytes copied
 	retw
 
+ENDPROC(__xtensa_copy_user)
 
 	.section .fixup, "ax"
 	.align	4
@@ -294,7 +278,7 @@ __xtensa_copy_user:
  */
 
-fixup:
+10:
 	sub	a2, a5, a2	/* a2 <-- bytes copied */
 	sub	a2, a11, a2	/* a2 <-- bytes not copied */
 	retw
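The fixup at 10: converts a faulting copy into a count of bytes *not* copied,
with 0 meaning complete success; that is the contract copy_{to,from}_user()
expects from __xtensa_copy_user. Typical caller-side use (generic kernel
idiom, not part of this patch):

	#include <linux/uaccess.h>

	/* Returns 0 on success, -EFAULT if any part of the user buffer
	 * faulted; copy_from_user() reports the number of uncopied bytes. */
	static int fetch_from_user(void *dst, const void __user *src, size_t len)
	{
		if (copy_from_user(dst, src, len))
			return -EFAULT;
		return 0;
	}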
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index 0b3d296..734888a 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -5,3 +5,8 @@ obj-y := init.o misc.o
 obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o
 obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-$(CONFIG_KASAN) += kasan_init.o
+
+KASAN_SANITIZE_fault.o := n
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_mmu.o := n
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 3c75c4e..57dc231 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -33,9 +33,6 @@
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 
-//#define printd(x...) printk(x)
-#define printd(x...) do { } while(0)
-
 /*
  * Note:
  * The kernel provides one architecture bit PG_arch_1 in the page flags that
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index a14df5a..8b9b6f4 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -25,8 +25,6 @@ DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
 
 void bad_page_fault(struct pt_regs*, unsigned long, int);
 
-#undef DEBUG_PAGE_FAULT
-
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -68,10 +66,10 @@ void do_page_fault(struct pt_regs *regs)
 		 exccause == EXCCAUSE_ITLB_MISS ||
 		 exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
 
-#ifdef DEBUG_PAGE_FAULT
-	printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
-	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
-#endif
+	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
+		 current->comm, current->pid,
+		 address, exccause, regs->pc,
+		 is_write ? "w" : "", is_exec ? "x" : "");
 
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
@@ -247,10 +245,8 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 
 	/* Are we prepared to handle this kernel fault?  */
 	if ((entry = search_exception_tables(regs->pc)) != NULL) {
-#ifdef DEBUG_PAGE_FAULT
-		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
-				current->comm, regs->pc, entry->fixup);
-#endif
+		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
+			 current->comm, regs->pc, entry->fixup);
 		current->thread.bad_uaddr = address;
 		regs->pc = entry->fixup;
 		return;
@@ -259,9 +255,9 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 	/* Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-	printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
-	       "address %08lx\n pc = %08lx, ra = %08lx\n",
-	       address, regs->pc, regs->areg[0]);
+	pr_alert("Unable to handle kernel paging request at virtual "
+		 "address %08lx\n pc = %08lx, ra = %08lx\n",
+		 address, regs->pc, regs->areg[0]);
 	die("Oops", regs, sig);
 	do_exit(sig);
 }
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 720fe4e..d776ec0 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -100,29 +100,51 @@ void __init mem_init(void)
 	mem_init_print_info(NULL);
 	pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_KASAN
+		"    kasan   : 0x%08lx - 0x%08lx  (%5lu MB)\n"
+#endif
+#ifdef CONFIG_MMU
+		"    vmalloc : 0x%08lx - 0x%08lx  (%5lu MB)\n"
+#endif
 #ifdef CONFIG_HIGHMEM
 		"    pkmap   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
 		"    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
 #endif
-#ifdef CONFIG_MMU
-		"    vmalloc : 0x%08lx - 0x%08lx  (%5lu MB)\n"
+		"    lowmem  : 0x%08lx - 0x%08lx  (%5lu MB)\n"
+		"    .text   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
+		"    .rodata : 0x%08lx - 0x%08lx  (%5lu kB)\n"
+		"    .data   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
+		"    .init   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
+		"    .bss    : 0x%08lx - 0x%08lx  (%5lu kB)\n",
+#ifdef CONFIG_KASAN
+		KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
+		KASAN_SHADOW_SIZE >> 20,
 #endif
-		"    lowmem  : 0x%08lx - 0x%08lx  (%5lu MB)\n",
+#ifdef CONFIG_MMU
+		VMALLOC_START, VMALLOC_END,
+		(VMALLOC_END - VMALLOC_START) >> 20,
 #ifdef CONFIG_HIGHMEM
 		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
 		(LAST_PKMAP*PAGE_SIZE) >> 10,
 		FIXADDR_START, FIXADDR_TOP,
 		(FIXADDR_TOP - FIXADDR_START) >> 10,
 #endif
-#ifdef CONFIG_MMU
-		VMALLOC_START, VMALLOC_END,
-		(VMALLOC_END - VMALLOC_START) >> 20,
 		PAGE_OFFSET, PAGE_OFFSET +
 		(max_low_pfn - min_low_pfn) * PAGE_SIZE,
 #else
 		min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
 #endif
-		((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
+		((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
+		(unsigned long)_text, (unsigned long)_etext,
+		(unsigned long)(_etext - _text) >> 10,
+		(unsigned long)__start_rodata, (unsigned long)_sdata,
+		(unsigned long)(_sdata - __start_rodata) >> 10,
+		(unsigned long)_sdata, (unsigned long)_edata,
+		(unsigned long)(_edata - _sdata) >> 10,
+		(unsigned long)__init_begin, (unsigned long)__init_end,
+		(unsigned long)(__init_end - __init_begin) >> 10,
+		(unsigned long)__bss_start, (unsigned long)__bss_stop,
+		(unsigned long)(__bss_stop - __bss_start) >> 10);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
new file mode 100644
index 0000000..6b532b6
--- /dev/null
+++ b/arch/xtensa/mm/kasan_init.c
@@ -0,0 +1,95 @@
+/*
+ * Xtensa KASAN shadow map initialization
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2017 Cadence Design Systems Inc.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/initialize_mmu.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+void __init kasan_early_init(void)
+{
+	unsigned long vaddr = KASAN_SHADOW_START;
+	pgd_t *pgd = pgd_offset_k(vaddr);
+	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE; ++i)
+		set_pte(kasan_zero_pte + i,
+			mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL));
+
+	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
+		BUG_ON(!pmd_none(*pmd));
+		set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte));
+	}
+	early_trap_init();
+}
+
+static void __init populate(void *start, void *end)
+{
+	unsigned long n_pages = (end - start) / PAGE_SIZE;
+	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
+	unsigned long i, j;
+	unsigned long vaddr = (unsigned long)start;
+	pgd_t *pgd = pgd_offset_k(vaddr);
+	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+	pr_debug("%s: %p - %p\n", __func__, start, end);
+
+	for (i = j = 0; i < n_pmds; ++i) {
+		int k;
+
+		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
+			phys_addr_t phys =
+				memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+						    MEMBLOCK_ALLOC_ANYWHERE);
+
+			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+		}
+	}
+
+	for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
+		set_pmd(pmd + i, __pmd((unsigned long)pte));
+
+	local_flush_tlb_all();
+	memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+	int i;
+
+	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
+		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
+	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
+
+	/*
+	 * Replace shadow map pages that cover addresses from VMALLOC area
+	 * start to the end of KSEG with clean writable pages.
+	 */
+	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
+		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
+
+	/* Write protect kasan_zero_page and zero-initialize it again. */
+	for (i = 0; i < PTRS_PER_PTE; ++i)
+		set_pte(kasan_zero_pte + i,
+			mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO));
+
+	local_flush_tlb_all();
+	memset(kasan_zero_page, 0, PAGE_SIZE);
+
+	/* At this point kasan is fully initialized. Enable error messages. */
+	current->kasan_depth = 0;
+	pr_info("KernelAddressSanitizer initialized\n");
+}
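The BUILD_BUG_ON above ties KASAN_SHADOW_OFFSET (0x6e400000 from the Kconfig
hunk) to the generic shadow formula: one shadow byte tracks an 8-byte
granule of kernel address space. In C, a restatement of kasan_mem_to_shadow()
with the constants spelled out (the function name here is illustrative):

	#include <stdint.h>

	#define KASAN_SHADOW_SCALE_SHIFT 3		/* 8 bytes per shadow byte */
	#define KASAN_SHADOW_OFFSET	0x6e400000UL	/* from Kconfig above */

	/* shadow(addr) = (addr >> 3) + offset; a zero shadow byte means the
	 * whole 8-byte granule at 'addr' is addressable. */
	static inline uint8_t *mem_to_shadow(unsigned long addr)
	{
		return (uint8_t *)((addr >> KASAN_SHADOW_SCALE_SHIFT) +
				   KASAN_SHADOW_OFFSET);
	}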
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 358d748..9d1ecfc 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -56,7 +56,6 @@ static void __init fixedrange_init(void)
 
 void __init paging_init(void)
 {
-	memset(swapper_pg_dir, 0, PAGE_SIZE);
 #ifdef CONFIG_HIGHMEM
 	fixedrange_init();
 	pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
@@ -82,6 +81,23 @@ void init_mmu(void)
 	set_itlbcfg_register(0);
 	set_dtlbcfg_register(0);
 #endif
+	init_kio();
+	local_flush_tlb_all();
+
+	/* Set rasid register to a known value. */
+
+	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
+
+	/* Set PTEVADDR special register to the start of the page
+	 * table, which is in kernel mappable space (ie. not
+	 * statically mapped). This register's value is undefined on
+	 * reset.
+	 */
+	set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
+}
+
+void init_kio(void)
+{
 #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
 	/*
 	 * Update the IO area mapping in case xtensa_kio_paddr has changed
 	 */
@@ -95,17 +111,4 @@ void init_mmu(void)
 	write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
 			XCHAL_KIO_BYPASS_VADDR + 6);
 #endif
-
-	local_flush_tlb_all();
-
-	/* Set rasid register to a known value. */
-
-	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
-
-	/* Set PTEVADDR special register to the start of the page
-	 * table, which is in kernel mappable space (ie. not
-	 * statically mapped). This register's value is undefined on
-	 * reset.
-	 */
-	set_ptevaddr_register(PGTABLE_START);
 }
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 35c8222..59153d0 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -95,10 +95,8 @@ void local_flush_tlb_range(struct vm_area_struct *vma,
 	if (mm->context.asid[cpu] == NO_CONTEXT)
 		return;
 
-#if 0
-	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
-			(unsigned long)mm->context.asid[cpu], start, end);
-#endif
+	pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
+		 (unsigned long)mm->context.asid[cpu], start, end);
 	local_irq_save(flags);
 
 	if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 464c268..92f567f 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -185,7 +185,7 @@ int __init rs_init(void)
 
 	serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
 
-	printk ("%s %s\n", serial_name, serial_version);
+	pr_info("%s %s\n", serial_name, serial_version);
 
 	/* Initialize the tty_driver structure */
 
@@ -214,7 +214,7 @@ static __exit void rs_exit(void)
 	int error;
 
 	if ((error = tty_unregister_driver(serial_driver)))
-		printk("ISS_SERIAL: failed to unregister serial driver (%d)\n",
+		pr_err("ISS_SERIAL: failed to unregister serial driver (%d)\n",
 		       error);
 	put_tty_driver(serial_driver);
 	tty_port_destroy(&serial_port);
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 6363b18..d027ddd 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -16,6 +16,8 @@
  *
  */
 
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
 #include <linux/list.h>
 #include <linux/irq.h>
 #include <linux/spinlock.h>
@@ -606,8 +608,6 @@ struct iss_net_init {
  * those fields. They will be later initialized in iss_net_init.
  */
 
-#define ERR KERN_ERR "iss_net_setup: "
-
 static int __init iss_net_setup(char *str)
 {
 	struct iss_net_private *device = NULL;
@@ -619,14 +619,14 @@ static int __init iss_net_setup(char *str)
 
 	end = strchr(str, '=');
 	if (!end) {
-		printk(ERR "Expected '=' after device number\n");
+		pr_err("Expected '=' after device number\n");
 		return 1;
 	}
 	*end = 0;
 	rc = kstrtouint(str, 0, &n);
 	*end = '=';
 	if (rc < 0) {
-		printk(ERR "Failed to parse '%s'\n", str);
+		pr_err("Failed to parse '%s'\n", str);
 		return 1;
 	}
 	str = end;
@@ -642,13 +642,13 @@ static int __init iss_net_setup(char *str)
 	spin_unlock(&devices_lock);
 
 	if (device && device->index == n) {
-		printk(ERR "Device %u already configured\n", n);
+		pr_err("Device %u already configured\n", n);
 		return 1;
 	}
 
 	new = alloc_bootmem(sizeof(*new));
 	if (new == NULL) {
-		printk(ERR "Alloc_bootmem failed\n");
+		pr_err("Alloc_bootmem failed\n");
 		return 1;
 	}
 
@@ -660,8 +660,6 @@ static int __init iss_net_setup(char *str)
 	return 1;
 }
 
-#undef ERR
-
 __setup("eth", iss_net_setup);
 
 /* |