Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/include/asm/fpu.h	|   3
-rw-r--r--	arch/sh/kernel/cpu/sh4/fpu.c	|  16
-rw-r--r--	arch/sh/kernel/process_32.c	|  16
-rw-r--r--	arch/sh/lib/Makefile		|   7
-rw-r--r--	arch/sh/lib/memset-sh4.S	| 107
5 files changed, 143 insertions, 6 deletions
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 1d3aee0..bfd78e1 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -19,6 +19,7 @@ static inline void grab_fpu(struct pt_regs *regs)
 struct task_struct;
 
 extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs);
+void fpu_state_restore(struct pt_regs *regs);
 #else
 
 #define release_fpu(regs)	do { } while (0)
@@ -44,6 +45,8 @@ static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
 	preempt_disable();
 	if (test_tsk_thread_flag(tsk, TIF_USEDFPU))
 		save_fpu(tsk, regs);
+	else
+		tsk->fpu_counter = 0;
 	preempt_enable();
 }
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index e3ea541..d79226f 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -483,18 +483,18 @@ BUILD_TRAP_HANDLER(fpu_error)
 	force_sig(SIGFPE, tsk);
 }
 
-BUILD_TRAP_HANDLER(fpu_state_restore)
+void fpu_state_restore(struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
-	TRAP_HANDLER_DECL;
 
 	grab_fpu(regs);
-	if (!user_mode(regs)) {
+	if (unlikely(!user_mode(regs))) {
 		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+		BUG();
 		return;
 	}
 
-	if (used_math()) {
+	if (likely(used_math())) {
 		/* Using the FPU again.  */
 		restore_fpu(tsk);
 	} else {
@@ -503,4 +503,12 @@ BUILD_TRAP_HANDLER(fpu_state_restore)
 		set_used_math();
 	}
 	set_tsk_thread_flag(tsk, TIF_USEDFPU);
+	tsk->fpu_counter++;
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+	TRAP_HANDLER_DECL;
+
+	fpu_state_restore(regs);
 }
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index a40342b..7733f5f 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -290,8 +290,14 @@ static void ubc_set_tracing(int asid, unsigned long pc)
 __notrace_funcgraph struct task_struct *
 __switch_to(struct task_struct *prev, struct task_struct *next)
 {
+	struct thread_struct *next_t = &next->thread;
+
 #if defined(CONFIG_SH_FPU)
 	unlazy_fpu(prev, task_pt_regs(prev));
+
+	/* we're going to use this soon, after a few expensive things */
+	if (next->fpu_counter > 5)
+		prefetch(&next_t->fpu.hard);
 #endif
 
 #ifdef CONFIG_MMU
@@ -323,6 +329,16 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 #endif
 	}
 
+#if defined(CONFIG_SH_FPU)
+	/* If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	if (next->fpu_counter > 5) {
+		fpu_state_restore(task_pt_regs(next));
+	}
+#endif
+
 	return prev;
 }
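Note: the three files above bring the fpu_counter heuristic (familiar from
x86's lazy-FPU code) to SH: unlazy_fpu() zeroes the counter on any timeslice
that never touched the FPU, the fpu_state_restore trap increments it, and
__switch_to() preloads the FPU state outright once the counter passes 5,
avoiding the trap entirely. A minimal user-space C sketch of that policy
follows; struct task, restore_fpu_state() and switch_in() are illustrative
stand-ins, not the kernel's types.

#include <stdio.h>

/* Stand-in for task_struct; only the fields the heuristic needs. */
struct task {
	int fpu_counter;	/* consecutive timeslices that used the FPU */
	int fpu_loaded;		/* stands in for the TIF_USEDFPU flag */
};

/* Stand-in for the expensive register reload done by restore_fpu(). */
static void restore_fpu_state(struct task *t)
{
	t->fpu_loaded = 1;
	printf("eager FPU restore (fpu_counter=%d)\n", t->fpu_counter);
}

/* Mirrors the __switch_to() hunk: preload after more than 5 FPU-active
 * timeslices; otherwise stay lazy and let the first FPU insn trap. */
static void switch_in(struct task *next)
{
	if (next->fpu_counter > 5)
		restore_fpu_state(next);	/* trap avoided */
}

int main(void)
{
	struct task heavy = { .fpu_counter = 7, .fpu_loaded = 0 };
	struct task light = { .fpu_counter = 1, .fpu_loaded = 0 };

	switch_in(&heavy);	/* restores eagerly */
	switch_in(&light);	/* stays lazy; the trap restores later */
	return 0;
}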
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index a969b47..dab4d21 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for SuperH-specific library files..
 #
 
-lib-y  = delay.o memset.o memmove.o memchr.o \
+lib-y  = delay.o memmove.o memchr.o \
 	 checksum.o strlen.o div64.o div64-generic.o
 
 # Extracted from libgcc
@@ -23,8 +23,11 @@ obj-y	+= io.o
 memcpy-y			:= memcpy.o
 memcpy-$(CONFIG_CPU_SH4)	:= memcpy-sh4.o
 
+memset-y			:= memset.o
+memset-$(CONFIG_CPU_SH4)	:= memset-sh4.o
+
 lib-$(CONFIG_MMU)	+= copy_page.o __clear_user.o
 lib-$(CONFIG_MCOUNT)	+= mcount.o
-lib-y	+= $(memcpy-y) $(udivsi3-y)
+lib-y	+= $(memcpy-y) $(memset-y) $(udivsi3-y)
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/lib/memset-sh4.S b/arch/sh/lib/memset-sh4.S
new file mode 100644
index 0000000..1a6e32c
--- /dev/null
+++ b/arch/sh/lib/memset-sh4.S
@@ -0,0 +1,107 @@
+/*
+ * "memset" implementation for SH4
+ *
+ * Copyright (C) 1999  Niibe Yutaka
+ * Copyright (c) 2009  STMicroelectronics Limited
+ * Author: Stuart Menefy <stuart.menefy@st.com>
+ */
+
+/*
+ * void *memset(void *s, int c, size_t n);
+ */
+
+#include <linux/linkage.h>
+
+ENTRY(memset)
+	mov	#12,r0
+	add	r6,r4
+	cmp/gt	r6,r0
+	bt/s	40f		! if it's too small, set a byte at once
+	 mov	r4,r0
+	and	#3,r0
+	cmp/eq	#0,r0
+	bt/s	2f		! It's aligned
+	 sub	r0,r6
+1:
+	dt	r0
+	bf/s	1b
+	 mov.b	r5,@-r4
+2:				! make VVVV
+	extu.b	r5,r5
+	swap.b	r5,r0		!   V0
+	or	r0,r5		!   VV
+	swap.w	r5,r0		! VV00
+	or	r0,r5		! VVVV
+
+	! Check if enough bytes need to be copied to be worth the big loop
+	mov	#0x40, r0	! (MT)
+	cmp/gt	r6,r0		! (MT)  64 > len => slow loop
+
+	bt/s	22f
+	 mov	r6,r0
+
+	! align the dst to the cache block size if necessary
+	mov	r4, r3
+	mov	#~(0x1f), r1
+
+	and	r3, r1
+	cmp/eq	r3, r1
+
+	bt/s	11f		! dst is already aligned
+	 sub	r1, r3		! r3-r1 -> r3
+	shlr2	r3		! number of loops
+
+10:	mov.l	r5,@-r4
+	dt	r3
+	bf/s	10b
+	 add	#-4, r6
+
+11:	! dst is 32byte aligned
+	mov	r6,r2
+	mov	#-5,r0
+	shld	r0,r2		! number of loops
+
+	add	#-32, r4
+	mov	r5, r0
+
+12:
+	movca.l	r0,@r4
+	mov.l	r5,@(4, r4)
+	mov.l	r5,@(8, r4)
+	mov.l	r5,@(12,r4)
+	mov.l	r5,@(16,r4)
+	mov.l	r5,@(20,r4)
+	add	#-0x20, r6
+	mov.l	r5,@(24,r4)
+	dt	r2
+	mov.l	r5,@(28,r4)
+	bf/s	12b
+	 add	#-32, r4
+
+	add	#32, r4
+	mov	#8, r0
+	cmp/ge	r0, r6
+	bf	40f
+
+	mov	r6,r0
+22:
+	shlr2	r0
+	shlr	r0		! r0 = r6 >> 3
+3:
+	dt	r0
+	mov.l	r5,@-r4		! set 8-byte at once
+	bf/s	3b
+	 mov.l	r5,@-r4
+	!
+	mov	#7,r0
+	and	r0,r6
+
+	! fill bytes (length may be zero)
+40:	tst	r6,r6
+	bt	5f
+4:
+	dt	r6
+	bf/s	4b
+	 mov.b	r5,@-r4
+5:
+	rts
+	 mov	r4,r0
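Note: memset-sh4.S fills backwards from s + n. Lengths below 12 take a pure
byte loop; otherwise the end pointer is first byte-aligned to 4, the fill
byte is replicated into a full word ("make VVVV"), and lengths of 64 and up
are written as whole 32-byte cache lines, where movca.l allocates each line
in the cache without first fetching its old contents from memory. A rough C
rendering of that control flow, purely illustrative (memset_sketch is not
the kernel's code, and the word-sized casts gloss over strict aliasing):

#include <stddef.h>
#include <stdint.h>

void *memset_sketch(void *s, int c, size_t n)
{
	unsigned char *p = (unsigned char *)s + n;	/* fill from the end */
	uint32_t v = (unsigned char)c;

	if (n < 12)			/* too small for word stores */
		goto bytes;

	while ((uintptr_t)p & 3) {	/* byte-align the end pointer */
		*--p = (unsigned char)c;
		n--;
	}

	v |= v << 8;			/* "make VVVV": every byte = c */
	v |= v << 16;

	if (n >= 0x40) {
		/* back up to a 32-byte boundary with word stores */
		while ((uintptr_t)p & 0x1f) {
			p -= 4;
			*(uint32_t *)p = v;
			n -= 4;
		}
		/* whole cache lines; the asm uses movca.l for the first
		 * store of each line, so the line is never read in */
		while (n >= 32) {
			uint32_t *q = (uint32_t *)(p -= 32);
			q[0] = q[1] = q[2] = q[3] = v;
			q[4] = q[5] = q[6] = q[7] = v;
			n -= 32;
		}
	}

	while (n >= 8) {		/* 8 bytes per iteration */
		p -= 4; *(uint32_t *)p = v;
		p -= 4; *(uint32_t *)p = v;
		n -= 8;
	}
bytes:
	while (n--)			/* byte tail (may be zero) */
		*--p = (unsigned char)c;
	return s;
}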