author    | Suresh Siddha <suresh.b.siddha@intel.com> | 2008-03-10 15:28:04 -0700
committer | Ingo Molnar <mingo@elte.hu>               | 2008-04-19 19:19:55 +0200
commit    | 61c4628b538608c1a85211ed8438136adfeb9a95 (patch)
tree      | 290a695299a363153bc692e6d705ac680d64359e /arch
parent    | fa5c4639419668cbb18ca3d20c1253559a3b43ae (diff)
x86, fpu: split FPU state from task struct - v5
Split the FPU save area out of the task struct. This makes migrating FPU
context easy and is generally cleaner. It also enables the following two
optimizations:
1) Allocate the save area only when the application actually uses the FPU,
i.e. on the first lazy FPU trap. This can save memory for applications that
never use the FPU. The next patch implements this lazy allocation.
2) Allocate only as much space as the running CPU actually needs, rather than
a fixed 512 bytes. The patches that enable xsave/xrstor support (coming
shortly) will take advantage of this.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
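To make the new layout easier to picture, here is a minimal user-space sketch of the idea. It is an illustration only, not the kernel code: the names thread_state, xstate_area, init_xstate_size and dup_thread_state are made up for this example. The FPU save area sits behind a pointer in the per-thread state, its size is chosen once at startup to match what the CPU needs, and it is allocated and copied explicitly whenever a task is duplicated.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins for the kernel structures. */
struct xstate_area { uint8_t bytes[512]; };     /* fxsave frame is 512 bytes */

struct thread_state {
        /* ... other per-thread fields ... */
        struct xstate_area *xstate;     /* FPU state now lives behind a pointer */
};

static size_t xstate_size;              /* chosen once, at startup */

static void init_xstate_size(int cpu_has_fxsr)
{
        /* Optimization 2: size the area for what this CPU actually needs. */
        xstate_size = cpu_has_fxsr ? sizeof(struct xstate_area)
                                   : 112; /* legacy fsave frame; size illustrative */
}

/* Analogue of copying the FPU state when a task is duplicated. */
static int dup_thread_state(struct thread_state *dst,
                            const struct thread_state *src)
{
        *dst = *src;
        /* fxsave/fxrstor need 16-byte alignment; round the size up to match. */
        dst->xstate = aligned_alloc(16, (xstate_size + 15) & ~(size_t)15);
        if (!dst->xstate)
                return -1;
        if (src->xstate)
                memcpy(dst->xstate, src->xstate, xstate_size);
        return 0;
}

Because every access goes through the pointer, the save area can later be allocated lazily on the first FPU trap and sized for xsave without changing the task struct layout again, which is what the two optimizations above rely on.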
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kernel/Makefile       |  1
-rw-r--r-- | arch/x86/kernel/i387.c         | 80
-rw-r--r-- | arch/x86/kernel/process.c      | 35
-rw-r--r-- | arch/x86/kernel/process_32.c   |  2
-rw-r--r-- | arch/x86/kernel/process_64.c   |  2
-rw-r--r-- | arch/x86/kernel/traps_32.c     |  6
-rw-r--r-- | arch/x86/kernel/traps_64.c     |  6
-rw-r--r-- | arch/x86/math-emu/fpu_entry.c  |  4
-rw-r--r-- | arch/x86/math-emu/fpu_system.h | 26
-rw-r--r-- | arch/x86/math-emu/reg_ld_str.c |  4
10 files changed, 107 insertions, 59 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index c3920ea..7a2a2e9 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_X86_64)    += pci-nommu_64.o bugs_64.o
 obj-y                          += tsc_$(BITS).o io_delay.o rtc.o
 
 obj-$(CONFIG_X86_TRAMPOLINE)   += trampoline.o
+obj-y                          += process.o
 obj-y                          += i387.o
 obj-y                          += ptrace.o
 obj-y                          += ds.o
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 8f8102d..baf632b 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/regset.h>
 #include <linux/sched.h>
+#include <linux/bootmem.h>
 
 #include <asm/sigcontext.h>
 #include <asm/processor.h>
@@ -35,17 +36,18 @@
 #endif
 
 static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+unsigned int xstate_size;
+static struct i387_fxsave_struct fx_scratch __cpuinitdata;
 
-void mxcsr_feature_mask_init(void)
+void __cpuinit mxcsr_feature_mask_init(void)
 {
        unsigned long mask = 0;
 
        clts();
        if (cpu_has_fxsr) {
-               memset(&current->thread.i387.fxsave, 0,
-                      sizeof(struct i387_fxsave_struct));
-               asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave));
-               mask = current->thread.i387.fxsave.mxcsr_mask;
+               memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
+               asm volatile("fxsave %0" : : "m" (fx_scratch));
+               mask = fx_scratch.mxcsr_mask;
                if (mask == 0)
                        mask = 0x0000ffbf;
        }
@@ -53,6 +55,17 @@ void mxcsr_feature_mask_init(void)
        stts();
 }
 
+void __init init_thread_xstate(void)
+{
+       if (cpu_has_fxsr)
+               xstate_size = sizeof(struct i387_fxsave_struct);
+#ifdef CONFIG_X86_32
+       else
+               xstate_size = sizeof(struct i387_fsave_struct);
+#endif
+       init_task.thread.xstate = alloc_bootmem(xstate_size);
+}
+
 #ifdef CONFIG_X86_64
 /*
  * Called at bootup to set up the initial FPU state that is later cloned
@@ -61,10 +74,6 @@ void mxcsr_feature_mask_init(void)
 void __cpuinit fpu_init(void)
 {
        unsigned long oldcr0 = read_cr0();
-       extern void __bad_fxsave_alignment(void);
-
-       if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
-               __bad_fxsave_alignment();
 
        set_in_cr4(X86_CR4_OSFXSR);
        set_in_cr4(X86_CR4_OSXMMEXCPT);
@@ -93,18 +102,19 @@ void init_fpu(struct task_struct *tsk)
        }
 
        if (cpu_has_fxsr) {
-               memset(&tsk->thread.i387.fxsave, 0,
-                      sizeof(struct i387_fxsave_struct));
-               tsk->thread.i387.fxsave.cwd = 0x37f;
+               struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+
+               memset(fx, 0, xstate_size);
+               fx->cwd = 0x37f;
                if (cpu_has_xmm)
-                       tsk->thread.i387.fxsave.mxcsr = MXCSR_DEFAULT;
+                       fx->mxcsr = MXCSR_DEFAULT;
        } else {
-               memset(&tsk->thread.i387.fsave, 0,
-                      sizeof(struct i387_fsave_struct));
-               tsk->thread.i387.fsave.cwd = 0xffff037fu;
-               tsk->thread.i387.fsave.swd = 0xffff0000u;
-               tsk->thread.i387.fsave.twd = 0xffffffffu;
-               tsk->thread.i387.fsave.fos = 0xffff0000u;
+               struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+               memset(fp, 0, xstate_size);
+               fp->cwd = 0xffff037fu;
+               fp->swd = 0xffff0000u;
+               fp->twd = 0xffffffffu;
+               fp->fos = 0xffff0000u;
        }
        /*
        * Only the device not available exception or ptrace can call init_fpu.
@@ -132,7 +142,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
        init_fpu(target);
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                  &target->thread.i387.fxsave, 0, -1);
+                                  &target->thread.xstate->fxsave, 0, -1);
 }
 
 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -148,12 +158,12 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
        set_stopped_child_used_math(target);
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                &target->thread.i387.fxsave, 0, -1);
+                                &target->thread.xstate->fxsave, 0, -1);
 
        /*
         * mxcsr reserved bits must be masked to zero for security reasons.
         */
-       target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
+       target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
 
        return ret;
 }
@@ -233,7 +243,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
 static void convert_from_fxsr(struct user_i387_ia32_struct *env,
                              struct task_struct *tsk)
 {
-       struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
+       struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
        struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
        struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
        int i;
@@ -273,7 +283,7 @@ static void convert_to_fxsr(struct task_struct *tsk,
                            const struct user_i387_ia32_struct *env)
 {
-       struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
+       struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
        struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
        struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
        int i;
 
@@ -310,7 +320,8 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 
        if (!cpu_has_fxsr) {
                return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                          &target->thread.i387.fsave, 0, -1);
+                                          &target->thread.xstate->fsave, 0,
+                                          -1);
        }
 
        if (kbuf && pos == 0 && count == sizeof(env)) {
@@ -338,7 +349,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 
        if (!cpu_has_fxsr) {
                return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                         &target->thread.i387.fsave, 0, -1);
+                                         &target->thread.xstate->fsave, 0, -1);
        }
 
        if (pos > 0 || count < sizeof(env))
@@ -358,11 +369,11 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
        struct task_struct *tsk = current;
+       struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
 
        unlazy_fpu(tsk);
-       tsk->thread.i387.fsave.status = tsk->thread.i387.fsave.swd;
-       if (__copy_to_user(buf, &tsk->thread.i387.fsave,
-                          sizeof(struct i387_fsave_struct)))
+       fp->status = fp->swd;
+       if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
                return -1;
        return 1;
 }
@@ -370,6 +381,7 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
 static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
 {
        struct task_struct *tsk = current;
+       struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
        struct user_i387_ia32_struct env;
        int err = 0;
 
@@ -379,12 +391,12 @@ static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
        if (__copy_to_user(buf, &env, sizeof(env)))
                return -1;
 
-       err |= __put_user(tsk->thread.i387.fxsave.swd, &buf->status);
+       err |= __put_user(fx->swd, &buf->status);
        err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
        if (err)
                return -1;
 
-       if (__copy_to_user(&buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
+       if (__copy_to_user(&buf->_fxsr_env[0], fx,
                           sizeof(struct i387_fxsave_struct)))
                return -1;
        return 1;
@@ -417,7 +429,7 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
        struct task_struct *tsk = current;
 
        clear_fpu(tsk);
-       return __copy_from_user(&tsk->thread.i387.fsave, buf,
+       return __copy_from_user(&tsk->thread.xstate->fsave, buf,
                                sizeof(struct i387_fsave_struct));
 }
 
@@ -428,10 +440,10 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
        int err;
 
        clear_fpu(tsk);
-       err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
+       err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
                               sizeof(struct i387_fxsave_struct));
        /* mxcsr reserved bits must be masked to zero for security reasons */
-       tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
+       tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
        if (err || __copy_from_user(&env, buf, sizeof(env)))
                return 1;
        convert_to_fxsr(tsk, &env);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
new file mode 100644
index 0000000..ead24ef
--- /dev/null
+++ b/arch/x86/kernel/process.c
@@ -0,0 +1,35 @@
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+static struct kmem_cache *task_xstate_cachep;
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+       *dst = *src;
+       dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+       if (!dst->thread.xstate)
+               return -ENOMEM;
+       WARN_ON((unsigned long)dst->thread.xstate & 15);
+       memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+       return 0;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+       kmem_cache_free(task_xstate_cachep, ti->task->thread.xstate);
+       ti->task->thread.xstate = NULL;
+
+       free_pages((unsigned long)(ti), get_order(THREAD_SIZE));
+}
+
+void arch_task_cache_init(void)
+{
+       task_xstate_cachep =
+               kmem_cache_create("task_xstate", xstate_size,
+                                 __alignof__(union thread_xstate),
+                                 SLAB_PANIC, NULL);
+}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index a3790a3..3890a5d 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -703,7 +703,7 @@ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct
 
        /* we're going to use this soon, after a few expensive things */
        if (next_p->fpu_counter > 5)
-               prefetch(&next->i387.fxsave);
+               prefetch(next->xstate);
 
        /*
         * Reload esp0.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 4c13b14..b795e83 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -682,7 +682,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        /* we're going to use this soon, after a few expensive things */
        if (next_p->fpu_counter>5)
-               prefetch(&next->i387.fxsave);
+               prefetch(next->xstate);
 
        /*
         * Reload esp0, LDT and the page table pointer:
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index dc42730..8d136a7 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -1208,11 +1208,6 @@ void __init trap_init(void)
 #endif
        set_trap_gate(19, &simd_coprocessor_error);
 
-       /*
-        * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
-        * Generate a build-time error if the alignment is wrong.
-        */
-       BUILD_BUG_ON(offsetof(struct task_struct, thread.i387.fxsave) & 15);
        if (cpu_has_fxsr) {
                printk(KERN_INFO "Enabling fast FPU save and restore... ");
                set_in_cr4(X86_CR4_OSFXSR);
@@ -1233,6 +1228,7 @@ void __init trap_init(void)
 
        set_bit(SYSCALL_VECTOR, used_vectors);
 
+       init_thread_xstate();
        /*
         * Should be a barrier for any external CPU state:
         */
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 6d883b1..dc0cb49 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -1128,7 +1128,7 @@ asmlinkage void math_state_restore(void)
 
        if (!used_math())
                init_fpu(me);
-       restore_fpu_checking(&me->thread.i387.fxsave);
+       restore_fpu_checking(&me->thread.xstate->fxsave);
        task_thread_info(me)->status |= TS_USEDFPU;
        me->fpu_counter++;
 }
@@ -1164,6 +1164,10 @@ void __init trap_init(void)
 #endif
 
        /*
+        * initialize the per thread extended state:
+        */
+       init_thread_xstate();
+       /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 4bab3b1..6e38d87 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -678,7 +678,7 @@ int fpregs_soft_set(struct task_struct *target,
                    unsigned int pos, unsigned int count,
                    const void *kbuf, const void __user *ubuf)
 {
-       struct i387_soft_struct *s387 = &target->thread.i387.soft;
+       struct i387_soft_struct *s387 = &target->thread.xstate->soft;
        void *space = s387->st_space;
        int ret;
        int offset, other, i, tags, regnr, tag, newtop;
@@ -730,7 +730,7 @@ int fpregs_soft_get(struct task_struct *target,
                    unsigned int pos, unsigned int count,
                    void *kbuf, void __user *ubuf)
 {
-       struct i387_soft_struct *s387 = &target->thread.i387.soft;
+       struct i387_soft_struct *s387 = &target->thread.xstate->soft;
        const void *space = s387->st_space;
        int ret;
        int offset = (S387->ftop & 7) * 10, other = 80 - offset;
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index a3ae28c..13488fa 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -35,8 +35,8 @@
 #define SEG_EXPAND_DOWN(s)     (((s).b & ((1 << 11) | (1 << 10))) \
                                 == (1 << 10))
 
-#define I387                   (current->thread.i387)
-#define FPU_info               (I387.soft.info)
+#define I387                   (current->thread.xstate)
+#define FPU_info               (I387->soft.info)
 
 #define FPU_CS                 (*(unsigned short *) &(FPU_info->___cs))
 #define FPU_SS                 (*(unsigned short *) &(FPU_info->___ss))
@@ -46,25 +46,25 @@
 #define FPU_EIP                        (FPU_info->___eip)
 #define FPU_ORIG_EIP           (FPU_info->___orig_eip)
 
-#define FPU_lookahead          (I387.soft.lookahead)
+#define FPU_lookahead          (I387->soft.lookahead)
 
 /* nz if ip_offset and cs_selector are not to be set for the current
    instruction. */
-#define no_ip_update           (*(u_char *)&(I387.soft.no_update))
-#define FPU_rm                 (*(u_char *)&(I387.soft.rm))
+#define no_ip_update           (*(u_char *)&(I387->soft.no_update))
+#define FPU_rm                 (*(u_char *)&(I387->soft.rm))
 
 /* Number of bytes of data which can be legally accessed by the current
    instruction. This only needs to hold a number <= 108, so a byte will do. */
-#define access_limit           (*(u_char *)&(I387.soft.alimit))
+#define access_limit           (*(u_char *)&(I387->soft.alimit))
 
-#define partial_status         (I387.soft.swd)
-#define control_word           (I387.soft.cwd)
-#define fpu_tag_word           (I387.soft.twd)
-#define registers              (I387.soft.st_space)
-#define top                    (I387.soft.ftop)
+#define partial_status         (I387->soft.swd)
+#define control_word           (I387->soft.cwd)
+#define fpu_tag_word           (I387->soft.twd)
+#define registers              (I387->soft.st_space)
+#define top                    (I387->soft.ftop)
 
-#define instruction_address    (*(struct address *)&I387.soft.fip)
-#define operand_address                (*(struct address *)&I387.soft.foo)
+#define instruction_address    (*(struct address *)&I387->soft.fip)
+#define operand_address                (*(struct address *)&I387->soft.foo)
 
 #define FPU_access_ok(x,y,z)   if ( !access_ok(x,y,z) ) \
                                math_abort(FPU_info,SIGSEGV)
diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c
index 02af772..d597fe7 100644
--- a/arch/x86/math-emu/reg_ld_str.c
+++ b/arch/x86/math-emu/reg_ld_str.c
@@ -1180,8 +1180,8 @@ u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d)
        control_word |= 0xffff0040;
        partial_status = status_word() | 0xffff0000;
        fpu_tag_word |= 0xffff0000;
-       I387.soft.fcs &= ~0xf8000000;
-       I387.soft.fos |= 0xffff0000;
+       I387->soft.fcs &= ~0xf8000000;
+       I387->soft.fos |= 0xffff0000;
 #endif /* PECULIAR_486 */
        if (__copy_to_user(d, &control_word, 7 * 4))
                FPU_abort;