-rw-r--r--   arch/powerpc/kernel/Makefile     |   5
-rw-r--r--   arch/powerpc/kernel/fpu.S        |  31
-rw-r--r--   arch/powerpc/kernel/misc_32.S    |  27
-rw-r--r--   arch/powerpc/kernel/misc_64.S    |  19
-rw-r--r--   arch/powerpc/kernel/process.c    |   2
-rw-r--r--   arch/powerpc/kernel/signal_32.c  |   2
-rw-r--r--   arch/powerpc/kernel/traps.c      |   2
-rw-r--r--   arch/ppc/kernel/align.c          |   4
-rw-r--r--   arch/ppc/kernel/misc.S           |  27
-rw-r--r--   arch/ppc/kernel/process.c        |   2
-rw-r--r--   arch/ppc/kernel/traps.c          |   2
-rw-r--r--   arch/ppc/math-emu/sfp-machine.h  |   2
-rw-r--r--   arch/ppc64/Kconfig               |   3
-rw-r--r--   arch/ppc64/Makefile              |   1
-rw-r--r--   arch/ppc64/kernel/align.c        |   4
-rw-r--r--   arch/ppc64/kernel/head.S         |  59
-rw-r--r--   arch/ppc64/kernel/misc.S         |  51
-rw-r--r--   arch/ppc64/kernel/signal.c       |   2
-rw-r--r--   include/asm-powerpc/processor.h  |  11
-rw-r--r--   include/asm-powerpc/system.h     |   4
-rw-r--r--   include/asm-ppc/system.h         |   4
-rw-r--r--   include/asm-ppc64/system.h       |   4
22 files changed, 59 insertions, 209 deletions
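
The interface change running through these files: cvt_fd() and cvt_df() now live only in arch/powerpc/kernel/fpu.S and take a pointer to the whole thread_struct instead of a pointer to the fpscr word, so the alignment-fixup callers pass &current->thread. A minimal userspace model of the new calling convention (illustrative only -- thread_model and the trivial C bodies are stand-ins; the real routines are the assembly added to fpu.S below, which also restores and re-saves the live FPSCR around the conversion):

	/* Minimal model of the new cvt_fd()/cvt_df() calling convention.
	 * Purely illustrative: the real routines are PowerPC assembly in
	 * fpu.S and also load and re-save the hardware FPSCR. */
	#include <stdio.h>

	struct thread_model {			/* stand-in for struct thread_struct */
		double fpr[32];
		struct {
			unsigned int pad;
			unsigned int val;	/* FPSCR value lives here */
		} fpscr;
	};

	static void cvt_fd(float *from, double *to, struct thread_model *thread)
	{
		(void)thread;			/* real code restores/saves thread->fpscr */
		*to = (double)*from;
	}

	static void cvt_df(double *from, float *to, struct thread_model *thread)
	{
		(void)thread;
		*to = (float)*from;
	}

	int main(void)
	{
		struct thread_model thread = { .fpscr = { .val = 0 } };
		float f = 1.5f;
		double d;

		cvt_fd(&f, &d, &thread);	/* callers now pass the whole thread_struct */
		cvt_df(&d, &f, &thread);
		printf("%f %f\n", d, (double)f);
		return 0;
	}
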
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a733347..94cf917 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -29,7 +29,6 @@ extra-$(CONFIG_44x) := head_44x.o
 extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
 extra-$(CONFIG_8xx) := head_8xx.o
 extra-$(CONFIG_PPC64) += entry_64.o
-extra-$(CONFIG_PPC_FPU) += fpu.o
 extra-y += vmlinux.lds

 obj-y += process.o init_task.o time.o \
@@ -49,7 +48,7 @@ else
 # stuff used from here for ARCH=ppc or ARCH=ppc64
 obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o
-fpux-$(CONFIG_PPC32) += fpu.o
-extra-$(CONFIG_PPC_FPU) += $(fpux-y)
 endif
+
+extra-$(CONFIG_PPC_FPU) += fpu.o
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 563d445..51fd78d 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -48,7 +48,7 @@ _GLOBAL(load_up_fpu)
 	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
 	SAVE_32FPRS(0, r4)
 	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r4)
+	stfd	fr0,THREAD_FPSCR(r4)
 	LDL	r5,PT_REGS(r4)
 	tophys(r5,r5)
 	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -71,7 +71,7 @@ _GLOBAL(load_up_fpu)
 	or	r12,r12,r4
 	std	r12,_MSR(r1)
 #endif
-	lfd	fr0,THREAD_FPSCR-4(r5)
+	lfd	fr0,THREAD_FPSCR(r5)
 	mtfsf	0xff,fr0
 	REST_32FPRS(0, r5)
 #ifndef CONFIG_SMP
@@ -104,7 +104,7 @@ _GLOBAL(giveup_fpu)
 	CMPI	0,r5,0
 	SAVE_32FPRS(0, r3)
 	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r3)
+	stfd	fr0,THREAD_FPSCR(r3)
 	beq	1f
 	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	li	r3,MSR_FP|MSR_FE0|MSR_FE1
@@ -117,3 +117,28 @@ _GLOBAL(giveup_fpu)
 	STL	r5,OFF(last_task_used_math)(r4)
 #endif /* CONFIG_SMP */
 	blr
+
+/*
+ * These are used in the alignment trap handler when emulating
+ * single-precision loads and stores.
+ * We restore and save the fpscr so the task gets the same result
+ * and exceptions as if the cpu had performed the load or store.
+ */
+
+_GLOBAL(cvt_fd)
+	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
+	mtfsf	0xff,0
+	lfs	0,0(r3)
+	stfd	0,0(r4)
+	mffs	0
+	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
+	blr
+
+_GLOBAL(cvt_df)
+	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
+	mtfsf	0xff,0
+	lfd	0,0(r3)
+	stfs	0,0(r4)
+	mffs	0
+	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
+	blr
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 303229b..3bedb53 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -993,33 +993,6 @@ _GLOBAL(_get_SP)
 	blr

 /*
- * These are used in the alignment trap handler when emulating
- * single-precision loads and stores.
- * We restore and save the fpscr so the task gets the same result
- * and exceptions as if the cpu had performed the load or store.
- */
-
-#ifdef CONFIG_PPC_FPU
-_GLOBAL(cvt_fd)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-#endif
-
-/*
  * Create a kernel thread
  * kernel_thread(fn, arg, flags)
  */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 4775bed..b3e95ff 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -462,25 +462,6 @@ _GLOBAL(_outsl_ns)
 	sync
 	blr
-
-_GLOBAL(cvt_fd)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
 /*
  * identify_cpu and calls setup_cpu
  * In:	r3 = base of the cpu_specs array
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 047da1a..8f85dab 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -665,7 +665,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #endif
 #endif /* CONFIG_SMP */
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
 	memset(current->thread.vr, 0, sizeof(current->thread.vr));
 	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 92452b2..444c3e8 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -403,7 +403,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 			ELF_NFPREG * sizeof(double)))
 		return 1;
-	current->thread.fpscr = 0;	/* turn off all fp exceptions */
+	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
 #ifdef CONFIG_ALTIVEC
 	/* save altivec registers */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f875803..5d638ec 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -549,7 +549,7 @@ static void parse_fpe(struct pt_regs *regs)
 	flush_fp_to_thread(current);

-	fpscr = current->thread.fpscr;
+	fpscr = current->thread.fpscr.val;

 	/* Invalid operation */
 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c
index ff81da9..ab398c4 100644
--- a/arch/ppc/kernel/align.c
+++ b/arch/ppc/kernel/align.c
@@ -375,7 +375,7 @@ fix_alignment(struct pt_regs *regs)
 #ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_fd(&data.f, &data.d, &current->thread.fpscr);
+		cvt_fd(&data.f, &data.d, &current->thread);
 		preempt_enable();
 #else
 		return 0;
@@ -385,7 +385,7 @@ fix_alignment(struct pt_regs *regs)
 #ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_df(&data.d, &data.f, &current->thread.fpscr);
+		cvt_df(&data.d, &data.f, &current->thread);
 		preempt_enable();
 #else
 		return 0;
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 2350f3e..3056ede 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -968,33 +968,6 @@ _GLOBAL(_get_SP)
 	blr

 /*
- * These are used in the alignment trap handler when emulating
- * single-precision loads and stores.
- * We restore and save the fpscr so the task gets the same result
- * and exceptions as if the cpu had performed the load or store.
- */
-
-#ifdef CONFIG_PPC_FPU
-_GLOBAL(cvt_fd)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-#endif
-
-/*
  * Create a kernel thread
  * kernel_thread(fn, arg, flags)
  */
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index 6d60c40..78ea101 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -542,7 +542,7 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
 	last_task_used_spe = NULL;
 #endif
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
 	memset(current->thread.vr, 0, sizeof(current->thread.vr));
 	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 5e4bf88..f265b81 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -659,7 +659,7 @@ void program_check_exception(struct pt_regs *regs)
 		giveup_fpu(current);
 		preempt_enable();

-		fpscr = current->thread.fpscr;
+		fpscr = current->thread.fpscr.val;

 		fpscr &= fpscr << 22;	/* mask summary bits with enables */
 		if (fpscr & FPSCR_VX)
 			code = FPE_FLTINV;
diff --git a/arch/ppc/math-emu/sfp-machine.h b/arch/ppc/math-emu/sfp-machine.h
index 686e06d..4b17d83 100644
--- a/arch/ppc/math-emu/sfp-machine.h
+++ b/arch/ppc/math-emu/sfp-machine.h
@@ -166,7 +166,7 @@ extern int fp_pack_ds(void *, long, unsigned long, unsigned long, long, long);
 #include <linux/kernel.h>
 #include <linux/sched.h>

-#define __FPU_FPSCR	(current->thread.fpscr)
+#define __FPU_FPSCR	(current->thread.fpscr.val)

 /* We only actually write to the destination register
  * if exceptions signalled (if any) will not trap.
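
The ppc32 program_check_exception() hunk above keeps the line "fpscr &= fpscr << 22": in the 32-bit FPSCR the exception-enable bits (FPSCR_VE and friends) sit exactly 22 bits below the corresponding exception-status bits, so shifting the enables up and ANDing leaves only the exceptions the task has actually enabled. A small self-contained demo of that arithmetic (the FPSCR_VX/FPSCR_VE values below match the usual asm/reg.h definitions, which are not part of this patch):

	/* Userspace demo of the "mask summary bits with enables" trick used in
	 * program_check_exception() and parse_fpe().  FPSCR_* values assumed
	 * from asm/reg.h; not part of this diff. */
	#include <stdio.h>

	#define FPSCR_VX 0x20000000u	/* invalid-operation exception status */
	#define FPSCR_VE 0x00000080u	/* invalid-operation exception enable */

	int main(void)
	{
		unsigned int fpscr = FPSCR_VX | FPSCR_VE;	/* raised and enabled */

		/* Each enable bit sits 22 bits below its status bit, so this
		 * keeps only the exceptions the task asked to trap on. */
		fpscr &= fpscr << 22;

		printf("deliverable exceptions: %#x\n", fpscr);	/* prints 0x20000000 */
		return 0;
	}
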
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 8cc73cc..b889277 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -197,6 +197,9 @@ config BOOTX_TEXT
 config POWER4
 	def_bool y

+config PPC_FPU
+	def_bool y
+
 config POWER4_ONLY
 	bool "Optimize for POWER4"
 	default n
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 4d18bdb..ba59225 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -80,6 +80,7 @@ endif
 CFLAGS += $(call cc-option,-funit-at-a-time)

 head-y := arch/ppc64/kernel/head.o
+head-y += arch/powerpc/kernel/fpu.o

 libs-y += arch/ppc64/lib/
 core-y += arch/ppc64/kernel/ arch/powerpc/kernel/
diff --git a/arch/ppc64/kernel/align.c b/arch/ppc64/kernel/align.c
index 330e7ef..256d5b5 100644
--- a/arch/ppc64/kernel/align.c
+++ b/arch/ppc64/kernel/align.c
@@ -313,7 +313,7 @@ fix_alignment(struct pt_regs *regs)
 		/* Doing stfs, have to convert to single */
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread.fpscr);
+		cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread);
 		disable_kernel_fp();
 		preempt_enable();
 	}
@@ -349,7 +349,7 @@ fix_alignment(struct pt_regs *regs)
 		/* Doing lfs, have to convert to double */
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread.fpscr);
+		cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread);
 		disable_kernel_fp();
 		preempt_enable();
 	}
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index f58af9c..929f9f4 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -81,7 +81,7 @@ _stext:
 _GLOBAL(__start)
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
-	b	.__start_initialization_multiplatform
+	b	.__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 #endif /* CONFIG_PPC_MULTIPLATFORM */
@@ -747,6 +747,7 @@ bad_stack:
  * any task or sent any task a signal, you should use
  * ret_from_except or ret_from_except_lite instead of this.
  */
+	.globl	fast_exception_return
 fast_exception_return:
 	ld	r12,_MSR(r1)
 	ld	r11,_NIP(r1)
@@ -858,62 +859,6 @@ fp_unavailable_common:
 	bl	.kernel_fp_unavailable_exception
 	BUG_OPCODE

-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-	mfmsr	r5			/* grab the current MSR */
-	ori	r5,r5,MSR_FP
-	mtmsrd	r5			/* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_math@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Save FP state to last_task_used_math's THREAD struct */
-	addi	r4,r4,THREAD
-	SAVE_32FPRS(0, r4)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r4)
-	/* Disable FP for last_task_used_math */
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r6,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r6
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	ld	r4,PACACURRENT(r13)
-	addi	r5,r4,THREAD		/* Get THREAD */
-	ld	r4,THREAD_FPEXC_MODE(r5)
-	ori	r12,r12,MSR_FP
-	or	r12,r12,r4
-	std	r12,_MSR(r1)
-	lfd	fr0,THREAD_FPSCR(r5)
-	mtfsf	0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi	r4,r5,THREAD		/* Back to 'current' */
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	b	fast_exception_return
-
 	.align	7
 	.globl altivec_unavailable_common
 altivec_unavailable_common:
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index a33448c..9cae3d5 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -451,25 +451,6 @@ _GLOBAL(_outsl_ns)
 	sync
 	blr
-
-_GLOBAL(cvt_fd)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
 /*
  * identify_cpu and calls setup_cpu
  * In:	r3 = base of the cpu_specs array
@@ -655,38 +636,6 @@ _GLOBAL(disable_kernel_fp)
 	isync
 	blr

-/*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
-_GLOBAL(giveup_fpu)
-	mfmsr	r5
-	ori	r5,r5,MSR_FP
-	mtmsrd	r5			/* enable use of fpu now */
-	isync
-	cmpdi	0,r3,0
-	beqlr-				/* if no previous owner, done */
-	addi	r3,r3,THREAD		/* want THREAD of task */
-	ld	r5,PT_REGS(r3)
-	cmpdi	0,r5,0
-	SAVE_32FPRS(0, r3)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r3)
-	beq	1f
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r3,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r3		/* disable FP for previous task */
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	ld	r4,last_task_used_math@got(r2)
-	std	r5,0(r4)
-#endif /* CONFIG_SMP */
-	blr
-
 #ifdef CONFIG_ALTIVEC

 #if 0 /* this has no callers for now */
diff --git a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c
index 347112c..ec9d098 100644
--- a/arch/ppc64/kernel/signal.c
+++ b/arch/ppc64/kernel/signal.c
@@ -133,7 +133,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	flush_fp_to_thread(current);

 	/* Make sure signal doesn't get spurrious FP exceptions */
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;

 #ifdef CONFIG_ALTIVEC
 	err |= __put_user(v_regs, &sc->v_regs);
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index 9592f53..eee954a 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -162,10 +162,11 @@ struct thread_struct {
 	unsigned long	dbcr1;
 #endif
 	double		fpr[32];	/* Complete floating point set */
-#ifdef CONFIG_PPC32
-	unsigned long	fpscr_pad;	/* fpr ... fpscr must be contiguous */
-#endif
-	unsigned long	fpscr;		/* Floating point status */
+	struct {			/* fpr ... fpscr must be contiguous */
+
+		unsigned int pad;
+		unsigned int val;	/* Floating point status */
+	} fpscr;
 	int		fpexc_mode;	/* floating-point exception mode */
 #ifdef CONFIG_PPC64
 	unsigned long	start_tb;	/* Start purr when proc switched in */
@@ -207,7 +208,7 @@ struct thread_struct {
 	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
 	.fs = KERNEL_DS, \
 	.fpr = {0}, \
-	.fpscr = 0, \
+	.fpscr = { .val = 0, }, \
 	.fpexc_mode = MSR_FE0|MSR_FE1, \
 }
 #endif
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index d60c8c9..e926e43 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -132,8 +132,8 @@ extern int emulate_altivec(struct pt_regs *);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
-extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);

 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index 1f31078..eb30c09 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -82,8 +82,8 @@ extern int emulate_altivec(struct pt_regs *);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
-extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);

 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 2e17ef7..fd7c1f8 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -120,8 +120,8 @@ extern void giveup_altivec(struct task_struct *);
 extern void disable_kernel_altivec(void);
 extern void enable_kernel_altivec(void);
 extern int emulate_altivec(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
-extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);

 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
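
The include/asm-powerpc/processor.h hunk is what lets both the 32-bit and 64-bit assembly drop the old THREAD_FPSCR-4 addressing: mffs leaves the FPSCR in the low word of a floating-point register, the assembly saves that register with an 8-byte stfd at THREAD_FPSCR, and with fpscr now an explicit { pad, val } pair the architected value lands in .val on a big-endian CPU while C code reads and writes current->thread.fpscr.val. A hedged sketch of that layout assumption (thread_model stands in for struct thread_struct; the real THREAD_FPSCR constant is generated from asm-offsets and is not shown in this diff):

	/* Sketch of the layout behind "stfd fr0,THREAD_FPSCR(rX)" in the new
	 * fpu.S: the 8-byte store covers { pad, val }, and on a big-endian CPU
	 * the FPSCR contents (the low word of the mffs image) land in .val. */
	#include <stdio.h>
	#include <stddef.h>

	struct thread_model {
		double fpr[32];			/* fpr ... fpscr must stay contiguous */
		struct {
			unsigned int pad;	/* high word of the stfd image */
			unsigned int val;	/* low word = architected FPSCR value */
		} fpscr;
		int fpexc_mode;
	};

	int main(void)
	{
		printf("size of fpscr slot  = %zu\n",
		       sizeof(((struct thread_model *)0)->fpscr));	/* 8 bytes */
		printf("offset of fpscr     = %zu\n",
		       offsetof(struct thread_model, fpscr));		/* stfd target */
		printf("offset of fpscr.val = %zu\n",
		       offsetof(struct thread_model, fpscr.val));	/* value word */
		return 0;
	}

The old layout only achieved the same doubleword slot on 32-bit by pairing fpscr with the fpscr_pad word deleted above, which is why the 32-bit assembly previously stored at THREAD_FPSCR-4.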