/*
 * FPU support code, moved here from head.S so that it can be used
 * by chips which use other head-whatever.S files.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

/*
 * This task wants to use the FPU now.
 * On UP, disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Load up this task's FP registers from its thread_struct,
 * enable the FPU for the current task and return to the task.
 */
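/*
 * Illustrative sketch only: in rough C-like pseudocode, the UP lazy
 * switch performed below is approximately (save_fprs()/load_fprs() are
 * made-up helper names standing in for SAVE_32FPRS/REST_32FPRS plus the
 * FPSCR accesses):
 *
 *	if (last_task_used_math) {
 *		save_fprs(&last_task_used_math->thread);
 *		last_task_used_math->thread.regs->msr &=
 *			~(MSR_FP | MSR_FE0 | MSR_FE1);
 *	}
 *	load_fprs(&current->thread);
 *	return_msr |= MSR_FP | current->thread.fpexc_mode;
 *	last_task_used_math = current;
 *
 * where return_msr is the MSR value that will be restored when the
 * exception returns.
 */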
_GLOBAL(load_up_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP
	SYNC
	MTMSRD(r5)			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	LOADBASE(r3, last_task_used_math)
	tophys(r3,r3)
	LDL	r4,OFF(last_task_used_math)(r3)
	CMPI	0,r4,0
	beq	1f			/* nothing to save if no previous owner */
	tophys(r4,r4)
	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r4)	/* save the previous owner's FPSCR too */
	LDL	r5,PT_REGS(r4)
	tophys(r5,r5)
	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r10,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r10		/* disable FP for previous task */
	STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
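/*
 * Note: the MSR of the interrupted context is still live in a register
 * from the exception prologue in the corresponding head-*.S file (r9 on
 * 32-bit, r12 on 64-bit), which is why the code below ORs MSR_FP and
 * the task's fpexc_mode into r9/r12 without loading them first.
 */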
	/* enable use of FP after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r9,r9,MSR_FP		/* enable FP for current */
	or	r9,r9,r4
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	ld	r4,THREAD_FPEXC_MODE(r5)
	ori	r12,r12,MSR_FP
	or	r12,r12,r4
	std	r12,_MSR(r1)
#endif
	lfd	fr0,THREAD_FPSCR(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	tovirt(r4,r4)
	STL	r4,OFF(last_task_used_math)(r3)	/* current now owns the FPU */
#endif /* CONFIG_SMP */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	b	fast_exception_return

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
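/*
 * C-level view (illustrative sketch, not a definitive interface
 * description): callers such as the context-switch code invoke this
 * roughly as
 *
 *	void giveup_fpu(struct task_struct *tsk);
 *
 *	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
 *		giveup_fpu(prev);
 *
 * where 'prev' is the task losing the CPU.
 */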
_GLOBAL(giveup_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP
	SYNC_601
	ISYNC_601
	MTMSRD(r5)			/* enable use of fpu now */
	SYNC_601
	isync
	CMPI	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	LDL	r5,PT_REGS(r3)
	CMPI	0,r5,0			/* skip the MSR update if the task has no pt_regs */
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r3)
	beq	1f
	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOADBASE(r4,last_task_used_math)
	STL	r5,OFF(last_task_used_math)(r4)	/* no one owns the FPU now */
#endif /* CONFIG_SMP */
	blr

/*
 * These are used in the alignment trap handler when emulating
 * single-precision loads and stores.
 * We restore and save the fpscr so the task gets the same result
 * and exceptions as if the cpu had performed the load or store.
 */
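/*
 * Rough C prototypes as the alignment handler uses these (sketch;
 * parameter names are illustrative):
 *
 *	void cvt_fd(float *from, double *to, struct thread_struct *thread);
 *	void cvt_df(double *from, float *to, struct thread_struct *thread);
 *
 * so on entry r3 = source, r4 = destination and r5 = &current->thread,
 * which is where the live FPSCR is loaded from and saved back to.
 */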
_GLOBAL(cvt_fd)
	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
	mtfsf	0xff,0
	lfs	0,0(r3)
	stfd	0,0(r4)
	mffs	0
	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
	blr

_GLOBAL(cvt_df)
	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
	mtfsf	0xff,0
	lfd	0,0(r3)
	stfs	0,0(r4)
	mffs	0
	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
	blr