/* Machine-dependent software floating-point definitions.  PPC version.
   Copyright (C) 1997 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, write to the Free Software Foundation, Inc.,
   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

   Actually, this is a PPC (32bit) version, written based on the
   i386, sparc, and sparc64 versions, by me,
   Peter Maydell (pmaydell@chiark.greenend.org.uk).
   Comments are by and large also mine, although they may be inaccurate.

   In picking out asm fragments I've gone with the lowest common
   denominator, which also happens to be the hardware I have :->
   That is, a SPARC without hardware multiply and divide.
 */

/* basic word size definitions */
#define _FP_W_TYPE_SIZE		32
#define _FP_W_TYPE		unsigned long
#define _FP_WS_TYPE		signed long
#define _FP_I_TYPE		long

#define __ll_B			((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t)		((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t)	((UWtype) (t) >> (W_TYPE_SIZE / 2))
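
/*
 * Worked example, assuming W_TYPE_SIZE is the usual 32 here: __ll_B is
 * then 0x10000, so __ll_lowpart(0xdeadbeef) == 0xbeef and
 * __ll_highpart(0xdeadbeef) == 0xdead, i.e. these macros split a word
 * into 16-bit halves for the long multiply/divide helpers below.
 */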

/* You can optionally code some things like addition in asm. For
 * example, i386 defines __FP_FRAC_ADD_2 as asm. If you don't
 * then you get a fragment of C code [if you change an #ifdef 0
 * in op-2.h] or a call to add_ssaaaa (see below).
 * Good places to look for asm fragments to use are gcc and glibc.
 * gcc's longlong.h is useful.
 */

/* We need to know how to multiply and divide. If the host word size
 * is >= 2*fracbits you can use _FP_MUL_MEAT_n_imm(t,R,X,Y) which
 * codes the multiply with whatever gcc does to 'a * b'.
 * _FP_MUL_MEAT_n_wide(t,R,X,Y,f) is used when you have an asm
 * function that can multiply two 1W values and get a 2W result.
 * Otherwise you're stuck with _FP_MUL_MEAT_n_hard(t,R,X,Y) which
 * does bitshifting to avoid overflow.
 * For division there is _FP_DIV_MEAT_n_imm(t,R,X,Y,f) for word size
 * >= 2*fracbits, where f is either _FP_DIV_HELP_imm or
 * _FP_DIV_HELP_ldiv (see op-1.h).
 * _FP_DIV_MEAT_udiv() is if you have asm to do 2W/1W => (1W, 1W).
 * [GCC and glibc have longlong.h which has the asm macro udiv_qrnnd
 * to do this.]
 * In general, 'n' is the number of words required to hold the type,
 * and 't' is either S, D or Q for single/double/quad.
 *           -- PMM
 */
/* Example: SPARC64:
 * #define _FP_MUL_MEAT_S(R,X,Y)	_FP_MUL_MEAT_1_imm(S,R,X,Y)
 * #define _FP_MUL_MEAT_D(R,X,Y)	_FP_MUL_MEAT_1_wide(D,R,X,Y,umul_ppmm)
 * #define _FP_MUL_MEAT_Q(R,X,Y)	_FP_MUL_MEAT_2_wide(Q,R,X,Y,umul_ppmm)
 *
 * #define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
 * #define _FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_1_udiv(D,R,X,Y)
 * #define _FP_DIV_MEAT_Q(R,X,Y)	_FP_DIV_MEAT_2_udiv_64(Q,R,X,Y)
 *
 * Example: i386:
 * #define _FP_MUL_MEAT_S(R,X,Y)   _FP_MUL_MEAT_1_wide(S,R,X,Y,_i386_mul_32_64)
 * #define _FP_MUL_MEAT_D(R,X,Y)   _FP_MUL_MEAT_2_wide(D,R,X,Y,_i386_mul_32_64)
 *
 * #define _FP_DIV_MEAT_S(R,X,Y)   _FP_DIV_MEAT_1_udiv(S,R,X,Y,_i386_div_64_32)
 * #define _FP_DIV_MEAT_D(R,X,Y)   _FP_DIV_MEAT_2_udiv_64(D,R,X,Y)
 */

#define _FP_MUL_MEAT_S(R,X,Y)   _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_D(R,X,Y)   _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)

#define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
#define _FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_2_udiv(D,R,X,Y)
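
/*
 * The _wide multiply variants are a natural fit because PPC's mulhwu
 * gives the high half of a 32x32 product directly (see umul_ppmm
 * below); there is no corresponding 64-by-32 divide instruction, so
 * the _udiv division variants fall back on the C udiv_qrnnd defined
 * later in this file.
 */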

/* These macros define what NaN looks like. They're supposed to expand to
 * a comma-separated set of 32-bit unsigned ints that encode NaN.
 */
#define _FP_NANFRAC_S		((_FP_QNANBIT_S << 1) - 1)
#define _FP_NANFRAC_D		((_FP_QNANBIT_D << 1) - 1), -1
#define _FP_NANFRAC_Q		((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
#define _FP_NANSIGN_S		0
#define _FP_NANSIGN_D		0
#define _FP_NANSIGN_Q		0
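
/*
 * For example, _FP_NANFRAC_D expands to two words: a high fraction
 * word with the quiet bit and every bit below it set, and -1 (all
 * ones) for the low word.  Together with _FP_NANSIGN_D == 0 this is
 * the default NaN pattern the emulator produces.
 */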

#define _FP_KEEPNANFRACP 1

#ifdef FP_EX_BOOKE_E500_SPE
#define FP_EX_INEXACT		(1 << 21)
#define FP_EX_INVALID		(1 << 20)
#define FP_EX_DIVZERO		(1 << 19)
#define FP_EX_UNDERFLOW		(1 << 18)
#define FP_EX_OVERFLOW		(1 << 17)
#define FP_INHIBIT_RESULTS	0

#define __FPU_FPSCR	(current->thread.spefscr)
#define __FPU_ENABLED_EXC		\
({					\
	(__FPU_FPSCR >> 2) & 0x1f;	\
})
#else
/* Exception flags.  We use the bit positions of the appropriate bits
   in the FPSCR, which also correspond to the FE_* bits.  This makes
   everything easier ;-).  */
#define FP_EX_INVALID         (1 << (31 - 2))
#define FP_EX_INVALID_SNAN	EFLAG_VXSNAN
#define FP_EX_INVALID_ISI	EFLAG_VXISI
#define FP_EX_INVALID_IDI	EFLAG_VXIDI
#define FP_EX_INVALID_ZDZ	EFLAG_VXZDZ
#define FP_EX_INVALID_IMZ	EFLAG_VXIMZ
#define FP_EX_OVERFLOW        (1 << (31 - 3))
#define FP_EX_UNDERFLOW       (1 << (31 - 4))
#define FP_EX_DIVZERO         (1 << (31 - 5))
#define FP_EX_INEXACT         (1 << (31 - 6))

#define __FPU_FPSCR	(current->thread.fpscr.val)

/* We only actually write to the destination register
 * if exceptions signalled (if any) will not trap.
 */
#define __FPU_ENABLED_EXC \
({						\
	(__FPU_FPSCR >> 3) & 0x1f;	\
})
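
/*
 * (FPSCR >> 3) & 0x1f picks up the five exception enable bits
 * XE, ZE, UE, OE and VE, which sit just above the NI and RN fields
 * in the low byte of the FPSCR.
 */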

#endif

/*
 * If one NaN is signaling and the other is not,
 * we choose the signaling one; otherwise we choose Y.
 */
#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)			\
  do {								\
    if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)		\
	&& !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs))	\
      {								\
	R##_s = X##_s;						\
	_FP_FRAC_COPY_##wc(R,X);				\
      }								\
    else							\
      {								\
	R##_s = Y##_s;						\
	_FP_FRAC_COPY_##wc(R,Y);				\
      }								\
    R##_c = FP_CLS_NAN;						\
  } while (0)


#include <linux/kernel.h>
#include <linux/sched.h>

#define __FPU_TRAP_P(bits) \
	((__FPU_ENABLED_EXC & (bits)) != 0)

#define __FP_PACK_S(val,X)			\
({  int __exc = _FP_PACK_CANONICAL(S,1,X);	\
    if (!__exc || !__FPU_TRAP_P(__exc))		\
        _FP_PACK_RAW_1_P(S,val,X);		\
    __exc;					\
})

#define __FP_PACK_D(val,X)			\
   do {									\
	_FP_PACK_CANONICAL(D, 2, X);					\
	if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS))	\
		_FP_PACK_RAW_2_P(D, val, X);				\
   } while (0)

#define __FP_PACK_DS(val,X)							\
   do {										\
	   FP_DECL_S(__X);							\
	   FP_CONV(S, D, 1, 2, __X, X);						\
	   _FP_PACK_CANONICAL(S, 1, __X);					\
	   if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) {	\
		   _FP_UNPACK_CANONICAL(S, 1, __X);				\
		   FP_CONV(D, S, 2, 1, X, __X);					\
		   _FP_PACK_CANONICAL(D, 2, X);					\
		   if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS))	\
		   _FP_PACK_RAW_2_P(D, val, X);					\
	   }									\
   } while (0)
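
/*
 * Hedged usage sketch (not compiled): roughly how an emulation routine
 * is expected to combine the soft-fp operation macros with the pack
 * helpers above.  The operand/destination names are hypothetical.
 */
#if 0
	FP_DECL_D(A);
	FP_DECL_D(B);
	FP_DECL_D(R);

	/* ... unpack the A and B operands from the source registers
	   with the math-emu FP_UNPACK_* macros for the D format ... */
	FP_ADD_D(R, A, B);
	__FP_PACK_D(frD, R);	/* frD points at the destination words and
				   is only written if no enabled trap */
#endif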

/* Obtain the current rounding mode. */
#define FP_ROUNDMODE			\
({					\
	__FPU_FPSCR & 0x3;		\
})
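
/*
 * The low two FPSCR bits are the RN field: 0 = round to nearest,
 * 1 = round toward zero, 2 = round toward +infinity, 3 = round toward
 * -infinity, which matches the FP_RND_* values soft-fp expects.
 */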

/* the asm fragments go here: all these are taken from glibc-2.0.5's
 * stdlib/longlong.h
 */

#include <linux/types.h>
#include <asm/byteorder.h>

/* add_ssaaaa is used in op-2.h and should be equivalent to
 * #define add_ssaaaa(sh,sl,ah,al,bh,bl) (sh = ah+bh+ (( sl = al+bl) < al))
 * add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
 * high_addend_2, low_addend_2) adds two UWtype integers, composed of
 * HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
 * respectively.  The result is placed in HIGH_SUM and LOW_SUM.  Overflow
 * (i.e. carry out) is not stored anywhere, and is lost.
 */
#define add_ssaaaa(sh, sl, ah, al, bh, bl)				\
  do {									\
    if (__builtin_constant_p (bh) && (bh) == 0)				\
      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2"		\
	     : "=r" ((USItype)(sh)),					\
	       "=&r" ((USItype)(sl))					\
	     : "%r" ((USItype)(ah)),					\
	       "%r" ((USItype)(al)),					\
	       "rI" ((USItype)(bl)));					\
    else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0)		\
      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2"		\
	     : "=r" ((USItype)(sh)),					\
	       "=&r" ((USItype)(sl))					\
	     : "%r" ((USItype)(ah)),					\
	       "%r" ((USItype)(al)),					\
	       "rI" ((USItype)(bl)));					\
    else								\
      __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3"		\
	     : "=r" ((USItype)(sh)),					\
	       "=&r" ((USItype)(sl))					\
	     : "%r" ((USItype)(ah)),					\
	       "r" ((USItype)(bh)),					\
	       "%r" ((USItype)(al)),					\
	       "rI" ((USItype)(bl)));					\
  } while (0)
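
/*
 * A minimal C sketch (not compiled) of what the asm above computes,
 * plus a worked case; the helper name is hypothetical.
 */
#if 0
static inline void add_ssaaaa_c(USItype *sh, USItype *sl,
				USItype ah, USItype al,
				USItype bh, USItype bl)
{
	USItype lo = al + bl;		/* low word, may wrap around */

	*sh = ah + bh + (lo < al);	/* fold the carry into the high word */
	*sl = lo;
}

/* Adding the two-word values (0x1, 0xffffffff) and (0x0, 0x1):
 *	add_ssaaaa(hi, lo, 0x1, 0xffffffff, 0x0, 0x1);
 * leaves hi == 0x2 and lo == 0x0.
 */
#endif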

/* sub_ddmmss is used in op-2.h and udivmodti4.c and should be equivalent to
 * #define sub_ddmmss(sh, sl, ah, al, bh, bl) (sh = ah-bh - ((sl = al-bl) > al))
 * sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
 * high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
 * composed of HIGH_MINUEND and LOW_MINUEND, and HIGH_SUBTRAHEND and
 * LOW_SUBTRAHEND respectively.  The result is placed in HIGH_DIFFERENCE
 * and LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
 * and is lost.
 */
#define sub_ddmmss(sh, sl, ah, al, bh, bl)				\
  do {									\
    if (__builtin_constant_p (ah) && (ah) == 0)				\
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2"	\
	       : "=r" ((USItype)(sh)),					\
		 "=&r" ((USItype)(sl))					\
	       : "r" ((USItype)(bh)),					\
		 "rI" ((USItype)(al)),					\
		 "r" ((USItype)(bl)));					\
    else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0)		\
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2"	\
	       : "=r" ((USItype)(sh)),					\
		 "=&r" ((USItype)(sl))					\
	       : "r" ((USItype)(bh)),					\
		 "rI" ((USItype)(al)),					\
		 "r" ((USItype)(bl)));					\
    else if (__builtin_constant_p (bh) && (bh) == 0)			\
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2"		\
	       : "=r" ((USItype)(sh)),					\
		 "=&r" ((USItype)(sl))					\
	       : "r" ((USItype)(ah)),					\
		 "rI" ((USItype)(al)),					\
		 "r" ((USItype)(bl)));					\
    else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0)		\
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2"		\
	       : "=r" ((USItype)(sh)),					\
		 "=&r" ((USItype)(sl))					\
	       : "r" ((USItype)(ah)),					\
		 "rI" ((USItype)(al)),					\
		 "r" ((USItype)(bl)));					\
    else								\
      __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2"	\
	       : "=r" ((USItype)(sh)),					\
		 "=&r" ((USItype)(sl))					\
	       : "r" ((USItype)(ah)),					\
		 "r" ((USItype)(bh)),					\
		 "rI" ((USItype)(al)),					\
		 "r" ((USItype)(bl)));					\
  } while (0)
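
/*
 * Equivalent C sketch (not compiled); the helper name is hypothetical.
 * The borrow out of the low word is (al < bl), which is what the
 * "(sl = al-bl) > al" form in the comment above tests.
 */
#if 0
static inline void sub_ddmmss_c(USItype *dh, USItype *dl,
				USItype ah, USItype al,
				USItype bh, USItype bl)
{
	*dh = ah - bh - (al < bl);	/* subtract the borrow */
	*dl = al - bl;
}
#endif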

/* asm fragments for mul and div */

/* umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
 * UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two-UWtype-word
 * product in HIGH_PROD and LOW_PROD.
 */
#define umul_ppmm(ph, pl, m0, m1)					\
  do {									\
    USItype __m0 = (m0), __m1 = (m1);					\
    __asm__ ("mulhwu %0,%1,%2"						\
	     : "=r" ((USItype)(ph))					\
	     : "%r" (__m0),						\
               "r" (__m1));						\
    (pl) = __m0 * __m1;							\
  } while (0)
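
/*
 * Portable C sketch (not compiled) of the same operation using a
 * 64-bit intermediate; the asm above instead reads the high half of
 * the product directly with mulhwu.  The helper name is hypothetical.
 */
#if 0
static inline void umul_ppmm_c(USItype *ph, USItype *pl,
			       USItype m0, USItype m1)
{
	unsigned long long p = (unsigned long long)m0 * m1;

	*ph = (USItype)(p >> 32);	/* high 32 bits of the product */
	*pl = (USItype)p;		/* low 32 bits of the product */
}
#endif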

/* udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
 * denominator) divides a UDWtype, composed of the UWtype integers
 * HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
 * in QUOTIENT and the remainder in REMAINDER.  HIGH_NUMERATOR must be less
 * than DENOMINATOR for correct operation.  If, in addition, the
 * implementation requires the most significant bit of DENOMINATOR to be 1,
 * the pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
 */
#define udiv_qrnnd(q, r, n1, n0, d)					\
  do {									\
    UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m;			\
    __d1 = __ll_highpart (d);						\
    __d0 = __ll_lowpart (d);						\
									\
    __r1 = (n1) % __d1;							\
    __q1 = (n1) / __d1;							\
    __m = (UWtype) __q1 * __d0;						\
    __r1 = __r1 * __ll_B | __ll_highpart (n0);				\
    if (__r1 < __m)							\
      {									\
	__q1--, __r1 += (d);						\
	if (__r1 >= (d)) /* we didn't get carry when adding to __r1 */	\
	  if (__r1 < __m)						\
	    __q1--, __r1 += (d);					\
      }									\
    __r1 -= __m;							\
									\
    __r0 = __r1 % __d1;							\
    __q0 = __r1 / __d1;							\
    __m = (UWtype) __q0 * __d0;						\
    __r0 = __r0 * __ll_B | __ll_lowpart (n0);				\
    if (__r0 < __m)							\
      {									\
	__q0--, __r0 += (d);						\
	if (__r0 >= (d))						\
	  if (__r0 < __m)						\
	    __q0--, __r0 += (d);					\
      }									\
    __r0 -= __m;							\
									\
    (q) = (UWtype) __q1 * __ll_B | __q0;				\
    (r) = __r0;								\
  } while (0)

#define UDIV_NEEDS_NORMALIZATION 1
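
/*
 * Because UDIV_NEEDS_NORMALIZATION is 1, callers are expected to shift
 * the divisor left until its most significant bit is set (and adjust
 * the dividend and resulting remainder to match).  A small worked case
 * with an already-normalized divisor:
 *	udiv_qrnnd(q, r, 0x00000001, 0x00000000, 0x80000000);
 * divides 2^32 by 2^31 and leaves q == 2 and r == 0.
 */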

#define abort()								\
	return 0

#ifdef __BIG_ENDIAN
#define __BYTE_ORDER __BIG_ENDIAN
#else
#define __BYTE_ORDER __LITTLE_ENDIAN
#endif

/* Exception flags. */
#define EFLAG_INVALID		(1 << (31 - 2))
#define EFLAG_OVERFLOW		(1 << (31 - 3))
#define EFLAG_UNDERFLOW		(1 << (31 - 4))
#define EFLAG_DIVZERO		(1 << (31 - 5))
#define EFLAG_INEXACT		(1 << (31 - 6))

#define EFLAG_VXSNAN		(1 << (31 - 7))
#define EFLAG_VXISI		(1 << (31 - 8))
#define EFLAG_VXIDI		(1 << (31 - 9))
#define EFLAG_VXZDZ		(1 << (31 - 10))
#define EFLAG_VXIMZ		(1 << (31 - 11))
#define EFLAG_VXVC		(1 << (31 - 12))
#define EFLAG_VXSOFT		(1 << (31 - 21))
#define EFLAG_VXSQRT		(1 << (31 - 22))
#define EFLAG_VXCVI		(1 << (31 - 23))