summaryrefslogtreecommitdiffstats
path: root/sys/sparc64
diff options
context:
space:
mode:
author	mux <mux@FreeBSD.org>	2003-09-30 22:35:27 +0000
committer	mux <mux@FreeBSD.org>	2003-09-30 22:35:27 +0000
commit	9ce524bfd672801e68c7a4b1b00fe0186275228c (patch)
tree	4892b4f112eb2ebbf286e3b9eb99b851042a47f7 /sys/sparc64
parent	2b4da533fa1c7c3a7874365662836fa09966011f (diff)
download	FreeBSD-src-9ce524bfd672801e68c7a4b1b00fe0186275228c.zip
	FreeBSD-src-9ce524bfd672801e68c7a4b1b00fe0186275228c.tar.gz
Allow the compiler to micro-optimize byte swapping functions by
evaluating them at compile time rather than at run time. As for
x86 and amd64, this requires GCC and it's enabled only if
__OPTIMIZE__ is defined (ie, if at least -O is used).

Reviewed by:	jake
Diffstat (limited to 'sys/sparc64')
-rw-r--r--	sys/sparc64/include/endian.h	39
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/sys/sparc64/include/endian.h b/sys/sparc64/include/endian.h
index f10032c..a2ff62b 100644
--- a/sys/sparc64/include/endian.h
+++ b/sys/sparc64/include/endian.h
@@ -67,17 +67,30 @@
#define BYTE_ORDER _BYTE_ORDER
#endif
-#ifdef __GNUC__
+#if defined(__GNUC__) && defined(__OPTIMIZE__)
+#define __is_constant(x) __builtin_constant_p(x)
+#else
+#define __is_constant(x) 0
+#endif
+
+#define __bswap16_const(x) ((x >> 8) | ((x << 8) & 0xff00))
+#define __bswap32_const(x) ((x >> 24) | ((x >> 8) & 0xff00) | \
+ ((x << 8) & 0xff0000) | ((x << 24) & 0xff000000))
+#define __bswap64_const(x) ((x >> 56) | ((x >> 40) & 0xff00) | \
+ ((x >> 24) & 0xff0000) | ((x >> 8) & 0xff000000) | \
+ ((x << 8) & ((__uint64_t)0xff << 32)) | \
+ ((x << 24) & ((__uint64_t)0xff << 40)) | \
+ ((x << 40) & ((__uint64_t)0xff << 48)) | ((x << 56)))
static __inline __uint16_t
-__bswap16(__uint16_t _x)
+__bswap16_var(__uint16_t _x)
{
return ((_x >> 8) | ((_x << 8) & 0xff00));
}
static __inline __uint32_t
-__bswap32(__uint32_t _x)
+__bswap32_var(__uint32_t _x)
{
return ((_x >> 24) | ((_x >> 8) & 0xff00) | ((_x << 8) & 0xff0000) |
@@ -85,7 +98,7 @@ __bswap32(__uint32_t _x)
}
static __inline __uint64_t
-__bswap64(__uint64_t _x)
+__bswap64_var(__uint64_t _x)
{
return ((_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
@@ -94,20 +107,16 @@ __bswap64(__uint64_t _x)
((_x << 40) & ((__uint64_t)0xff << 48)) | ((_x << 56)));
}
+#define __bswap16(x) (__is_constant(x) ? __bswap16_const(x) : \
+ __bswap16_var(x))
+#define __bswap32(x) (__is_constant(x) ? __bswap32_const(x) : \
+ __bswap32_var(x))
+#define __bswap64(x) (__is_constant(x) ? __bswap64_const(x) : \
+ __bswap64_var(x))
+
#define __htonl(x) ((__uint32_t)(x))
#define __htons(x) ((__uint16_t)(x))
#define __ntohl(x) ((__uint32_t)(x))
#define __ntohs(x) ((__uint16_t)(x))
-#else /* !__GNUC__ */
-
-/*
- * No optimizations are available for this compiler. Fall back to
- * non-optimized functions by defining the constant usually used to prevent
- * redefinition.
- */
-#define _BYTEORDER_FUNC_DEFINED
-
-#endif /* __GNUC__ */
-
#endif /* !_MACHINE_ENDIAN_H_ */
OpenPOWER on IntegriCloud