author		Eric Dumazet <eric.dumazet@gmail.com>	2010-08-26 22:03:08 -0700
committer	David S. Miller <davem@davemloft.net>	2010-08-26 22:03:08 -0700
commit		40d0802b3eb47d57e2d57a5244a18cbbe9632e13 (patch)
tree		fd45987286080e1f711c2448f6fbdc7d362a0241	/include/linux/etherdevice.h
parent		250ad8f55c06eb866cfb57f8d3ea6ff961a7d1d7 (diff)
gro: __napi_gro_receive() optimizations
compare_ether_header() can have a special implementation on 64-bit
arches if CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is defined.

__napi_gro_receive() and vlan_gro_common() can avoid a conditional
branch to perform the device match.

On x86_64, __napi_gro_receive() now has 38 instructions instead of 53.

As gcc-4.4.3 still chooses not to inline it, add the inline keyword to
this performance-critical function.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/etherdevice.h')
-rw-r--r--	include/linux/etherdevice.h	18
1 file changed, 17 insertions, 1 deletion
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 2308fbb..fb6aa60 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -237,13 +237,29 @@ static inline bool is_etherdev_addr(const struct net_device *dev,
  * entry points.
  */
 
-static inline int compare_ether_header(const void *a, const void *b)
+static inline unsigned long compare_ether_header(const void *a, const void *b)
 {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+	unsigned long fold;
+
+	/*
+	 * We want to compare 14 bytes:
+	 *  [a0 ... a13] ^ [b0 ... b13]
+	 * Use two long XOR, ORed together, with an overlap of two bytes.
+	 *  [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] |
+	 *  [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13]
+	 * This means the [a6 a7] ^ [b6 b7] part is done two times.
+	 */
+	fold = *(unsigned long *)a ^ *(unsigned long *)b;
+	fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
+	return fold;
+#else
 	u32 *a32 = (u32 *)((u8 *)a + 2);
 	u32 *b32 = (u32 *)((u8 *)b + 2);
 
 	return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
 	       (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
+#endif
 }
 
 #endif	/* _LINUX_ETHERDEVICE_H */
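
The hunk above only makes compare_ether_header() return a nonzero fold when the two 14-byte headers differ; the branch-free device match mentioned in the commit message lives in the GRO callers, which are outside this diff. The following is a minimal userspace sketch of that idea, not the kernel code: same_flow(), the dev1/dev2 parameters, and the use of uint64_t/memcpy/uintptr_t are illustration choices made for portability.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the 64-bit fast path: two 8-byte loads with a 2-byte overlap
 * cover all 14 header bytes; the result is zero iff the headers match.
 * memcpy() stands in for the kernel's direct unaligned loads, which are
 * guarded by CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS && BITS_PER_LONG == 64. */
static inline uint64_t compare_ether_header(const void *a, const void *b)
{
	uint64_t a1, a2, b1, b2, fold;

	memcpy(&a1, a, sizeof(a1));
	memcpy(&b1, b, sizeof(b1));
	memcpy(&a2, (const unsigned char *)a + 6, sizeof(a2));
	memcpy(&b2, (const unsigned char *)b + 6, sizeof(b2));

	fold  = a1 ^ b1;
	fold |= a2 ^ b2;
	return fold;
}

/* Hypothetical flow-match helper: XOR the device identities into the same
 * accumulator as the header comparison, so the caller tests one value
 * instead of branching once for the device and again for the header. */
static int same_flow(const void *dev1, const void *dev2,
		     const unsigned char *hdr1, const unsigned char *hdr2)
{
	uint64_t diffs;

	diffs  = (uintptr_t)dev1 ^ (uintptr_t)dev2;
	diffs |= compare_ether_header(hdr1, hdr2);
	return diffs == 0;
}

int main(void)
{
	unsigned char h1[14] = { 0 }, h2[14] = { 0 };
	int dev_a, dev_b;

	printf("same dev, same hdr: %d\n", same_flow(&dev_a, &dev_a, h1, h2)); /* 1 */
	h2[13] = 0xff;
	printf("same dev, diff hdr: %d\n", same_flow(&dev_a, &dev_a, h1, h2)); /* 0 */
	printf("diff dev, same hdr: %d\n", same_flow(&dev_a, &dev_b, h1, h1)); /* 0 */
	return 0;
}

Folding the device comparison into the same accumulator lets the compiler emit a single test at the end instead of a separate conditional branch for the device check, which is the saving the commit message refers to.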