diff options
author | marius <marius@FreeBSD.org> | 2008-07-05 15:30:07 +0000 |
---|---|---|
committer | marius <marius@FreeBSD.org> | 2008-07-05 15:30:07 +0000 |
commit | 6960811ea0ce0242e245c9ee9ca7d8b435252b45 (patch) | |
tree | 20c72e33e368ef28a04a672d1699eee16a7ef131 /sys/sparc64 | |
parent | 55c10251475df943688a88af33745fab0edbe511 (diff) | |
download | FreeBSD-src-6960811ea0ce0242e245c9ee9ca7d8b435252b45.zip FreeBSD-src-6960811ea0ce0242e245c9ee9ca7d8b435252b45.tar.gz |
- Fix spelling and style.
- Use __FBSDID.
Diffstat (limited to 'sys/sparc64')
-rw-r--r-- | sys/sparc64/sparc64/in_cksum.c | 47 |
1 file changed, 26 insertions, 21 deletions
diff --git a/sys/sparc64/sparc64/in_cksum.c b/sys/sparc64/sparc64/in_cksum.c index 8239b11..7b6fcc1 100644 --- a/sys/sparc64/sparc64/in_cksum.c +++ b/sys/sparc64/sparc64/in_cksum.c @@ -57,6 +57,9 @@ * $FreeBSD$ */ +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + #include <sys/param.h> #include <sys/systm.h> #include <sys/mbuf.h> @@ -73,10 +76,10 @@ * This routine is very heavily used in the network * code and should be modified for each CPU to be as fast as possible. * - * This implementation is a sparc64 version. Most code was taken over and - * adapted from the i386. Some optimizations were changed to achieve (hopefully) - * better performance. - * This uses 64 bit loads, but 32 bit additions due to the lack of a 64-bit + * This implementation is a sparc64 version. Most code was taken over + * and adapted from the i386. Some optimizations were changed to achieve + * (hopefully) better performance. + * This uses 64-bit loads, but 32-bit additions due to the lack of a 64-bit * add-with-carry operation. */ @@ -84,28 +87,28 @@ * REDUCE() is actually not used that frequently... maybe a C implementation * would suffice. */ -#define REDUCE(sum, tmp) __asm __volatile( \ - "sll %2, 16, %1\n" \ - "addcc %2, %1, %0\n" \ - "srl %0, 16, %0\n" \ +#define REDUCE(sum, tmp) __asm __volatile( \ + "sll %2, 16, %1\n" \ + "addcc %2, %1, %0\n" \ + "srl %0, 16, %0\n" \ "addc %0, 0, %0" : "=r" (sum), "=r" (tmp) : "0" (sum)) /* - * Note that some of these macros depend on the flags being preserved between - * calls, so they should not be intermixed with other C statements. + * Note that some of these macros depend on the flags being preserved + * between calls, so they should not be intermixed with other C statements. 
*/ -#define LD64_ADD32(sum, tmp, addr, n, mod) __asm __volatile( \ - "ldx [%3 + " #n "], %1\n" \ - "add" #mod " %2, %1, %0\n" \ - "srlx %1, 32, %1\n" \ +#define LD64_ADD32(sum, tmp, addr, n, mod) __asm __volatile( \ + "ldx [%3 + " #n "], %1\n" \ + "add" #mod " %2, %1, %0\n" \ + "srlx %1, 32, %1\n" \ "addccc %0, %1, %0" : "=r" (sum), "=r" (tmp) : "0" (sum), "r" (addr)) -#define LD32_ADD32(sum, tmp, addr, n, mod) __asm __volatile( \ - "lduw [%3 + " #n "], %1\n" \ - "add" #mod " %2, %1, %0\n" \ +#define LD32_ADD32(sum, tmp, addr, n, mod) __asm __volatile( \ + "lduw [%3 + " #n "], %1\n" \ + "add" #mod " %2, %1, %0\n" \ : "=r" (sum), "=r" (tmp) : "0" (sum), "r" (addr)) -#define MOP(sum) __asm __volatile( \ +#define MOP(sum) __asm __volatile( \ "addc %1, 0, %0" : "=r" (sum) : "0" (sum)) u_short @@ -229,7 +232,7 @@ skip_start: } else if (mlen == -1) { /* * This mbuf has odd number of bytes. - * There could be a word split betwen + * There could be a word split between * this mbuf and the next mbuf. * Save the last byte (to prepend to next mbuf). */ @@ -240,8 +243,10 @@ skip_start: if (len) printf("%s: out of data by %d\n", __func__, len); if (mlen == -1) { - /* The last mbuf has odd # of bytes. Follow the - standard (the odd byte is shifted left by 8 bits) */ + /* + * The last mbuf has odd # of bytes. Follow the + * standard (the odd byte is shifted left by 8 bits). + */ sum += su & 0xff00; } REDUCE(sum, tmp); |