Diffstat (limited to 'include/asm-sh')
-rw-r--r--   include/asm-sh/atomic.h   |  1 +
-rw-r--r--   include/asm-sh/bitops.h   | 16 ++++++++--------
-rw-r--r--   include/asm-sh/checksum.h |  2 ++
-rw-r--r--   include/asm-sh/fixmap.h   |  2 +-
-rw-r--r--   include/asm-sh/watchdog.h |  3 ---
5 files changed, 12 insertions, 12 deletions
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index fb627de..049eb2d 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -14,6 +14,7 @@ typedef struct { volatile int counter; } atomic_t;
 #define atomic_read(v)		((v)->counter)
 #define atomic_set(v,i)		((v)->counter = (i))
 
+#include <linux/compiler.h>
 #include <asm/system.h>
 
 /*
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
index e34f825..1c16792 100644
--- a/include/asm-sh/bitops.h
+++ b/include/asm-sh/bitops.h
@@ -6,7 +6,7 @@
 /* For __swab32 */
 #include <asm/byteorder.h>
 
-static __inline__ void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile void * addr)
 {
 	int	mask;
 	volatile unsigned int *a = addr;
@@ -24,7 +24,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
  */
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
-static __inline__ void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile void * addr)
 {
 	int	mask;
 	volatile unsigned int *a = addr;
@@ -37,7 +37,7 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }
 
-static __inline__ void change_bit(int nr, volatile void * addr)
+static inline void change_bit(int nr, volatile void * addr)
 {
 	int	mask;
 	volatile unsigned int *a = addr;
@@ -50,7 +50,7 @@ static __inline__ void change_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }
 
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static inline int test_and_set_bit(int nr, volatile void * addr)
 {
 	int	mask, retval;
 	volatile unsigned int *a = addr;
@@ -66,7 +66,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 	return retval;
 }
 
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile void * addr)
 {
 	int	mask, retval;
 	volatile unsigned int *a = addr;
@@ -82,7 +82,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
 	return retval;
 }
 
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static inline int test_and_change_bit(int nr, volatile void * addr)
 {
 	int	mask, retval;
 	volatile unsigned int *a = addr;
@@ -100,7 +100,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
 
 #include <asm-generic/bitops/non-atomic.h>
 
-static __inline__ unsigned long ffz(unsigned long word)
+static inline unsigned long ffz(unsigned long word)
 {
 	unsigned long result;
 
@@ -120,7 +120,7 @@ static __inline__ unsigned long ffz(unsigned long word)
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static __inline__ unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
 {
 	unsigned long result;
 
diff --git a/include/asm-sh/checksum.h b/include/asm-sh/checksum.h
index fa03b30..08168af 100644
--- a/include/asm-sh/checksum.h
+++ b/include/asm-sh/checksum.h
@@ -159,6 +159,7 @@ static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
+#ifdef CONFIG_IPV6
 static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
 						     struct in6_addr *daddr,
 						     __u32 len,
@@ -194,6 +195,7 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
 
 	return csum_fold(sum);
 }
+#endif
 
 /*
  * Copy and checksum to user
diff --git a/include/asm-sh/fixmap.h b/include/asm-sh/fixmap.h
index 412bcca..458e9fa 100644
--- a/include/asm-sh/fixmap.h
+++ b/include/asm-sh/fixmap.h
@@ -25,7 +25,7 @@
  * addresses. The point is to have a constant address at
  * compile time, but to set the physical address only
  * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xfffff000) backwards.
+ * from the end of P3 backwards.
  * Also this lets us do fail-safe vmalloc(), we
  * can guarantee that these special addresses and
  * vmalloc()-ed addresses never overlap.
diff --git a/include/asm-sh/watchdog.h b/include/asm-sh/watchdog.h
index 09ca419..d19ea62 100644
--- a/include/asm-sh/watchdog.h
+++ b/include/asm-sh/watchdog.h
@@ -62,7 +62,6 @@
 
 /**
  * sh_wdt_read_cnt - Read from Counter
- *
  * Reads back the WTCNT value.
  */
 static inline __u8 sh_wdt_read_cnt(void)
@@ -72,7 +71,6 @@ static inline __u8 sh_wdt_read_cnt(void)
 
 /**
  * sh_wdt_write_cnt - Write to Counter
- *
  * @val: Value to write
  *
  * Writes the given value @val to the lower byte of the timer counter.
@@ -95,7 +93,6 @@ static inline __u8 sh_wdt_read_csr(void)
 
 /**
  * sh_wdt_write_csr - Write to Control/Status Register
- *
  * @val: Value to write
  *
  * Writes the given value @val to the lower byte of the control/status
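
Note: the bitops.h hunks only change the function qualifier (__inline__ to inline); the bit-addressing scheme in the surrounding context is untouched. As a rough illustration, the following standalone C sketch (not part of this patch) shows the addressing that this header family conventionally uses: bit nr selects 32-bit word nr >> 5 and mask 1 << (nr & 0x1f). The kernel versions additionally disable local interrupts around the update (local_irq_restore() is visible in the hunk context); this sketch omits that and is single-threaded only.

#include <stdio.h>

/* Userspace stand-in for the header's test_and_set_bit(); assumed
 * word/mask addressing, no interrupt protection.
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
	volatile unsigned int *a = addr;
	unsigned int mask;
	int retval;

	a += nr >> 5;			/* select the 32-bit word        */
	mask = 1U << (nr & 0x1f);	/* select the bit within it      */
	retval = (*a & mask) != 0;	/* remember the old bit value    */
	*a |= mask;			/* set the bit                   */
	return retval;
}

int main(void)
{
	unsigned int bitmap[2] = { 0, 0 };

	printf("%d\n", test_and_set_bit(37, bitmap));	/* 0: bit was clear   */
	printf("%d\n", test_and_set_bit(37, bitmap));	/* 1: bit is now set  */
	printf("%#x\n", bitmap[1]);			/* 0x20: bit 5, word 1 */
	return 0;
}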
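The checksum.h hunks wrap csum_ipv6_magic() in #ifdef CONFIG_IPV6, so the IPv6 helper is only compiled when IPv6 support is configured; the routine still ends in "return csum_fold(sum);", which folds the 32-bit ones'-complement accumulator down to 16 bits and inverts it. A small standalone sketch of that folding step (fold_csum() is a hypothetical name, not the kernel's csum_fold()):

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit ones'-complement sum to 16 bits and complement it,
 * roughly mirroring what csum_fold() does at the end of csum_ipv6_magic().
 */
static uint16_t fold_csum(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add the carries back in      */
	sum = (sum & 0xffff) + (sum >> 16);	/* ...and any carry from that   */
	return (uint16_t)~sum;			/* ones' complement of the fold */
}

int main(void)
{
	/* 0x12345 -> 0x2345 + 0x1 = 0x2346, complemented: 0xdcb9 */
	printf("%#x\n", (unsigned)fold_csum(0x12345));
	return 0;
}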