Diffstat (limited to 'arch/alpha')
-rw-r--r-- | arch/alpha/include/asm/machvec.h  |   2
-rw-r--r-- | arch/alpha/include/asm/system.h   | 547
-rw-r--r-- | arch/alpha/include/asm/types.h    |   5
-rw-r--r-- | arch/alpha/include/asm/uaccess.h  |  12
-rw-r--r-- | arch/alpha/include/asm/xchg.h     | 258
-rw-r--r-- | arch/alpha/kernel/err_ev6.c       |   4
-rw-r--r-- | arch/alpha/kernel/err_ev7.c       |   6
-rw-r--r-- | arch/alpha/kernel/err_marvel.c    |  40
-rw-r--r-- | arch/alpha/kernel/err_titan.c     |  28
-rw-r--r-- | arch/alpha/kernel/pci.c           |   2
-rw-r--r-- | arch/alpha/kernel/pci_iommu.c     |  34
-rw-r--r-- | arch/alpha/kernel/proto.h         |  16
-rw-r--r-- | arch/alpha/kernel/setup.c         |   2
-rw-r--r-- | arch/alpha/kernel/smc37c669.c     |   4
-rw-r--r-- | arch/alpha/kernel/sys_jensen.c    |   3
-rw-r--r-- | arch/alpha/kernel/sys_sable.c     |   4
-rw-r--r-- | arch/alpha/kernel/traps.c         |   2
17 files changed, 392 insertions, 577 deletions
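
The heart of the patch is the new arch/alpha/include/asm/xchg.h: the xchg/cmpxchg function bodies are written once, and asm/system.h includes the header twice -- first with ____xchg(type, args...) pasting to __xchg##type##_local(args) and an empty __ASM__MB, then again with the plain __xchg##type names and __ASM__MB expanded to an "mb" instruction on SMP. The single-file sketch below mimics that include-twice trick with a body macro expanded twice; the XCHG_BODY/sketch_* names and the use of C11 atomics in place of the Alpha ldl_l/stl_c assembly are illustrative assumptions, not kernel code.

#include <stdatomic.h>
#include <stdio.h>

/* One body, expanded twice with a different name and barrier slot,
 * analogous to including asm/xchg.h twice with ____xchg/__ASM__MB redefined. */
#define XCHG_BODY(NAME, MB)						\
static unsigned long NAME(_Atomic unsigned int *m, unsigned long val)	\
{									\
	unsigned int old;						\
	/* stand-in for the Alpha ldl_l/stl_c retry loop */		\
	old = atomic_exchange_explicit(m, (unsigned int)val,		\
				       memory_order_relaxed);		\
	MB;		/* the __ASM__MB slot: nothing, or a barrier */	\
	return old;							\
}

/* 1st "inclusion": local variant, barrier slot left empty */
XCHG_BODY(sketch_xchg_u32_local, (void)0)
/* 2nd "inclusion": SMP variant, barrier slot is a full fence */
XCHG_BODY(sketch_xchg_u32, atomic_thread_fence(memory_order_seq_cst))

int main(void)
{
	_Atomic unsigned int word = 1;
	unsigned long prev;

	prev = sketch_xchg_u32(&word, 2);		/* SMP flavour */
	printf("prev=%lu now=%u\n", prev, (unsigned int)atomic_load(&word));
	prev = sketch_xchg_u32_local(&word, 3);	/* local flavour */
	printf("prev=%lu now=%u\n", prev, (unsigned int)atomic_load(&word));
	return 0;
}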
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h index fea4ea7..13cd427 100644 --- a/arch/alpha/include/asm/machvec.h +++ b/arch/alpha/include/asm/machvec.h @@ -80,7 +80,7 @@ struct alpha_machine_vector void (*update_irq_hw)(unsigned long, unsigned long, int); void (*ack_irq)(unsigned long); void (*device_interrupt)(unsigned long vector); - void (*machine_check)(u64 vector, u64 la); + void (*machine_check)(unsigned long vector, unsigned long la); void (*smp_callin)(void); void (*init_arch)(void); diff --git a/arch/alpha/include/asm/system.h b/arch/alpha/include/asm/system.h index afe20fa..5aa40cc 100644 --- a/arch/alpha/include/asm/system.h +++ b/arch/alpha/include/asm/system.h @@ -309,518 +309,71 @@ extern int __min_ipl; #define tbia() __tbi(-2, /* no second argument */) /* - * Atomic exchange. - * Since it can be used to implement critical sections - * it must clobber "memory" (also for interrupts in UP). + * Atomic exchange routines. */ -static inline unsigned long -__xchg_u8(volatile char *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " insbl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extbl %2,%4,%0\n" - " mskbl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -__xchg_u16(volatile short *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " inswl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extwl %2,%4,%0\n" - " mskwl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -__xchg_u32(volatile int *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldl_l %0,%4\n" - " bis $31,%3,%1\n" - " stl_c %1,%2\n" - " beq %1,2f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); - - return val; -} - -static inline unsigned long -__xchg_u64(volatile long *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldq_l %0,%4\n" - " bis $31,%3,%1\n" - " stq_c %1,%2\n" - " beq %1,2f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); +#define __ASM__MB +#define ____xchg(type, args...) __xchg ## type ## _local(args) +#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args) +#include <asm/xchg.h> - return val; -} - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid xchg(). 
*/ -extern void __xchg_called_with_bad_pointer(void); - -#define __xchg(ptr, x, size) \ -({ \ - unsigned long __xchg__res; \ - volatile void *__xchg__ptr = (ptr); \ - switch (size) { \ - case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \ - case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \ - case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \ - case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \ - default: __xchg_called_with_bad_pointer(); __xchg__res = x; \ - } \ - __xchg__res; \ -}) - -#define xchg(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ +#define xchg_local(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ }) -static inline unsigned long -__xchg_u8_local(volatile char *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " insbl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extbl %2,%4,%0\n" - " mskbl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -__xchg_u16_local(volatile short *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " inswl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extwl %2,%4,%0\n" - " mskwl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -__xchg_u32_local(volatile int *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldl_l %0,%4\n" - " bis $31,%3,%1\n" - " stl_c %1,%2\n" - " beq %1,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); - - return val; -} - -static inline unsigned long -__xchg_u64_local(volatile long *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldq_l %0,%4\n" - " bis $31,%3,%1\n" - " stq_c %1,%2\n" - " beq %1,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); - - return val; -} - -#define __xchg_local(ptr, x, size) \ -({ \ - unsigned long __xchg__res; \ - volatile void *__xchg__ptr = (ptr); \ - switch (size) { \ - case 1: __xchg__res = __xchg_u8_local(__xchg__ptr, x); break; \ - case 2: __xchg__res = __xchg_u16_local(__xchg__ptr, x); break; \ - case 4: __xchg__res = __xchg_u32_local(__xchg__ptr, x); break; \ - case 8: __xchg__res = __xchg_u64_local(__xchg__ptr, x); break; \ - default: __xchg_called_with_bad_pointer(); __xchg__res = x; \ - } \ - __xchg__res; \ -}) - -#define xchg_local(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \ - sizeof(*(ptr))); \ +#define cmpxchg_local(ptr, o, n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, \ + sizeof(*(ptr))); \ }) -/* - * Atomic compare and exchange. Compare OLD with MEM, if identical, - * store NEW in MEM. Return the initial value in MEM. 
Success is - * indicated by comparing RETURN with OLD. - * - * The memory barrier should be placed in SMP only when we actually - * make the change. If we don't change anything (so if the returned - * prev is equal to old) then we aren't acquiring anything new and - * we don't need any memory barrier as far I can tell. - */ - -#define __HAVE_ARCH_CMPXCHG 1 - -static inline unsigned long -__cmpxchg_u8(volatile char *m, long old, long new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " insbl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extbl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskbl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u16(volatile short *m, long old, long new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " inswl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extwl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskwl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u32(volatile int *m, int old, int new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldl_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stl_c %1,%2\n" - " beq %1,3f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} +#define cmpxchg64_local(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg_local((ptr), (o), (n)); \ + }) -static inline unsigned long -__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldq_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stq_c %1,%2\n" - " beq %1,3f\n" #ifdef CONFIG_SMP - " mb\n" +#undef __ASM__MB +#define __ASM__MB "\tmb\n" #endif - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid cmpxchg(). */ -extern void __cmpxchg_called_with_bad_pointer(void); - -static __always_inline unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) -{ - switch (size) { - case 1: - return __cmpxchg_u8(ptr, old, new); - case 2: - return __cmpxchg_u16(ptr, old, new); - case 4: - return __cmpxchg_u32(ptr, old, new); - case 8: - return __cmpxchg_u64(ptr, old, new); - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ +#undef ____xchg +#undef ____cmpxchg +#define ____xchg(type, args...) 
__xchg ##type(args) +#define ____cmpxchg(type, args...) __cmpxchg ##type(args) +#include <asm/xchg.h> + +#define xchg(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ }) -#define cmpxchg64(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg((ptr), (o), (n)); \ - }) - -static inline unsigned long -__cmpxchg_u8_local(volatile char *m, long old, long new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " insbl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extbl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskbl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u16_local(volatile short *m, long old, long new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " inswl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extwl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskwl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u32_local(volatile int *m, int old, int new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldl_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stl_c %1,%2\n" - " beq %1,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u64_local(volatile long *m, unsigned long old, unsigned long new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldq_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stq_c %1,%2\n" - " beq %1,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} - -static __always_inline unsigned long -__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, - int size) -{ - switch (size) { - case 1: - return __cmpxchg_u8_local(ptr, old, new); - case 2: - return __cmpxchg_u16_local(ptr, old, new); - case 4: - return __cmpxchg_u32_local(ptr, old, new); - case 8: - return __cmpxchg_u64_local(ptr, old, new); - } - __cmpxchg_called_with_bad_pointer(); - return old; -} -#define cmpxchg_local(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ +#define cmpxchg(ptr, o, n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr)));\ }) -#define cmpxchg64_local(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg_local((ptr), (o), (n)); \ + +#define cmpxchg64(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg((ptr), (o), (n)); \ }) +#undef __ASM__MB +#undef ____cmpxchg + +#define __HAVE_ARCH_CMPXCHG 1 #endif /* 
__ASSEMBLY__ */ diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h index c154135..f072f34 100644 --- a/arch/alpha/include/asm/types.h +++ b/arch/alpha/include/asm/types.h @@ -8,7 +8,12 @@ * not a major issue. However, for interoperability, libraries still * need to be careful to avoid a name clashes. */ + +#ifdef __KERNEL__ +#include <asm-generic/int-ll64.h> +#else #include <asm-generic/int-l64.h> +#endif #ifndef __ASSEMBLY__ diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 22de3b4..163f305 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -498,13 +498,13 @@ struct exception_table_entry }; /* Returns the new pc */ -#define fixup_exception(map_reg, fixup, pc) \ +#define fixup_exception(map_reg, _fixup, pc) \ ({ \ - if ((fixup)->fixup.bits.valreg != 31) \ - map_reg((fixup)->fixup.bits.valreg) = 0; \ - if ((fixup)->fixup.bits.errreg != 31) \ - map_reg((fixup)->fixup.bits.errreg) = -EFAULT; \ - (pc) + (fixup)->fixup.bits.nextinsn; \ + if ((_fixup)->fixup.bits.valreg != 31) \ + map_reg((_fixup)->fixup.bits.valreg) = 0; \ + if ((_fixup)->fixup.bits.errreg != 31) \ + map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \ + (pc) + (_fixup)->fixup.bits.nextinsn; \ }) diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h new file mode 100644 index 0000000..beba1b8 --- /dev/null +++ b/arch/alpha/include/asm/xchg.h @@ -0,0 +1,258 @@ +#ifndef __ALPHA_SYSTEM_H +#error Do not include xchg.h directly! +#else +/* + * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code + * except that local version do not have the expensive memory barrier. + * So this file is included twice from asm/system.h. + */ + +/* + * Atomic exchange. + * Since it can be used to implement critical sections + * it must clobber "memory" (also for interrupts in UP). 
+ */ + +static inline unsigned long +____xchg(_u8, volatile char *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4,7,%3\n" + " insbl %1,%4,%1\n" + "1: ldq_l %2,0(%3)\n" + " extbl %2,%4,%0\n" + " mskbl %2,%4,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%3)\n" + " beq %2,2f\n" + __ASM__MB + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u16, volatile short *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4,7,%3\n" + " inswl %1,%4,%1\n" + "1: ldq_l %2,0(%3)\n" + " extwl %2,%4,%0\n" + " mskwl %2,%4,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%3)\n" + " beq %2,2f\n" + __ASM__MB + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u32, volatile int *m, unsigned long val) +{ + unsigned long dummy; + + __asm__ __volatile__( + "1: ldl_l %0,%4\n" + " bis $31,%3,%1\n" + " stl_c %1,%2\n" + " beq %1,2f\n" + __ASM__MB + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +static inline unsigned long +____xchg(_u64, volatile long *m, unsigned long val) +{ + unsigned long dummy; + + __asm__ __volatile__( + "1: ldq_l %0,%4\n" + " bis $31,%3,%1\n" + " stq_c %1,%2\n" + " beq %1,2f\n" + __ASM__MB + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid xchg(). */ +extern void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +____xchg(, volatile void *ptr, unsigned long x, int size) +{ + switch (size) { + case 1: + return ____xchg(_u8, ptr, x); + case 2: + return ____xchg(_u16, ptr, x); + case 4: + return ____xchg(_u32, ptr, x); + case 8: + return ____xchg(_u64, ptr, x); + } + __xchg_called_with_bad_pointer(); + return x; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + * + * The memory barrier should be placed in SMP only when we actually + * make the change. If we don't change anything (so if the returned + * prev is equal to old) then we aren't acquiring anything new and + * we don't need any memory barrier as far I can tell. 
+ */ + +static inline unsigned long +____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5,7,%4\n" + " insbl %1,%5,%1\n" + "1: ldq_l %2,0(%4)\n" + " extbl %2,%5,%0\n" + " cmpeq %0,%6,%3\n" + " beq %3,2f\n" + " mskbl %2,%5,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%4)\n" + " beq %2,3f\n" + __ASM__MB + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5,7,%4\n" + " inswl %1,%5,%1\n" + "1: ldq_l %2,0(%4)\n" + " extwl %2,%5,%0\n" + " cmpeq %0,%6,%3\n" + " beq %3,2f\n" + " mskwl %2,%5,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%4)\n" + " beq %2,3f\n" + __ASM__MB + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u32, volatile int *m, int old, int new) +{ + unsigned long prev, cmp; + + __asm__ __volatile__( + "1: ldl_l %0,%5\n" + " cmpeq %0,%3,%1\n" + " beq %1,2f\n" + " mov %4,%1\n" + " stl_c %1,%2\n" + " beq %1,3f\n" + __ASM__MB + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp; + + __asm__ __volatile__( + "1: ldq_l %0,%5\n" + " cmpeq %0,%3,%1\n" + " beq %1,2f\n" + " mov %4,%1\n" + " stq_c %1,%2\n" + " beq %1,3f\n" + __ASM__MB + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid cmpxchg(). 
*/ +extern void __cmpxchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +____cmpxchg(, volatile void *ptr, unsigned long old, unsigned long new, + int size) +{ + switch (size) { + case 1: + return ____cmpxchg(_u8, ptr, old, new); + case 2: + return ____cmpxchg(_u16, ptr, old, new); + case 4: + return ____cmpxchg(_u32, ptr, old, new); + case 8: + return ____cmpxchg(_u64, ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#endif diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c index 11aee01..985e5c1 100644 --- a/arch/alpha/kernel/err_ev6.c +++ b/arch/alpha/kernel/err_ev6.c @@ -157,8 +157,8 @@ ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn, err_print_prefix, streamname[stream], bitsname[bits], sourcename[source]); - printk("%s Address: 0x%016lx\n" - " Syndrome[upper.lower]: %02lx.%02lx\n", + printk("%s Address: 0x%016llx\n" + " Syndrome[upper.lower]: %02llx.%02llx\n", err_print_prefix, c_addr, c2_syn, c1_syn); diff --git a/arch/alpha/kernel/err_ev7.c b/arch/alpha/kernel/err_ev7.c index 68cd493..73770c6 100644 --- a/arch/alpha/kernel/err_ev7.c +++ b/arch/alpha/kernel/err_ev7.c @@ -246,13 +246,13 @@ ev7_process_pal_subpacket(struct el_subpacket *header) switch(header->type) { case EL_TYPE__PAL__LOGOUT_FRAME: - printk("%s*** MCHK occurred on LPID %ld (RBOX %lx)\n", + printk("%s*** MCHK occurred on LPID %ld (RBOX %llx)\n", err_print_prefix, packet->by_type.logout.whami, packet->by_type.logout.rbox_whami); el_print_timestamp(&packet->by_type.logout.timestamp); - printk("%s EXC_ADDR: %016lx\n" - " HALT_CODE: %lx\n", + printk("%s EXC_ADDR: %016llx\n" + " HALT_CODE: %llx\n", err_print_prefix, packet->by_type.logout.exc_addr, packet->by_type.logout.halt_code); diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c index 413bf37..6bfd243 100644 --- a/arch/alpha/kernel/err_marvel.c +++ b/arch/alpha/kernel/err_marvel.c @@ -129,7 +129,7 @@ marvel_print_po7_crrct_sym(u64 crrct_sym) printk("%s Correctable Error Symptoms:\n" - "%s Syndrome: 0x%lx\n", + "%s Syndrome: 0x%llx\n", err_print_prefix, err_print_prefix, EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__SYN)); marvel_print_err_cyc(EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__ERR_CYC)); @@ -186,7 +186,7 @@ marvel_print_po7_uncrr_sym(u64 uncrr_sym, u64 valid_mask) uncrr_sym &= valid_mask; if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__SYN)) - printk("%s Syndrome: 0x%lx\n", + printk("%s Syndrome: 0x%llx\n", err_print_prefix, EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__SYN)); @@ -307,7 +307,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym) sprintf(opcode_str, "BlkIO"); break; default: - sprintf(opcode_str, "0x%lx\n", + sprintf(opcode_str, "0x%llx\n", EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)); break; } @@ -321,7 +321,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym) opcode_str); if (0xC5 != EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) - printk("%s Packet Offset 0x%08lx\n", + printk("%s Packet Offset 0x%08llx\n", err_print_prefix, EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_PKT_OFF)); } @@ -480,8 +480,8 @@ marvel_print_po7_err_sum(struct ev7_pal_io_subpacket *io) printk("%s Lost Error\n", err_print_prefix); printk("%s Failing Packet:\n" - "%s Cycle 1: %016lx\n" - "%s Cycle 2: %016lx\n", + "%s Cycle 1: %016llx\n" + "%s Cycle 2: %016llx\n", err_print_prefix, err_print_prefix, io->po7_err_pkt0, err_print_prefix, io->po7_err_pkt1); @@ -515,9 +515,9 @@ marvel_print_pox_tlb_err(u64 tlb_err) if (!(tlb_err & IO7__POX_TLBERR__ERR_VALID)) return; - printk("%s TLB Error on index 
0x%lx:\n" + printk("%s TLB Error on index 0x%llx:\n" "%s - %s\n" - "%s - Addr: 0x%016lx\n", + "%s - Addr: 0x%016llx\n", err_print_prefix, EXTRACT(tlb_err, IO7__POX_TLBERR__ERR_TLB_PTR), err_print_prefix, @@ -579,7 +579,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt) sprintf(message, "Uncorrectable Split Write Data Error"); break; default: - sprintf(message, "%08lx\n", + sprintf(message, "%08llx\n", EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MESSAGE)); break; } @@ -620,9 +620,9 @@ marvel_print_pox_trans_sum(u64 trans_sum) return; printk("%s Transaction Summary:\n" - "%s Command: 0x%lx - %s\n" - "%s Address: 0x%016lx%s\n" - "%s PCI-X Master Slot: 0x%lx\n", + "%s Command: 0x%llx - %s\n" + "%s Address: 0x%016llx%s\n" + "%s PCI-X Master Slot: 0x%llx\n", err_print_prefix, err_print_prefix, EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD), @@ -964,12 +964,12 @@ marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print) #if 0 printk("%s PORT 7 ERROR:\n" - "%s PO7_ERROR_SUM: %016lx\n" - "%s PO7_UNCRR_SYM: %016lx\n" - "%s PO7_CRRCT_SYM: %016lx\n" - "%s PO7_UGBGE_SYM: %016lx\n" - "%s PO7_ERR_PKT0: %016lx\n" - "%s PO7_ERR_PKT1: %016lx\n", + "%s PO7_ERROR_SUM: %016llx\n" + "%s PO7_UNCRR_SYM: %016llx\n" + "%s PO7_CRRCT_SYM: %016llx\n" + "%s PO7_UGBGE_SYM: %016llx\n" + "%s PO7_ERR_PKT0: %016llx\n" + "%s PO7_ERR_PKT1: %016llx\n", err_print_prefix, err_print_prefix, io->po7_error_sum, err_print_prefix, io->po7_uncrr_sym, @@ -987,12 +987,12 @@ marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print) if (!MARVEL_IO_ERR_VALID(io->ports[i].pox_err_sum)) continue; - printk("%s PID %u PORT %d POx_ERR_SUM: %016lx\n", + printk("%s PID %u PORT %d POx_ERR_SUM: %016llx\n", err_print_prefix, lf_subpackets->io_pid, i, io->ports[i].pox_err_sum); marvel_print_pox_err(io->ports[i].pox_err_sum, &io->ports[i]); - printk("%s [ POx_FIRST_ERR: %016lx ]\n", + printk("%s [ POx_FIRST_ERR: %016llx ]\n", err_print_prefix, io->ports[i].pox_first_err); marvel_print_pox_err(io->ports[i].pox_first_err, &io->ports[i]); diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c index 257449e..c7e28a8 100644 --- a/arch/alpha/kernel/err_titan.c +++ b/arch/alpha/kernel/err_titan.c @@ -107,12 +107,12 @@ titan_parse_p_serror(int which, u64 serror, int print) if (!print) return status; - printk("%s PChip %d SERROR: %016lx\n", + printk("%s PChip %d SERROR: %016llx\n", err_print_prefix, which, serror); if (serror & TITAN__PCHIP_SERROR__ECCMASK) { printk("%s %sorrectable ECC Error:\n" " Source: %-6s Command: %-8s Syndrome: 0x%08x\n" - " Address: 0x%lx\n", + " Address: 0x%llx\n", err_print_prefix, (serror & TITAN__PCHIP_SERROR__UECC) ? "Unc" : "C", serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)], @@ -223,7 +223,7 @@ titan_parse_p_perror(int which, int port, u64 perror, int print) if (!print) return status; - printk("%s PChip %d %cPERROR: %016lx\n", + printk("%s PChip %d %cPERROR: %016llx\n", err_print_prefix, which, port ? 
'A' : 'G', perror); if (perror & TITAN__PCHIP_PERROR__IPTPW) @@ -316,7 +316,7 @@ titan_parse_p_agperror(int which, u64 agperror, int print) addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3; len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN); - printk("%s PChip %d AGPERROR: %016lx\n", err_print_prefix, + printk("%s PChip %d AGPERROR: %016llx\n", err_print_prefix, which, agperror); if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW) printk("%s No Window\n", err_print_prefix); @@ -597,16 +597,16 @@ privateer_process_680_frame(struct el_common *mchk_header, int print) return status; /* TODO - decode instead of just dumping... */ - printk("%s Summary Flags: %016lx\n" - " CChip DIRx: %016lx\n" - " System Management IR: %016lx\n" - " CPU IR: %016lx\n" - " Power Supply IR: %016lx\n" - " LM78 Fault Status: %016lx\n" - " System Doors: %016lx\n" - " Temperature Warning: %016lx\n" - " Fan Control: %016lx\n" - " Fatal Power Down Code: %016lx\n", + printk("%s Summary Flags: %016llx\n" + " CChip DIRx: %016llx\n" + " System Management IR: %016llx\n" + " CPU IR: %016llx\n" + " Power Supply IR: %016llx\n" + " LM78 Fault Status: %016llx\n" + " System Doors: %016llx\n" + " Temperature Warning: %016llx\n" + " Fan Control: %016llx\n" + " Fatal Power Down Code: %016llx\n", err_print_prefix, emchk->summary, emchk->c_dirx, diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c index a3b9388..a91ba28 100644 --- a/arch/alpha/kernel/pci.c +++ b/arch/alpha/kernel/pci.c @@ -168,7 +168,7 @@ pcibios_align_resource(void *data, struct resource *res, */ /* Align to multiple of size of minimum base. */ - alignto = max(0x1000UL, align); + alignto = max_t(resource_size_t, 0x1000, align); start = ALIGN(start, alignto); if (hose->sparse_mem_base && size <= 7 * 16*MB) { if (((start / (16*MB)) & 0x7) == 0) { diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index b9094da..bfb880a 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -247,7 +247,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, && paddr + size <= __direct_map_size) { ret = paddr + __direct_map_base; - DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n", + DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n", cpu_addr, size, ret, __builtin_return_address(0)); return ret; @@ -258,7 +258,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, if (dac_allowed) { ret = paddr + alpha_mv.pci_dac_offset; - DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n", + DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n", cpu_addr, size, ret, __builtin_return_address(0)); return ret; @@ -299,7 +299,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, ret = arena->dma_base + dma_ofs * PAGE_SIZE; ret += (unsigned long)cpu_addr & ~PAGE_MASK; - DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n", + DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n", cpu_addr, size, npages, ret, __builtin_return_address(0)); return ret; @@ -355,14 +355,14 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size, && dma_addr < __direct_map_base + __direct_map_size) { /* Nothing to do. 
*/ - DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n", + DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n", dma_addr, size, __builtin_return_address(0)); return; } if (dma_addr > 0xffffffff) { - DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n", + DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n", dma_addr, size, __builtin_return_address(0)); return; } @@ -373,9 +373,9 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size, dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT; if (dma_ofs * PAGE_SIZE >= arena->size) { - printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx " - " base %lx size %x\n", dma_addr, arena->dma_base, - arena->size); + printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx " + " base %llx size %x\n", + dma_addr, arena->dma_base, arena->size); return; BUG(); } @@ -394,7 +394,7 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size, spin_unlock_irqrestore(&arena->lock, flags); - DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n", + DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n", dma_addr, size, npages, __builtin_return_address(0)); } EXPORT_SYMBOL(pci_unmap_single); @@ -444,7 +444,7 @@ try_again: goto try_again; } - DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n", + DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n", size, cpu_addr, *dma_addrp, __builtin_return_address(0)); return cpu_addr; @@ -464,7 +464,7 @@ pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr, pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); free_pages((unsigned long)cpu_addr, get_order(size)); - DBGA2("pci_free_consistent: [%x,%lx] from %p\n", + DBGA2("pci_free_consistent: [%llx,%zx] from %p\n", dma_addr, size, __builtin_return_address(0)); } EXPORT_SYMBOL(pci_free_consistent); @@ -551,7 +551,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, out->dma_address = paddr + __direct_map_base; out->dma_length = size; - DBGA(" sg_fill: [%p,%lx] -> direct %lx\n", + DBGA(" sg_fill: [%p,%lx] -> direct %llx\n", __va(paddr), size, out->dma_address); return 0; @@ -563,7 +563,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, out->dma_address = paddr + alpha_mv.pci_dac_offset; out->dma_length = size; - DBGA(" sg_fill: [%p,%lx] -> DAC %lx\n", + DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n", __va(paddr), size, out->dma_address); return 0; @@ -589,7 +589,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr; out->dma_length = size; - DBGA(" sg_fill: [%p,%lx] -> sg %lx np %ld\n", + DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n", __va(paddr), size, out->dma_address, npages); /* All virtually contiguous. We need to find the length of each @@ -752,7 +752,7 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, if (addr > 0xffffffff) { /* It's a DAC address -- nothing to do. */ - DBGA(" (%ld) DAC [%lx,%lx]\n", + DBGA(" (%ld) DAC [%llx,%zx]\n", sg - end + nents, addr, size); continue; } @@ -760,12 +760,12 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, if (addr >= __direct_map_base && addr < __direct_map_base + __direct_map_size) { /* Nothing to do. 
*/ - DBGA(" (%ld) direct [%lx,%lx]\n", + DBGA(" (%ld) direct [%llx,%zx]\n", sg - end + nents, addr, size); continue; } - DBGA(" (%ld) sg [%lx,%lx]\n", + DBGA(" (%ld) sg [%llx,%zx]\n", sg - end + nents, addr, size); npages = iommu_num_pages(addr, size, PAGE_SIZE); diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h index fe14c67..567f259 100644 --- a/arch/alpha/kernel/proto.h +++ b/arch/alpha/kernel/proto.h @@ -20,7 +20,7 @@ struct pci_controller; extern struct pci_ops apecs_pci_ops; extern void apecs_init_arch(void); extern void apecs_pci_clr_err(void); -extern void apecs_machine_check(u64, u64); +extern void apecs_machine_check(unsigned long vector, unsigned long la_ptr); extern void apecs_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_cia.c */ @@ -29,7 +29,7 @@ extern void cia_init_pci(void); extern void cia_init_arch(void); extern void pyxis_init_arch(void); extern void cia_kill_arch(int); -extern void cia_machine_check(u64, u64); +extern void cia_machine_check(unsigned long vector, unsigned long la_ptr); extern void cia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_irongate.c */ @@ -42,7 +42,7 @@ extern void irongate_machine_check(u64, u64); /* core_lca.c */ extern struct pci_ops lca_pci_ops; extern void lca_init_arch(void); -extern void lca_machine_check(u64, u64); +extern void lca_machine_check(unsigned long vector, unsigned long la_ptr); extern void lca_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_marvel.c */ @@ -64,7 +64,7 @@ void io7_clear_errors(struct io7 *io7); extern struct pci_ops mcpcia_pci_ops; extern void mcpcia_init_arch(void); extern void mcpcia_init_hoses(void); -extern void mcpcia_machine_check(u64, u64); +extern void mcpcia_machine_check(unsigned long vector, unsigned long la_ptr); extern void mcpcia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_polaris.c */ @@ -72,14 +72,14 @@ extern struct pci_ops polaris_pci_ops; extern int polaris_read_config_dword(struct pci_dev *, int, u32 *); extern int polaris_write_config_dword(struct pci_dev *, int, u32); extern void polaris_init_arch(void); -extern void polaris_machine_check(u64, u64); +extern void polaris_machine_check(unsigned long vector, unsigned long la_ptr); #define polaris_pci_tbi ((void *)0) /* core_t2.c */ extern struct pci_ops t2_pci_ops; extern void t2_init_arch(void); extern void t2_kill_arch(int); -extern void t2_machine_check(u64, u64); +extern void t2_machine_check(unsigned long vector, unsigned long la_ptr); extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_titan.c */ @@ -94,14 +94,14 @@ extern struct _alpha_agp_info *titan_agp_info(void); extern struct pci_ops tsunami_pci_ops; extern void tsunami_init_arch(void); extern void tsunami_kill_arch(int); -extern void tsunami_machine_check(u64, u64); +extern void tsunami_machine_check(unsigned long vector, unsigned long la_ptr); extern void tsunami_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_wildfire.c */ extern struct pci_ops wildfire_pci_ops; extern void wildfire_init_arch(void); extern void wildfire_kill_arch(int); -extern void wildfire_machine_check(u64, u64); +extern void wildfire_machine_check(unsigned long vector, unsigned long la_ptr); extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); extern int wildfire_pa_to_nid(unsigned long); extern int wildfire_cpuid_to_nid(int); diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 02bee69..80df86c 100644 --- 
a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -1255,7 +1255,7 @@ show_cpuinfo(struct seq_file *f, void *slot) platform_string(), nr_processors); #ifdef CONFIG_SMP - seq_printf(f, "cpus active\t\t: %d\n" + seq_printf(f, "cpus active\t\t: %u\n" "cpu active mask\t\t: %016lx\n", num_online_cpus(), cpus_addr(cpu_possible_map)[0]); #endif diff --git a/arch/alpha/kernel/smc37c669.c b/arch/alpha/kernel/smc37c669.c index fd467b2..bca5bda 100644 --- a/arch/alpha/kernel/smc37c669.c +++ b/arch/alpha/kernel/smc37c669.c @@ -2542,8 +2542,8 @@ void __init SMC669_Init ( int index ) SMC37c669_display_device_info( ); #endif local_irq_restore(flags); - printk( "SMC37c669 Super I/O Controller found @ 0x%lx\n", - (unsigned long) SMC_base ); + printk( "SMC37c669 Super I/O Controller found @ 0x%p\n", + SMC_base ); } else { local_irq_restore(flags); diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c index e2516f9..2b5caf3 100644 --- a/arch/alpha/kernel/sys_jensen.c +++ b/arch/alpha/kernel/sys_jensen.c @@ -244,12 +244,11 @@ jensen_init_arch(void) } static void -jensen_machine_check (u64 vector, u64 la) +jensen_machine_check(unsigned long vector, unsigned long la) { printk(KERN_CRIT "Machine check\n"); } - /* * The System Vector */ diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c index d232e42..9e26325 100644 --- a/arch/alpha/kernel/sys_sable.c +++ b/arch/alpha/kernel/sys_sable.c @@ -453,7 +453,7 @@ sable_lynx_enable_irq(unsigned int irq) sable_lynx_irq_swizzle->update_irq_hw(bit, mask); spin_unlock(&sable_lynx_irq_lock); #if 0 - printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n", + printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n", __func__, mask, bit, irq); #endif } @@ -469,7 +469,7 @@ sable_lynx_disable_irq(unsigned int irq) sable_lynx_irq_swizzle->update_irq_hw(bit, mask); spin_unlock(&sable_lynx_irq_lock); #if 0 - printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n", + printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n", __func__, mask, bit, irq); #endif } diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index cefc5a3..6ee7655 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -623,7 +623,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, } lock_kernel(); - printk("Bad unaligned kernel access at %016lx: %p %lx %ld\n", + printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n", pc, va, opcode, reg); do_exit(SIGSEGV); |
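
The %lx to %llx churn throughout the printk calls follows directly from the asm/types.h hunk: with asm-generic/int-ll64.h, u64 becomes unsigned long long even on 64-bit Alpha and must be printed with %llx, while plain unsigned long arguments (such as the new machine_check() parameters) keep %lx. A small sketch of that pairing, using plain printf instead of printk and example values rather than anything from the patch:

#include <stdio.h>

typedef unsigned long long u64;	/* what asm-generic/int-ll64.h now gives the kernel */

int main(void)
{
	u64 c_addr = 0xfffffc0000000000ULL;	/* example u64 value */
	unsigned long vector = 0x660;		/* example unsigned long value */

	printf("Address: 0x%016llx\n", c_addr);	/* %llx for u64 */
	printf("Vector:  0x%lx\n", vector);	/* %lx  for unsigned long */
	return 0;
}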
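
For reference, the contract spelled out in the cmpxchg comment of the new asm/xchg.h ("success is indicated by comparing RETURN with OLD") is normally consumed in a read/compute/retry loop. A minimal user-space sketch of that usage follows; my_cmpxchg() is a hypothetical stand-in built on C11 atomics, not the kernel's cmpxchg().

#include <stdatomic.h>
#include <stdio.h>

static unsigned long my_cmpxchg(_Atomic unsigned long *ptr,
				unsigned long old, unsigned long new)
{
	/* Store new only if *ptr still equals old; always return the previous value. */
	atomic_compare_exchange_strong_explicit(ptr, &old, new,
						memory_order_seq_cst,
						memory_order_seq_cst);
	return old;	/* on failure, C11 writes the observed value into old */
}

int main(void)
{
	_Atomic unsigned long counter = 5;
	unsigned long seen, want;

	do {
		seen = atomic_load(&counter);
		want = seen + 1;
		/* retry if counter changed between the load and the compare-exchange */
	} while (my_cmpxchg(&counter, seen, want) != seen);

	printf("counter = %lu\n", atomic_load(&counter));
	return 0;
}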