Diffstat (limited to 'arch/ia64/include')
-rw-r--r--  arch/ia64/include/asm/elf.h      | 15
-rw-r--r--  arch/ia64/include/asm/io.h       | 10
-rw-r--r--  arch/ia64/include/asm/sections.h | 14
-rw-r--r--  arch/ia64/include/asm/sn/bte.h   |  9
4 files changed, 40 insertions(+), 8 deletions(-)
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 5e0c1a6..2acb6b6 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -266,4 +266,19 @@ do { \
 	} \
 } while (0)
 
+/*
+ * format for entries in the Global Offset Table
+ */
+struct got_entry {
+	uint64_t val;
+};
+
+/*
+ * Layout of the Function Descriptor
+ */
+struct fdesc {
+	uint64_t ip;
+	uint64_t gp;
+};
+
 #endif /* _ASM_IA64_ELF_H */
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 260a85a..7f25750 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -19,6 +19,8 @@
  * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
  */
 
+#include <asm/unaligned.h>
+
 /* We don't use IO slowdowns on the ia64, but.. */
 #define __SLOW_DOWN_IO do { } while (0)
 #define SLOW_DOWN_IO do { } while (0)
@@ -241,7 +243,7 @@ __insw (unsigned long port, void *dst, unsigned long count)
 	unsigned short *dp = dst;
 
 	while (count--)
-		*dp++ = platform_inw(port);
+		put_unaligned(platform_inw(port), dp++);
 }
 
 static inline void
@@ -250,7 +252,7 @@ __insl (unsigned long port, void *dst, unsigned long count)
 	unsigned int *dp = dst;
 
 	while (count--)
-		*dp++ = platform_inl(port);
+		put_unaligned(platform_inl(port), dp++);
 }
 
 static inline void
@@ -268,7 +270,7 @@ __outsw (unsigned long port, const void *src, unsigned long count)
 	const unsigned short *sp = src;
 
 	while (count--)
-		platform_outw(*sp++, port);
+		platform_outw(get_unaligned(sp++), port);
 }
 
 static inline void
@@ -277,7 +279,7 @@ __outsl (unsigned long port, const void *src, unsigned long count)
 	const unsigned int *sp = src;
 
 	while (count--)
-		platform_outl(*sp++, port);
+		platform_outl(get_unaligned(sp++), port);
 }
 
 /*
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
index 7286e4a..f667998 100644
--- a/arch/ia64/include/asm/sections.h
+++ b/arch/ia64/include/asm/sections.h
@@ -6,6 +6,8 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
+#include <linux/elf.h>
+#include <linux/uaccess.h>
 #include <asm-generic/sections.h>
 
 extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
@@ -21,5 +23,17 @@ extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_b
 extern char __start_unwind[], __end_unwind[];
 extern char __start_ivt_text[], __end_ivt_text[];
 
+#undef dereference_function_descriptor
+static inline void *dereference_function_descriptor(void *ptr)
+{
+	struct fdesc *desc = ptr;
+	void *p;
+
+	if (!probe_kernel_address(&desc->ip, p))
+		ptr = p;
+	return ptr;
+}
+
+
 #endif /* _ASM_IA64_SECTIONS_H */
diff --git a/arch/ia64/include/asm/sn/bte.h b/arch/ia64/include/asm/sn/bte.h
index a0d214f..5efecf0 100644
--- a/arch/ia64/include/asm/sn/bte.h
+++ b/arch/ia64/include/asm/sn/bte.h
@@ -223,10 +223,11 @@ extern void bte_error_handler(unsigned long);
  * until the transfer is complete.  In order to get the asynch
  * version of bte_copy, you must perform this check yourself.
  */
-#define BTE_UNALIGNED_COPY(src, dest, len, mode)			\
-	(((len & L1_CACHE_MASK) || (src & L1_CACHE_MASK) ||		\
-	  (dest & L1_CACHE_MASK)) ?					\
-	 bte_unaligned_copy(src, dest, len, mode) :			\
+#define BTE_UNALIGNED_COPY(src, dest, len, mode)			\
+	(((len & (L1_CACHE_BYTES - 1)) ||				\
+	  (src & (L1_CACHE_BYTES - 1)) ||				\
+	  (dest & (L1_CACHE_BYTES - 1))) ?				\
+	 bte_unaligned_copy(src, dest, len, mode) :			\
 	 bte_copy(src, dest, len, mode, NULL))