author | Ingo Molnar <mingo@elte.hu> | 2008-10-28 16:26:12 +0100
committer | Ingo Molnar <mingo@elte.hu> | 2008-10-28 16:26:12 +0100
commit | 7a9787e1eba95a166265e6a260cf30af04ef0a99 (patch)
tree | e730a4565e0318140d2fbd2f0415d18a339d7336 /include/asm-generic
parent | 41b9eb264c8407655db57b60b4457fe1b2ec9977 (diff)
parent | 0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into x86/pci-ioapic-boot-irq-quirks
Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/Kbuild.asm | 6
-rw-r--r-- | include/asm-generic/bug.h | 37
-rw-r--r-- | include/asm-generic/dma-coherent.h | 32
-rw-r--r-- | include/asm-generic/dma-mapping-broken.h | 2
-rw-r--r-- | include/asm-generic/dma-mapping.h | 4
-rw-r--r-- | include/asm-generic/gpio.h | 56
-rw-r--r-- | include/asm-generic/int-ll64.h | 2
-rw-r--r-- | include/asm-generic/ioctl.h | 4
-rw-r--r-- | include/asm-generic/mutex-dec.h | 26
-rw-r--r-- | include/asm-generic/mutex-xchg.h | 9
-rw-r--r-- | include/asm-generic/pci-dma-compat.h | 4
-rw-r--r-- | include/asm-generic/pgtable-nopmd.h | 6
-rw-r--r-- | include/asm-generic/rtc.h | 36
-rw-r--r-- | include/asm-generic/sections.h | 6
-rw-r--r-- | include/asm-generic/siginfo.h | 2
-rw-r--r-- | include/asm-generic/statfs.h | 65
-rw-r--r-- | include/asm-generic/syscall.h | 141
-rw-r--r-- | include/asm-generic/vmlinux.lds.h | 43
18 files changed, 389 insertions, 92 deletions
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index 7cd25b8..1870d5e 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -1,6 +1,10 @@
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \
+              $(srctree)/include/asm-$(SRCARCH)/kvm.h),)
 header-y += kvm.h
+endif
 
-ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \
+              $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
 unifdef-y += a.out.h
 endif
 unifdef-y += auxvec.h
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 2632328..12c07c1 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -22,7 +22,7 @@ struct bug_entry {
 
 #ifndef HAVE_ARCH_BUG
 #define BUG() do { \
-	printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \
+	printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
 	panic("BUG!"); \
 } while (0)
 #endif
@@ -34,9 +34,14 @@ struct bug_entry {
 #ifndef __WARN
 #ifndef __ASSEMBLY__
 extern void warn_on_slowpath(const char *file, const int line);
+extern void warn_slowpath(const char *file, const int line,
+		const char *fmt, ...) __attribute__((format(printf, 3, 4)));
 #define WANT_WARN_ON_SLOWPATH
 #endif
 #define __WARN() warn_on_slowpath(__FILE__, __LINE__)
+#define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg)
+#else
+#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
 #endif
 
 #ifndef WARN_ON
@@ -48,6 +53,15 @@ extern void warn_on_slowpath(const char *file, const int line);
 })
 #endif
 
+#ifndef WARN
+#define WARN(condition, format...) ({					\
+	int __ret_warn_on = !!(condition);				\
+	if (unlikely(__ret_warn_on))					\
+		__WARN_printf(format);					\
+	unlikely(__ret_warn_on);					\
+})
+#endif
+
 #else /* !CONFIG_BUG */
 #ifndef HAVE_ARCH_BUG
 #define BUG()
@@ -63,6 +77,14 @@ extern void warn_on_slowpath(const char *file, const int line);
 	unlikely(__ret_warn_on);					\
 })
 #endif
+
+#ifndef WARN
+#define WARN(condition, format...) ({					\
+	int __ret_warn_on = !!(condition);				\
+	unlikely(__ret_warn_on);					\
+})
+#endif
+
 #endif
 
 #define WARN_ON_ONCE(condition)	({				\
@@ -75,6 +97,19 @@ extern void warn_on_slowpath(const char *file, const int line);
 	unlikely(__ret_warn_once);				\
 })
 
+#define WARN_ONCE(condition, format...)	({			\
+	static int __warned;					\
+	int __ret_warn_once = !!(condition);			\
+								\
+	if (unlikely(__ret_warn_once))				\
+		if (WARN(!__warned, format))			\
+			__warned = 1;				\
+	unlikely(__ret_warn_once);				\
+})
+
+#define WARN_ON_RATELIMIT(condition, state)			\
+		WARN_ON((condition) && __ratelimit(state))
+
 #ifdef CONFIG_SMP
 # define WARN_ON_SMP(x)			WARN_ON(x)
 #else
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
new file mode 100644
index 0000000..85a3ffa
--- /dev/null
+++ b/include/asm-generic/dma-coherent.h
@@ -0,0 +1,32 @@
+#ifndef DMA_COHERENT_H
+#define DMA_COHERENT_H
+
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+/*
+ * These two functions are only for dma allocator.
+ * Don't use them in device drivers.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+				       dma_addr_t *dma_handle, void **ret);
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+
+/*
+ * Standard interface
+ */
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+#else
+#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_coherent(dev, order, vaddr) (0)
+#endif
+
+#endif
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index e2468f8..82cd0cb 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -61,7 +61,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 #define dma_sync_sg_for_device dma_sync_sg_for_cpu
 
 extern int
-dma_mapping_error(dma_addr_t dma_addr);
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 extern int
 dma_supported(struct device *dev, u64 mask);
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index 783ab99..189486c 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -144,9 +144,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
 }
 
 static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return pci_dma_mapping_error(dma_addr);
+	return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
 }
 
 
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 6be061d..81797ec 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -2,8 +2,9 @@
 #define _ASM_GENERIC_GPIO_H
 
 #include <linux/types.h>
+#include <linux/errno.h>
 
-#ifdef CONFIG_HAVE_GPIO_LIB
+#ifdef CONFIG_GPIOLIB
 
 #include <linux/compiler.h>
 
@@ -13,7 +14,7 @@
  *
  * While the GPIO programming interface defines valid GPIO numbers
  * to be in the range 0..MAX_INT, this library restricts them to the
- * smaller range 0..ARCH_NR_GPIOS.
+ * smaller range 0..ARCH_NR_GPIOS-1.
  */
 
 #ifndef ARCH_NR_GPIOS
@@ -32,11 +33,19 @@ struct module;
 /**
  * struct gpio_chip - abstract a GPIO controller
  * @label: for diagnostics
+ * @dev: optional device providing the GPIOs
+ * @owner: helps prevent removal of modules exporting active GPIOs
+ * @request: optional hook for chip-specific activation, such as
+ *	enabling module power and clock; may sleep
+ * @free: optional hook for chip-specific deactivation, such as
+ *	disabling module power and clock; may sleep
  * @direction_input: configures signal "offset" as input, or returns error
  * @get: returns value for signal "offset"; for output signals this
  *	returns either the value actually sensed, or zero
  * @direction_output: configures signal "offset" as output, or returns error
  * @set: assigns output value for signal "offset"
+ * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
+ *	implementation may not sleep
  * @dbg_show: optional routine to show contents in debugfs; default code
  *	will be used when this is omitted, but custom code can show extra
  *	state (such as pullup/pulldown configuration).
@@ -58,9 +67,15 @@
  * is calculated by subtracting @base from the gpio number.
  */
 struct gpio_chip {
-	char			*label;
+	const char		*label;
+	struct device		*dev;
 	struct module		*owner;
 
+	int			(*request)(struct gpio_chip *chip,
+						unsigned offset);
+	void			(*free)(struct gpio_chip *chip,
+						unsigned offset);
+
 	int			(*direction_input)(struct gpio_chip *chip,
 						unsigned offset);
 	int			(*get)(struct gpio_chip *chip,
@@ -69,11 +84,16 @@ struct gpio_chip {
 						unsigned offset, int value);
 	void			(*set)(struct gpio_chip *chip,
 						unsigned offset, int value);
+
+	int			(*to_irq)(struct gpio_chip *chip,
+						unsigned offset);
+
 	void			(*dbg_show)(struct seq_file *s,
 					struct gpio_chip *chip);
 	int			base;
 	u16			ngpio;
 	unsigned		can_sleep:1;
+	unsigned		exported:1;
 };
 
 extern const char *gpiochip_is_requested(struct gpio_chip *chip,
@@ -107,8 +127,20 @@ extern void __gpio_set_value(unsigned gpio, int value);
 
 extern int __gpio_cansleep(unsigned gpio);
 
+extern int __gpio_to_irq(unsigned gpio);
+
+#ifdef CONFIG_GPIO_SYSFS
+
+/*
+ * A sysfs interface can be exported by individual drivers if they want,
+ * but more typically is configured entirely from userspace.
+ */
+extern int gpio_export(unsigned gpio, bool direction_may_change);
+extern void gpio_unexport(unsigned gpio);
+
+#endif /* CONFIG_GPIO_SYSFS */
 
-#else
+#else /* !CONFIG_HAVE_GPIO_LIB */
 
 static inline int gpio_is_valid(int number)
 {
@@ -137,6 +169,20 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
 	gpio_set_value(gpio, value);
 }
 
-#endif
+#endif /* !CONFIG_HAVE_GPIO_LIB */
+
+#ifndef CONFIG_GPIO_SYSFS
+
+/* sysfs support is only available with gpiolib, where it's optional */
+
+static inline int gpio_export(unsigned gpio, bool direction_may_change)
+{
+	return -ENOSYS;
+}
+
+static inline void gpio_unexport(unsigned gpio)
+{
+}
+#endif /* CONFIG_GPIO_SYSFS */
 
 #endif /* _ASM_GENERIC_GPIO_H */
diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
index 2609489..f9bc9ac 100644
--- a/include/asm-generic/int-ll64.h
+++ b/include/asm-generic/int-ll64.h
@@ -26,7 +26,7 @@ typedef unsigned int __u32;
 #ifdef __GNUC__
 __extension__ typedef __signed__ long long __s64;
 __extension__ typedef unsigned long long __u64;
-#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+#else
 typedef __signed__ long long __s64;
 typedef unsigned long long __u64;
 #endif
diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h
index 8641813..15828b2 100644
--- a/include/asm-generic/ioctl.h
+++ b/include/asm-generic/ioctl.h
@@ -68,12 +68,16 @@
 	 ((nr)   << _IOC_NRSHIFT) | \
 	 ((size) << _IOC_SIZESHIFT))
 
+#ifdef __KERNEL__
 /* provoke compile error for invalid uses of size argument */
 extern unsigned int __invalid_size_argument_for_IOC;
 #define _IOC_TYPECHECK(t) \
 	((sizeof(t) == sizeof(t[1]) && \
 	  sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
 	  sizeof(t) : __invalid_size_argument_for_IOC)
+#else
+#define _IOC_TYPECHECK(t) (sizeof(t))
+#endif
 
 /* used to create numbers */
 #define _IO(type,nr)		_IOC(_IOC_NONE,(type),(nr),0)
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index ed108be..f104af7 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -22,8 +22,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		fail_fn(count);
-	else
-		smp_mb();
 }
 
 /**
@@ -41,10 +39,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		return fail_fn(count);
-	else {
-		smp_mb();
-		return 0;
-	}
+	return 0;
 }
 
 /**
@@ -63,7 +58,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	smp_mb();
 	if (unlikely(atomic_inc_return(count) <= 0))
 		fail_fn(count);
 }
@@ -88,25 +82,9 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	/*
-	 * We have two variants here. The cmpxchg based one is the best one
-	 * because it never induce a false contention state. It is included
-	 * here because architectures using the inc/dec algorithms over the
-	 * xchg ones are much more likely to support cmpxchg natively.
-	 *
-	 * If not we fall back to the spinlock based variant - that is
-	 * just as efficient (and simpler) as a 'destructive' probing of
-	 * the mutex state would be.
-	 */
-#ifdef __HAVE_ARCH_CMPXCHG
-	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
-		smp_mb();
+	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
 		return 1;
-	}
 	return 0;
-#else
-	return fail_fn(count);
-#endif
 }
 
 #endif
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 7b9cd2c..580a6d3 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -27,8 +27,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		fail_fn(count);
-	else
-		smp_mb();
 }
 
 /**
@@ -46,10 +44,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		return fail_fn(count);
-	else {
-		smp_mb();
-		return 0;
-	}
+	return 0;
 }
 
 /**
@@ -67,7 +62,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	smp_mb();
 	if (unlikely(atomic_xchg(count, 1) != 0))
 		fail_fn(count);
 }
@@ -110,7 +104,6 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 		if (prev < 0)
 			prev = 0;
 	}
-	smp_mb();
 	return prev;
 }
 
diff --git a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h
index 25c10e9..37b3706 100644
--- a/include/asm-generic/pci-dma-compat.h
+++ b/include/asm-generic/pci-dma-compat.h
@@ -99,9 +99,9 @@ pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
 }
 
 static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
 {
-	return dma_mapping_error(dma_addr);
+	return dma_mapping_error(&pdev->dev, dma_addr);
 }
 
 #endif
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 087325e..a7cdc48 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -5,6 +5,8 @@
 
 #include <asm-generic/pgtable-nopud.h>
 
+struct mm_struct;
+
 #define __PAGETABLE_PMD_FOLDED
 
 /*
@@ -54,7 +56,9 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
  * inside the pud, so has no extra memory associated with it.
  */
 #define pmd_alloc_one(mm, address)		NULL
-#define pmd_free(mm, x)				do { } while (0)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+}
 #define __pmd_free_tlb(tlb, x)			do { } while (0)
 
 #undef  pmd_addr_end
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index be4af00..89061c1 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -15,6 +15,7 @@
 #include <linux/mc146818rtc.h>
 #include <linux/rtc.h>
 #include <linux/bcd.h>
+#include <linux/delay.h>
 
 #define RTC_PIE 0x40		/* periodic interrupt enable */
 #define RTC_AIE 0x20		/* alarm interrupt enable */
@@ -43,7 +44,6 @@ static inline unsigned char rtc_is_updating(void)
 
 static inline unsigned int get_rtc_time(struct rtc_time *time)
 {
-	unsigned long uip_watchdog = jiffies;
 	unsigned char ctrl;
 	unsigned long flags;
 
@@ -53,19 +53,15 @@ static inline unsigned int get_rtc_time(struct rtc_time *time)
 
 	/*
 	 * read RTC once any update in progress is done. The update
-	 * can take just over 2ms. We wait 10 to 20ms. There is no need to
+	 * can take just over 2ms. We wait 20ms. There is no need to
	 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
 	 * If you need to know *exactly* when a second has started, enable
 	 * periodic update complete interrupts, (via ioctl) and then
 	 * immediately read /dev/rtc which will block until you get the IRQ.
 	 * Once the read clears, read the RTC time (again via ioctl). Easy.
 	 */
-
-	if (rtc_is_updating() != 0)
-		while (jiffies - uip_watchdog < 2*HZ/100) {
-			barrier();
-			cpu_relax();
-		}
+	if (rtc_is_updating())
+		mdelay(20);
 
 	/*
 	 * Only the values that we read from the RTC are set. We leave
@@ -88,12 +84,12 @@ static inline unsigned int get_rtc_time(struct rtc_time *time)
 
 	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
 	{
-		BCD_TO_BIN(time->tm_sec);
-		BCD_TO_BIN(time->tm_min);
-		BCD_TO_BIN(time->tm_hour);
-		BCD_TO_BIN(time->tm_mday);
-		BCD_TO_BIN(time->tm_mon);
-		BCD_TO_BIN(time->tm_year);
+		time->tm_sec = bcd2bin(time->tm_sec);
+		time->tm_min = bcd2bin(time->tm_min);
+		time->tm_hour = bcd2bin(time->tm_hour);
+		time->tm_mday = bcd2bin(time->tm_mday);
+		time->tm_mon = bcd2bin(time->tm_mon);
+		time->tm_year = bcd2bin(time->tm_year);
 	}
 
 #ifdef CONFIG_MACH_DECSTATION
@@ -163,12 +159,12 @@ static inline int set_rtc_time(struct rtc_time *time)
 
 	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
 	    || RTC_ALWAYS_BCD) {
-		BIN_TO_BCD(sec);
-		BIN_TO_BCD(min);
-		BIN_TO_BCD(hrs);
-		BIN_TO_BCD(day);
-		BIN_TO_BCD(mon);
-		BIN_TO_BCD(yrs);
+		sec = bin2bcd(sec);
+		min = bin2bcd(min);
+		hrs = bin2bcd(hrs);
+		day = bin2bcd(day);
+		mon = bin2bcd(mon);
+		yrs = bin2bcd(yrs);
 	}
 
 	save_control = CMOS_READ(RTC_CONTROL);
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 8feeae1..79a7ff9 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,4 +14,10 @@ extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __start_rodata[], __end_rodata[];
 
+/* function descriptor handling (if any).  Override
+ * in asm/sections.h */
+#ifndef dereference_function_descriptor
+#define dereference_function_descriptor(p) (p)
+#endif
+
 #endif /* _ASM_GENERIC_SECTIONS_H_ */
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 8786e01..9695701 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -199,6 +199,8 @@ typedef struct siginfo {
  */
 #define TRAP_BRKPT	(__SI_FAULT|1)	/* process breakpoint */
 #define TRAP_TRACE	(__SI_FAULT|2)	/* process trace trap */
+#define TRAP_BRANCH     (__SI_FAULT|3)  /* process taken branch trap */
+#define TRAP_HWBKPT     (__SI_FAULT|4)  /* hardware breakpoint/watchpoint */
 #define NSIGTRAP	2
 
 /*
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
index 1d01043..6129d68 100644
--- a/include/asm-generic/statfs.h
+++ b/include/asm-generic/statfs.h
@@ -6,33 +6,64 @@
 typedef __kernel_fsid_t	fsid_t;
 #endif
 
+/*
+ * Most 64-bit platforms use 'long', while most 32-bit platforms use '__u32'.
+ * Yes, they differ in signedness as well as size.
+ * Special cases can override it for themselves -- except for S390x, which
+ * is just a little too special for us. And MIPS, which I'm not touching
+ * with a 10' pole.
+ */
+#ifndef __statfs_word
+#if BITS_PER_LONG == 64
+#define __statfs_word long
+#else
+#define __statfs_word __u32
+#endif
+#endif
+
 struct statfs {
-	__u32 f_type;
-	__u32 f_bsize;
-	__u32 f_blocks;
-	__u32 f_bfree;
-	__u32 f_bavail;
-	__u32 f_files;
-	__u32 f_ffree;
+	__statfs_word f_type;
+	__statfs_word f_bsize;
+	__statfs_word f_blocks;
+	__statfs_word f_bfree;
+	__statfs_word f_bavail;
+	__statfs_word f_files;
+	__statfs_word f_ffree;
 	__kernel_fsid_t f_fsid;
-	__u32 f_namelen;
-	__u32 f_frsize;
-	__u32 f_spare[5];
+	__statfs_word f_namelen;
+	__statfs_word f_frsize;
+	__statfs_word f_spare[5];
 };
 
+/*
+ * ARM needs to avoid the 32-bit padding at the end, for consistency
+ * between EABI and OABI
+ */
+#ifndef ARCH_PACK_STATFS64
+#define ARCH_PACK_STATFS64
+#endif
+
 struct statfs64 {
-	__u32 f_type;
-	__u32 f_bsize;
+	__statfs_word f_type;
+	__statfs_word f_bsize;
 	__u64 f_blocks;
 	__u64 f_bfree;
 	__u64 f_bavail;
 	__u64 f_files;
 	__u64 f_ffree;
 	__kernel_fsid_t f_fsid;
-	__u32 f_namelen;
-	__u32 f_frsize;
-	__u32 f_spare[5];
-};
+	__statfs_word f_namelen;
+	__statfs_word f_frsize;
+	__statfs_word f_spare[5];
+} ARCH_PACK_STATFS64;
+
+/*
+ * IA64 and x86_64 need to avoid the 32-bit padding at the end,
+ * to be compatible with the i386 ABI
+ */
+#ifndef ARCH_PACK_COMPAT_STATFS64
+#define ARCH_PACK_COMPAT_STATFS64
+#endif
 
 struct compat_statfs64 {
 	__u32 f_type;
@@ -46,6 +77,6 @@ struct compat_statfs64 {
 	__u32 f_namelen;
 	__u32 f_frsize;
 	__u32 f_spare[5];
-};
+} ARCH_PACK_COMPAT_STATFS64;
 
 #endif
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
new file mode 100644
index 0000000..ea8087b
--- /dev/null
+++ b/include/asm-generic/syscall.h
@@ -0,0 +1,141 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * This file is a stub providing documentation for what functions
+ * asm-ARCH/syscall.h files need to define.  Most arch definitions
+ * will be simple inlines.
+ *
+ * All of these functions expect to be called with no locks,
+ * and only when the caller is sure that the task of interest
+ * cannot return to user mode while we are looking at it.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H	1
+
+struct task_struct;
+struct pt_regs;
+
+/**
+ * syscall_get_nr - find what system call a task is executing
+ * @task:	task of interest, must be blocked
+ * @regs:	task_pt_regs() of @task
+ *
+ * If @task is executing a system call or is at system call
+ * tracing about to attempt one, returns the system call number.
+ * If @task is not executing a system call, i.e. it's blocked
+ * inside the kernel for a fault or signal, returns -1.
+ *
+ * It's only valid to call this when @task is known to be blocked.
+ */
+long syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_rollback - roll back registers after an aborted system call
+ * @task:	task of interest, must be in system call exit tracing
+ * @regs:	task_pt_regs() of @task
+ *
+ * It's only valid to call this when @task is stopped for system
+ * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT),
+ * after tracehook_report_syscall_entry() returned nonzero to prevent
+ * the system call from taking place.
+ *
+ * This rolls back the register state in @regs so it's as if the
+ * system call instruction was a no-op.  The registers containing
+ * the system call number and arguments are as they were before the
+ * system call instruction.  This may not be the same as what the
+ * register state looked like at system call entry tracing.
+ */
+void syscall_rollback(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_get_error - check result of traced system call
+ * @task:	task of interest, must be blocked
+ * @regs:	task_pt_regs() of @task
+ *
+ * Returns 0 if the system call succeeded, or -ERRORCODE if it failed.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_get_return_value - get the return value of a traced system call
+ * @task:	task of interest, must be blocked
+ * @regs:	task_pt_regs() of @task
+ *
+ * Returns the return value of the successful system call.
+ * This value is meaningless if syscall_get_error() returned nonzero.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_set_return_value - change the return value of a traced system call
+ * @task:	task of interest, must be blocked
+ * @regs:	task_pt_regs() of @task
+ * @error:	negative error code, or zero to indicate success
+ * @val:	user return value if @error is zero
+ *
+ * This changes the results of the system call that user mode will see.
+ * If @error is zero, the user sees a successful system call with a
+ * return value of @val.  If @error is nonzero, it's a negated errno
+ * code; the user sees a failed system call with this errno code.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+			      int error, long val);
+
+/**
+ * syscall_get_arguments - extract system call parameter values
+ * @task:	task of interest, must be blocked
+ * @regs:	task_pt_regs() of @task
+ * @i:		argument index [0,5]
+ * @n:		number of arguments; n+i must be [1,6].
+ * @args:	array filled with argument values
+ *
+ * Fetches @n arguments to the system call starting with the @i'th argument
+ * (from 0 through 5).  Argument @i is stored in @args[0], and so on.
+ * An arch inline version is probably optimal when @i and @n are constants.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * It's invalid to call this with @i + @n > 6; we only support system calls
+ * taking up to 6 arguments.
+ */
+void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+			   unsigned int i, unsigned int n, unsigned long *args);
+
+/**
+ * syscall_set_arguments - change system call parameter value
+ * @task:	task of interest, must be in system call entry tracing
+ * @regs:	task_pt_regs() of @task
+ * @i:		argument index [0,5]
+ * @n:		number of arguments; n+i must be [1,6].
+ * @args:	array of argument values to store
+ *
+ * Changes @n arguments to the system call starting with the @i'th argument.
+ * Argument @i gets value @args[0], and so on.
+ * An arch inline version is probably optimal when @i and @n are constants.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * It's invalid to call this with @i + @n > 6; we only support system calls
+ * taking up to 6 arguments.
+ */
+void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+			   unsigned int i, unsigned int n,
+			   const unsigned long *args);
+
+#endif	/* _ASM_SYSCALL_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 729f6b0..80744606 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -37,6 +37,13 @@
 #define MEM_DISCARD(sec)	*(.mem##sec)
 #endif
 
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#define MCOUNT_REC()	VMLINUX_SYMBOL(__start_mcount_loc) = .; \
+			*(__mcount_loc)				\
+			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
+#else
+#define MCOUNT_REC()
+#endif
 
 /* .data section */
 #define DATA_DATA							\
@@ -52,7 +59,10 @@
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__start___markers) = .;				\
 	*(__markers)							\
-	VMLINUX_SYMBOL(__stop___markers) = .;
+	VMLINUX_SYMBOL(__stop___markers) = .;				\
+	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
+	*(__tracepoints)						\
+	VMLINUX_SYMBOL(__stop___tracepoints) = .;
 
 #define RO_DATA(align)							\
 	. = ALIGN((align));						\
@@ -61,6 +71,7 @@
 		*(.rodata) *(.rodata.*)					\
 		*(__vermagic)		/* Kernel version magic */	\
 		*(__markers_strings)	/* Markers: strings */		\
+		*(__tracepoints_strings)/* Tracepoints: strings */	\
 	}								\
 									\
 	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
@@ -188,6 +199,7 @@
 	/* __*init sections */						\
 	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
 		*(.ref.rodata)						\
+		MCOUNT_REC()						\
 		DEV_KEEP(init.rodata)					\
 		DEV_KEEP(exit.rodata)					\
 		CPU_KEEP(init.rodata)					\
@@ -221,6 +233,7 @@
  * during second ld run in second ld pass when generating System.map */
 #define TEXT_TEXT							\
 		ALIGN_FUNCTION();					\
+		*(.text.hot)						\
 		*(.text)						\
 		*(.ref.text)						\
 		*(.text.init.refok)					\
@@ -230,7 +243,8 @@
 		CPU_KEEP(init.text)					\
 		CPU_KEEP(exit.text)					\
 		MEM_KEEP(init.text)					\
-		MEM_KEEP(exit.text)
+		MEM_KEEP(exit.text)					\
+		*(.text.unlikely)
 
 
 /* sched.text is aling to function alignment to secure we have same
@@ -266,7 +280,15 @@
 	CPU_DISCARD(init.data)						\
 	CPU_DISCARD(init.rodata)					\
 	MEM_DISCARD(init.data)						\
-	MEM_DISCARD(init.rodata)
+	MEM_DISCARD(init.rodata)					\
+	/* implement dynamic printk debug */				\
+	VMLINUX_SYMBOL(__start___verbose_strings) = .;			\
+	*(__verbose_strings)						\
+	VMLINUX_SYMBOL(__stop___verbose_strings) = .;			\
+	. = ALIGN(8);							\
+	VMLINUX_SYMBOL(__start___verbose) = .;				\
+	*(__verbose)							\
+	VMLINUX_SYMBOL(__stop___verbose) = .;
 
 #define INIT_TEXT							\
 	*(.init.text)							\
@@ -331,9 +353,9 @@
 #define BUG_TABLE							\
 	. = ALIGN(8);							\
 	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
-		__start___bug_table = .;				\
+		VMLINUX_SYMBOL(__start___bug_table) = .;		\
 		*(__bug_table)						\
-		__stop___bug_table = .;					\
+		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
 	}
 #else
 #define BUG_TABLE
@@ -343,9 +365,9 @@
 #define TRACEDATA							\
 	. = ALIGN(4);							\
 	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
-		__tracedata_start = .;					\
+		VMLINUX_SYMBOL(__tracedata_start) = .;			\
 		*(.tracedata)						\
-		__tracedata_end = .;					\
+		VMLINUX_SYMBOL(__tracedata_end) = .;			\
 	}
 #else
 #define TRACEDATA
@@ -359,6 +381,8 @@
 	}
 
 #define INITCALLS							\
+	*(.initcallearly.init)						\
+	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
 	*(.initcall0.init)						\
 	*(.initcall0s.init)						\
 	*(.initcall1.init)						\
@@ -379,9 +403,10 @@
 
 #define PERCPU(align)							\
 	. = ALIGN(align);						\
-	__per_cpu_start = .;						\
+	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
 	.data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
+		*(.data.percpu.page_aligned)				\
 		*(.data.percpu)						\
 		*(.data.percpu.shared_aligned)				\
 	}								\
-	__per_cpu_end = .;
+	VMLINUX_SYMBOL(__per_cpu_end) = .;
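
As background for the WARN()/WARN_ONCE() macros added to include/asm-generic/bug.h in the diff above: unlike WARN_ON(), both take a printk-style format and still evaluate to the truth value of their condition. A minimal illustrative sketch; the foo_device driver below is hypothetical and not part of this merge:

```c
#include <linux/kernel.h>	/* pulls in asm/bug.h for WARN()/WARN_ONCE() */

/* Hypothetical device state, made up for this example. */
struct foo_device {
	unsigned long	max_rate;
	unsigned long	rate;
	unsigned int	fw_rev;
};

static int foo_set_rate(struct foo_device *dev, unsigned long rate)
{
	/* WARN() logs the message each time the condition is true and
	 * returns the condition's value, so it can drive the clamping
	 * decision directly. */
	if (WARN(rate > dev->max_rate,
		 "foo: rate %lu exceeds max %lu, clamping\n",
		 rate, dev->max_rate))
		rate = dev->max_rate;

	/* WARN_ONCE() only complains the first time its condition holds. */
	WARN_ONCE(dev->fw_rev < 2,
		  "foo: firmware rev %u is untested with this driver\n",
		  dev->fw_rev);

	dev->rate = rate;
	return 0;
}
```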
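The gpio.h hunks add a to_irq() hook to struct gpio_chip and an optional sysfs export path (gpio_export()/gpio_unexport(), stubbed to -ENOSYS without CONFIG_GPIO_SYSFS). A rough consumer-side sketch; GPIO number 42 and the "button" label are invented for illustration:

```c
#include <linux/gpio.h>		/* wraps asm/gpio.h, i.e. asm-generic/gpio.h here */

static int button_setup(void)
{
	int err, irq;

	err = gpio_request(42, "button");	/* invokes the chip's request() hook, if any */
	if (err)
		return err;

	err = gpio_direction_input(42);
	if (err)
		goto out_free;

	/* Backed by the new to_irq() hook when the gpio_chip provides one. */
	irq = gpio_to_irq(42);
	if (irq < 0) {
		err = irq;
		goto out_free;
	}

	/* Expose the pin in /sys/class/gpio; userspace may not flip its
	 * direction.  Returns -ENOSYS when CONFIG_GPIO_SYSFS is disabled. */
	err = gpio_export(42, false);
	if (err)
		goto out_free;

	return 0;

out_free:
	gpio_free(42);
	return err;
}
```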
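The new asm-generic/syscall.h only documents the interface that each architecture's asm/syscall.h must implement, so the header is usable only on arches that provide it. A hypothetical, non-mainline caller might look like the sketch below, assuming it runs while the traced child is stopped at syscall-entry tracing, as the kerneldoc requires:

```c
#include <linux/kernel.h>
#include <linux/sched.h>	/* struct task_struct, task_pt_regs(), task_pid_nr() */
#include <asm/unistd.h>		/* __NR_open */
#include <asm/syscall.h>	/* only on architectures that implement it */

/* Assumes @child is stopped for syscall-entry tracing (TIF_SYSCALL_TRACE),
 * so its register snapshot cannot change underneath us. */
static void log_open_calls(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	unsigned long args[3];
	long nr = syscall_get_nr(child, regs);

	if (nr < 0)
		return;		/* blocked on a fault or signal, not in a syscall */

	if (nr != __NR_open)
		return;

	/* Fetch argument slots 0..2: pathname pointer, flags, mode. */
	syscall_get_arguments(child, regs, 0, 3, args);

	printk(KERN_DEBUG "pid %d: open(%#lx, %#lx, %#lx)\n",
	       task_pid_nr(child), args[0], args[1], args[2]);
}
```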