Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/decompress/unlzo.h  |  10
-rw-r--r--  include/linux/highmem.h           |   2
-rw-r--r--  include/linux/i2c/adp5588.h       |  12
-rw-r--r--  include/linux/kernel.h            |   4
-rw-r--r--  include/linux/kfifo.h             |  42
-rw-r--r--  include/linux/kgdb.h              |   7
-rw-r--r--  include/linux/kmemcheck.h         | 110
-rw-r--r--  include/linux/libata.h            |   3
-rw-r--r--  include/linux/list_sort.h         |  11
-rw-r--r--  include/linux/mm.h                |   1
-rw-r--r--  include/linux/mm_types.h          |   4
-rw-r--r--  include/linux/pci.h               |   1
-rw-r--r--  include/linux/phy.h               |   1
-rw-r--r--  include/linux/poison.h            |  16
-rw-r--r--  include/linux/sched.h             |   7
-rw-r--r--  include/linux/serio.h             |  19
-rw-r--r--  include/linux/tty.h               |   4
-rw-r--r--  include/linux/uaccess.h           |   4
18 files changed, 156 insertions(+), 102 deletions(-)
diff --git a/include/linux/decompress/unlzo.h b/include/linux/decompress/unlzo.h
new file mode 100644
index 0000000..9872297
--- /dev/null
+++ b/include/linux/decompress/unlzo.h
@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_UNLZO_H
+#define DECOMPRESS_UNLZO_H
+
+int unlzo(unsigned char *inbuf, int len,
+ int(*fill)(void*, unsigned int),
+ int(*flush)(void*, unsigned int),
+ unsigned char *output,
+ int *pos,
+ void(*error)(char *x));
+#endif
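
A hedged sketch (not part of the patch) of how a caller might invoke the new unlzo() entry point on a buffer that is already fully in memory. The wrapper and error callback names are hypothetical; fill and flush are left NULL since no streaming is needed.

#include <linux/decompress/unlzo.h>
#include <linux/kernel.h>

/* Hypothetical error callback: just log the decompressor's message. */
static void demo_unlzo_error(char *msg)
{
	printk(KERN_ERR "unlzo: %s\n", msg);
}

/*
 * Decompress an LZO image that is entirely resident in memory.
 * pos reports how much of the input was consumed.
 */
static int demo_unlzo_in_place(unsigned char *in, int in_len,
			       unsigned char *out)
{
	int pos = 0;

	return unlzo(in, in_len, NULL, NULL, out, &pos, demo_unlzo_error);
}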
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 211ff44..ab2cc20 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -46,7 +46,7 @@ void kmap_flush_unused(void);
static inline unsigned int nr_free_highpages(void) { return 0; }
-#define totalhigh_pages 0
+#define totalhigh_pages 0UL
#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
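
A hedged illustration of why the 0UL stub matters: the macro is substituted into unsigned-long arithmetic, and the suffix keeps the expression's type identical on !HIGHMEM builds. The helper name is made up.

#include <linux/highmem.h>

/* Report high memory in KiB; with the 0UL stub the shift below stays
 * unsigned long, matching the type it has on HIGHMEM builds. */
static unsigned long demo_highmem_kib(void)
{
	return totalhigh_pages << (PAGE_SHIFT - 10);
}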
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
index fc5db82..02c9af3 100644
--- a/include/linux/i2c/adp5588.h
+++ b/include/linux/i2c/adp5588.h
@@ -89,4 +89,16 @@ struct adp5588_kpad_platform_data {
unsigned short unlock_key2; /* Unlock Key 2 */
};
+struct adp5588_gpio_platform_data {
+ unsigned gpio_start; /* GPIO Chip base # */
+ unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
+ int (*setup)(struct i2c_client *client,
+ int gpio, unsigned ngpio,
+ void *context);
+ int (*teardown)(struct i2c_client *client,
+ int gpio, unsigned ngpio,
+ void *context);
+ void *context;
+};
+
#endif
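
A hedged sketch of how a board file might wire up the new adp5588_gpio_platform_data. The GPIO base, I2C address, and device name string are assumptions, not taken from the patch.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/adp5588.h>

static struct adp5588_gpio_platform_data demo_adp5588_gpio = {
	.gpio_start	 = 200,	/* assumed GPIO chip base for this board */
	.pullup_dis_mask = 0,	/* leave every pull-up enabled */
	.setup		 = NULL,	/* no board-specific hooks needed */
	.teardown	 = NULL,
};

static struct i2c_board_info demo_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("adp5588-gpio", 0x34),	/* name/address assumed */
		.platform_data = &demo_adp5588_gpio,
	},
};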
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3fc9f5a..328bca6 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -734,6 +734,10 @@ struct sysinfo {
/* Force a compilation error if condition is constant and true */
#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
+/* Force a compilation error if a constant expression is not a power of 2 */
+#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
+
/* Force a compilation error if condition is true, but also produce a
result (of value 0 and type size_t), so the expression can be used
e.g. in a structure initializer (or where-ever else comma expressions
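
A hedged example of the new macro in use: a driver with a mask-based ring buffer can reject a non-power-of-two size at compile time. The constant and function below are hypothetical.

#include <linux/kernel.h>

#define DEMO_RING_SIZE 256	/* hypothetical ring size; must stay a power of two */

static unsigned int demo_ring_index(unsigned int seq)
{
	/* Fails the build if DEMO_RING_SIZE is later changed to, say, 100. */
	BUILD_BUG_ON_NOT_POWER_OF_2(DEMO_RING_SIZE);
	return seq & (DEMO_RING_SIZE - 1);
}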
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 7c6b32a1..6f6c5f3 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -67,7 +67,7 @@ struct kfifo {
/**
* DECLARE_KFIFO - macro to declare a kfifo and the associated buffer
* @name: name of the declared kfifo datatype
- * @size: size of the fifo buffer
+ * @size: size of the fifo buffer. Must be a power of two.
*
* Note1: the macro can be used inside struct or union declaration
* Note2: the macro creates two objects:
@@ -91,7 +91,7 @@ union { \
/**
* DEFINE_KFIFO - macro to define and initialize a kfifo
* @name: name of the declared kfifo datatype
- * @size: size of the fifo buffer
+ * @size: size of the fifo buffer. Must be a power of two.
*
* Note1: the macro can be used for global and local kfifo data type variables
* Note2: the macro creates two objects:
@@ -104,15 +104,28 @@ union { \
#undef __kfifo_initializer
-extern void kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+extern void kfifo_init(struct kfifo *fifo, void *buffer,
unsigned int size);
extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
gfp_t gfp_mask);
extern void kfifo_free(struct kfifo *fifo);
extern unsigned int kfifo_in(struct kfifo *fifo,
- const unsigned char *from, unsigned int len);
+ const void *from, unsigned int len);
extern __must_check unsigned int kfifo_out(struct kfifo *fifo,
- unsigned char *to, unsigned int len);
+ void *to, unsigned int len);
+extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo,
+ void *to, unsigned int len, unsigned offset);
+
+/**
+ * kfifo_initialized - Check if kfifo is initialized.
+ * @fifo: fifo to check
+ * Return %true if FIFO is initialized, otherwise %false.
+ * Assumes the fifo was 0 before.
+ */
+static inline bool kfifo_initialized(struct kfifo *fifo)
+{
+ return fifo->buffer != 0;
+}
/**
* kfifo_reset - removes the entire FIFO contents
@@ -194,7 +207,7 @@ static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo)
* bytes copied.
*/
static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
- const unsigned char *from, unsigned int n, spinlock_t *lock)
+ const void *from, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
unsigned int ret;
@@ -219,7 +232,7 @@ static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
* @to buffer and returns the number of copied bytes.
*/
static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
- unsigned char *to, unsigned int n, spinlock_t *lock)
+ void *to, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
unsigned int ret;
@@ -228,13 +241,6 @@ static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
ret = kfifo_out(fifo, to, n);
- /*
- * optimization: if the FIFO is empty, set the indices to 0
- * so we don't wrap the next time
- */
- if (kfifo_is_empty(fifo))
- kfifo_reset(fifo);
-
spin_unlock_irqrestore(lock, flags);
return ret;
@@ -242,11 +248,11 @@ static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
extern void kfifo_skip(struct kfifo *fifo, unsigned int len);
-extern __must_check unsigned int kfifo_from_user(struct kfifo *fifo,
- const void __user *from, unsigned int n);
+extern __must_check int kfifo_from_user(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned *lenout);
-extern __must_check unsigned int kfifo_to_user(struct kfifo *fifo,
- void __user *to, unsigned int n);
+extern __must_check int kfifo_to_user(struct kfifo *fifo,
+ void __user *to, unsigned int n, unsigned *lenout);
/*
* __kfifo_add_out internal helper function for updating the out offset
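
A hedged sketch of the revised user-copy convention: kfifo_from_user()/kfifo_to_user() now return an error code and report the copied length through the extra parameter instead of returning the length directly, and kfifo_in/kfifo_out take void pointers. The fifo variable and handlers are hypothetical.

#include <linux/kfifo.h>
#include <linux/fs.h>
#include <linux/spinlock.h>

static struct kfifo demo_fifo;		/* assumed to be kfifo_alloc()ed elsewhere */
static DEFINE_SPINLOCK(demo_lock);

static ssize_t demo_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned int copied;
	int ret;

	ret = kfifo_from_user(&demo_fifo, buf, count, &copied);
	if (ret)
		return ret;		/* e.g. -EFAULT on a faulting user copy */

	return copied;
}

static void demo_consume(void *dst, unsigned int len)
{
	/* kfifo_out_locked() now takes a void *, so no cast is needed. */
	kfifo_out_locked(&demo_fifo, dst, len, &demo_lock);
}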
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 6adcc29..19ec41a 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -29,8 +29,7 @@ struct pt_regs;
*
* On some architectures it is required to skip a breakpoint
* exception when it occurs after a breakpoint has been removed.
- * This can be implemented in the architecture specific portion of
- * for kgdb.
+ * This can be implemented in the architecture specific portion of kgdb.
*/
extern int kgdb_skipexception(int exception, struct pt_regs *regs);
@@ -65,7 +64,7 @@ struct uart_port;
/**
* kgdb_breakpoint - compiled in breakpoint
*
- * This will be impelmented a static inline per architecture. This
+ * This will be implemented as a static inline per architecture. This
* function is called by the kgdb core to execute an architecture
* specific trap to cause kgdb to enter the exception processing.
*
@@ -190,7 +189,7 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
* @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
- * and get them be in a known state. This should do what is needed
+ * and get them into a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
* the NMI approach is not used for rounding up all the CPUs. For example,
* in case of MIPS, smp_call_function() is used to roundup CPUs. In
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index e880d4cf9..08d7dc4 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -36,6 +36,56 @@ int kmemcheck_hide_addr(unsigned long address);
bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
+/*
+ * Bitfield annotations
+ *
+ * How to use: If you have a struct using bitfields, for example
+ *
+ * struct a {
+ * int x:8, y:8;
+ * };
+ *
+ * then this should be rewritten as
+ *
+ * struct a {
+ * kmemcheck_bitfield_begin(flags);
+ * int x:8, y:8;
+ * kmemcheck_bitfield_end(flags);
+ * };
+ *
+ * Now the "flags_begin" and "flags_end" members may be used to refer to the
+ * beginning and end, respectively, of the bitfield (and things like
+ * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
+ * fields should be annotated:
+ *
+ * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
+ * kmemcheck_annotate_bitfield(a, flags);
+ */
+#define kmemcheck_bitfield_begin(name) \
+ int name##_begin[0];
+
+#define kmemcheck_bitfield_end(name) \
+ int name##_end[0];
+
+#define kmemcheck_annotate_bitfield(ptr, name) \
+ do { \
+ int _n; \
+ \
+ if (!ptr) \
+ break; \
+ \
+ _n = (long) &((ptr)->name##_end) \
+ - (long) &((ptr)->name##_begin); \
+ MAYBE_BUILD_BUG_ON(_n < 0); \
+ \
+ kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
+ } while (0)
+
+#define kmemcheck_annotate_variable(var) \
+ do { \
+ kmemcheck_mark_initialized(&(var), sizeof(var)); \
+ } while (0) \
+
#else
#define kmemcheck_enabled 0
@@ -106,60 +156,16 @@ static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
return true;
}
-#endif /* CONFIG_KMEMCHECK */
-
-/*
- * Bitfield annotations
- *
- * How to use: If you have a struct using bitfields, for example
- *
- * struct a {
- * int x:8, y:8;
- * };
- *
- * then this should be rewritten as
- *
- * struct a {
- * kmemcheck_bitfield_begin(flags);
- * int x:8, y:8;
- * kmemcheck_bitfield_end(flags);
- * };
- *
- * Now the "flags_begin" and "flags_end" members may be used to refer to the
- * beginning and end, respectively, of the bitfield (and things like
- * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
- * fields should be annotated:
- *
- * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
- * kmemcheck_annotate_bitfield(a, flags);
- *
- * Note: We provide the same definitions for both kmemcheck and non-
- * kmemcheck kernels. This makes it harder to introduce accidental errors. It
- * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
- */
-#define kmemcheck_bitfield_begin(name) \
- int name##_begin[0];
-
-#define kmemcheck_bitfield_end(name) \
- int name##_end[0];
+#define kmemcheck_bitfield_begin(name)
+#define kmemcheck_bitfield_end(name)
+#define kmemcheck_annotate_bitfield(ptr, name) \
+ do { \
+ } while (0)
-#define kmemcheck_annotate_bitfield(ptr, name) \
- do { \
- int _n; \
- \
- if (!ptr) \
- break; \
- \
- _n = (long) &((ptr)->name##_end) \
- - (long) &((ptr)->name##_begin); \
- MAYBE_BUILD_BUG_ON(_n < 0); \
- \
- kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
+#define kmemcheck_annotate_variable(var) \
+ do { \
} while (0)
-#define kmemcheck_annotate_variable(var) \
- do { \
- kmemcheck_mark_initialized(&(var), sizeof(var)); \
- } while (0) \
+#endif /* CONFIG_KMEMCHECK */
#endif /* LINUX_KMEMCHECK_H */
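
A hedged, self-contained example of the annotation pattern the comment above describes; the struct and allocator are hypothetical.

#include <linux/kmemcheck.h>
#include <linux/slab.h>

struct demo_request {			/* hypothetical struct with bitfields */
	kmemcheck_bitfield_begin(flags);
	unsigned int urgent:1, retried:1;
	kmemcheck_bitfield_end(flags);
	int payload;
};

static struct demo_request *demo_alloc_request(gfp_t gfp)
{
	struct demo_request *req = kmalloc(sizeof(*req), gfp);

	/* Mark the bitfield storage initialized; the macro tolerates a
	 * NULL req, so no extra check is needed here. */
	kmemcheck_annotate_bitfield(req, flags);
	return req;
}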
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6a9c4dd..7311225 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -354,6 +354,9 @@ enum {
/* max tries if error condition is still set after ->error_handler */
ATA_EH_MAX_TRIES = 5,
+ /* sometimes resuming a link requires several retries */
+ ATA_LINK_RESUME_TRIES = 5,
+
/* how hard are we gonna try to probe/recover devices */
ATA_PROBE_MAX_TRIES = 3,
ATA_EH_DEV_TRIES = 3,
diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h
new file mode 100644
index 0000000..1a2df2e
--- /dev/null
+++ b/include/linux/list_sort.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_LIST_SORT_H
+#define _LINUX_LIST_SORT_H
+
+#include <linux/types.h>
+
+struct list_head;
+
+void list_sort(void *priv, struct list_head *head,
+ int (*cmp)(void *priv, struct list_head *a,
+ struct list_head *b));
+#endif
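
A hedged sketch of the new list_sort() helper sorting a list of a hypothetical element type in ascending key order; priv is unused and passed as NULL.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_sort.h>

struct demo_item {			/* hypothetical list element */
	struct list_head node;
	int key;
};

static int demo_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct demo_item *ia = container_of(a, struct demo_item, node);
	struct demo_item *ib = container_of(b, struct demo_item, node);

	return ia->key - ib->key;	/* ascending */
}

static void demo_sort(struct list_head *items)
{
	list_sort(NULL, items, demo_cmp);
}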
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2265f28..60c467b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1089,6 +1089,7 @@ extern void zone_pcp_update(struct zone *zone);
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
+extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 84d020b..36f9627 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -122,7 +122,7 @@ struct vm_region {
unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
struct file *vm_file; /* the backing file or NULL */
- atomic_t vm_usage; /* region usage count */
+ int vm_usage; /* region usage count (access under nommu_region_sem) */
bool vm_icache_flushed : 1; /* true if the icache has been flushed for
* this region */
};
@@ -205,10 +205,12 @@ struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
struct rb_root mm_rb;
struct vm_area_struct * mmap_cache; /* last find_vma result */
+#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
+#endif
unsigned long mmap_base; /* base of mmap area */
unsigned long task_size; /* size of task vm space */
unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 5da0690..174e539 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -243,6 +243,7 @@ struct pci_dev {
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
unsigned int wakeup_prepared:1;
+ unsigned int d3_delay; /* D3->D0 transition time in ms */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
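
A hedged example of the new d3_delay field: a quirk could lengthen the D3-to-D0 settle time for a device that resumes slowly. The vendor/device IDs and delay value are placeholders.

#include <linux/pci.h>

static void demo_quirk_slow_d3(struct pci_dev *dev)
{
	dev->d3_delay = 120;	/* assumed: wait 120 ms instead of the default */
}
DECLARE_PCI_FIXUP_ENABLE(0x1234, 0x5678, demo_quirk_slow_d3);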
diff --git a/include/linux/phy.h b/include/linux/phy.h
index b1368b8..7968def 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -447,6 +447,7 @@ struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
int phy_device_register(struct phy_device *phy);
int phy_clear_interrupt(struct phy_device *phydev);
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
+int phy_init_hw(struct phy_device *phydev);
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface);
struct phy_device * phy_attach(struct net_device *dev,
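
A hedged sketch of a MAC driver using the newly exported phy_init_hw() to reprogram its PHY after a hardware reset; the wrapper name is made up and phy_start_aneg() is shown only as a plausible follow-up step.

#include <linux/phy.h>

static int demo_mac_reinit_phy(struct phy_device *phydev)
{
	int ret;

	ret = phy_init_hw(phydev);	/* reapply PHY setup after reset */
	if (ret)
		return ret;

	return phy_start_aneg(phydev);	/* restart autonegotiation */
}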
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 7fc194a..2110a81 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -2,13 +2,25 @@
#define _LINUX_POISON_H
/********** include/linux/list.h **********/
+
+/*
+ * Architectures might want to move the poison pointer offset
+ * into some well-recognized area such as 0xdead000000000000,
+ * that is also not mappable by user-space exploits:
+ */
+#ifdef CONFIG_ILLEGAL_POINTER_VALUE
+# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
+#else
+# define POISON_POINTER_DELTA 0
+#endif
+
/*
* These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
-#define LIST_POISON1 ((void *) 0x00100100)
-#define LIST_POISON2 ((void *) 0x00200200)
+#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8d4991b..6f7bba9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -377,6 +377,8 @@ extern int sysctl_max_map_count;
#include <linux/aio.h>
+#ifdef CONFIG_MMU
+extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long);
@@ -386,6 +388,9 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
+#else
+static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+#endif
#if USE_SPLIT_PTLOCKS
/*
@@ -2491,8 +2496,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
-extern void arch_pick_mmap_layout(struct mm_struct *mm);
-
#ifdef CONFIG_TRACING
extern void
__trace_special(void *__tr, void *__data,
diff --git a/include/linux/serio.h b/include/linux/serio.h
index e2f3044..813d26c 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -136,25 +136,6 @@ static inline void serio_continue_rx(struct serio *serio)
spin_unlock_irq(&serio->lock);
}
-/*
- * Use the following functions to pin serio's driver in process context
- */
-static inline int serio_pin_driver(struct serio *serio)
-{
- return mutex_lock_interruptible(&serio->drv_mutex);
-}
-
-static inline void serio_pin_driver_uninterruptible(struct serio *serio)
-{
- mutex_lock(&serio->drv_mutex);
-}
-
-static inline void serio_unpin_driver(struct serio *serio)
-{
- mutex_unlock(&serio->drv_mutex);
-}
-
-
#endif
/*
diff --git a/include/linux/tty.h b/include/linux/tty.h
index ef3a294..6abfcf5 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -464,7 +464,7 @@ extern int tty_port_alloc_xmit_buf(struct tty_port *port);
extern void tty_port_free_xmit_buf(struct tty_port *port);
extern void tty_port_put(struct tty_port *port);
-extern inline struct tty_port *tty_port_get(struct tty_port *port)
+static inline struct tty_port *tty_port_get(struct tty_port *port)
{
if (port)
kref_get(&port->kref);
@@ -486,7 +486,7 @@ extern void tty_port_close(struct tty_port *port,
struct tty_struct *tty, struct file *filp);
extern int tty_port_open(struct tty_port *port,
struct tty_struct *tty, struct file *filp);
-extern inline int tty_port_users(struct tty_port *port)
+static inline int tty_port_users(struct tty_port *port)
{
return port->count + port->blocked_open;
}
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 6b58367..d512d98 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -94,6 +94,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_read(void *dst, void *src, size_t size);
+extern long __probe_kernel_read(void *dst, void *src, size_t size);
/*
* probe_kernel_write(): safely attempt to write to a location
@@ -104,6 +105,7 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-extern long probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
#endif /* __LINUX_UACCESS_H__ */
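
A hedged example of the safe-access helpers touched above: debugging or tracing code can peek at a possibly invalid kernel address without risking an oops. The wrapper is hypothetical.

#include <linux/uaccess.h>

static unsigned long demo_peek_word(void *addr)
{
	unsigned long val;

	/* probe_kernel_read() returns -EFAULT instead of faulting if addr is bad. */
	if (probe_kernel_read(&val, addr, sizeof(val)))
		return 0;	/* report zero for an unreadable address */

	return val;
}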