Diffstat (limited to 'include')
33 files changed, 932 insertions, 129 deletions
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index 485eeb6..979c6a5 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -136,6 +136,32 @@ extern int __gpio_cansleep(unsigned gpio); extern int __gpio_to_irq(unsigned gpio); +#define GPIOF_DIR_OUT (0 << 0) +#define GPIOF_DIR_IN (1 << 0) + +#define GPIOF_INIT_LOW (0 << 1) +#define GPIOF_INIT_HIGH (1 << 1) + +#define GPIOF_IN (GPIOF_DIR_IN) +#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW) +#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH) + +/** + * struct gpio - a structure describing a GPIO with configuration + * @gpio: the GPIO number + * @flags: GPIO configuration as specified by GPIOF_* + * @label: a literal description string of this GPIO + */ +struct gpio { + unsigned gpio; + unsigned long flags; + const char *label; +}; + +extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); +extern int gpio_request_array(struct gpio *array, size_t num); +extern void gpio_free_array(struct gpio *array, size_t num); + #ifdef CONFIG_GPIO_SYSFS /* diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 89c6249..c809e28 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -74,6 +74,7 @@ struct coredump_params { struct pt_regs *regs; struct file *file; unsigned long limit; + unsigned long mm_flags; }; /* diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 25b8b2f..b793898 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -16,11 +16,13 @@ */ #include <asm/bitops.h> -#define for_each_bit(bit, addr, size) \ +#define for_each_set_bit(bit, addr, size) \ for ((bit) = find_first_bit((addr), (size)); \ (bit) < (size); \ (bit) = find_next_bit((addr), (size), (bit) + 1)) +/* Temporary */ +#define for_each_bit(bit, addr, size) for_each_set_bit(bit, addr, size) static __inline__ int get_bitmask_order(unsigned int count) { diff --git a/include/linux/btree-128.h b/include/linux/btree-128.h new file mode 100644 index 0000000..0b3414c --- /dev/null +++ b/include/linux/btree-128.h @@ -0,0 +1,109 @@ +extern struct btree_geo btree_geo128; + +struct btree_head128 { struct btree_head h; }; + +static inline void btree_init_mempool128(struct btree_head128 *head, + mempool_t *mempool) +{ + btree_init_mempool(&head->h, mempool); +} + +static inline int btree_init128(struct btree_head128 *head) +{ + return btree_init(&head->h); +} + +static inline void btree_destroy128(struct btree_head128 *head) +{ + btree_destroy(&head->h); +} + +static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2) +{ + u64 key[2] = {k1, k2}; + return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key); +} + +static inline void *btree_get_prev128(struct btree_head128 *head, + u64 *k1, u64 *k2) +{ + u64 key[2] = {*k1, *k2}; + void *val; + + val = btree_get_prev(&head->h, &btree_geo128, + (unsigned long *)&key); + *k1 = key[0]; + *k2 = key[1]; + return val; +} + +static inline int btree_insert128(struct btree_head128 *head, u64 k1, u64 k2, + void *val, gfp_t gfp) +{ + u64 key[2] = {k1, k2}; + return btree_insert(&head->h, &btree_geo128, + (unsigned long *)&key, val, gfp); +} + +static inline int btree_update128(struct btree_head128 *head, u64 k1, u64 k2, + void *val) +{ + u64 key[2] = {k1, k2}; + return btree_update(&head->h, &btree_geo128, + (unsigned long *)&key, val); +} + +static inline void *btree_remove128(struct btree_head128 *head, u64 k1, u64 k2) +{ + u64 key[2] = {k1, k2}; + return btree_remove(&head->h, 
&btree_geo128, (unsigned long *)&key); +} + +static inline void *btree_last128(struct btree_head128 *head, u64 *k1, u64 *k2) +{ + u64 key[2]; + void *val; + + val = btree_last(&head->h, &btree_geo128, (unsigned long *)&key[0]); + if (val) { + *k1 = key[0]; + *k2 = key[1]; + } + + return val; +} + +static inline int btree_merge128(struct btree_head128 *target, + struct btree_head128 *victim, + gfp_t gfp) +{ + return btree_merge(&target->h, &victim->h, &btree_geo128, gfp); +} + +void visitor128(void *elem, unsigned long opaque, unsigned long *__key, + size_t index, void *__func); + +typedef void (*visitor128_t)(void *elem, unsigned long opaque, + u64 key1, u64 key2, size_t index); + +static inline size_t btree_visitor128(struct btree_head128 *head, + unsigned long opaque, + visitor128_t func2) +{ + return btree_visitor(&head->h, &btree_geo128, opaque, + visitor128, func2); +} + +static inline size_t btree_grim_visitor128(struct btree_head128 *head, + unsigned long opaque, + visitor128_t func2) +{ + return btree_grim_visitor(&head->h, &btree_geo128, opaque, + visitor128, func2); +} + +#define btree_for_each_safe128(head, k1, k2, val) \ + for (val = btree_last128(head, &k1, &k2); \ + val; \ + val = btree_get_prev128(head, &k1, &k2)) + diff --git a/include/linux/btree-type.h b/include/linux/btree-type.h new file mode 100644 index 0000000..9a1147e --- /dev/null +++ b/include/linux/btree-type.h @@ -0,0 +1,147 @@ +#define __BTREE_TP(pfx, type, sfx) pfx ## type ## sfx +#define _BTREE_TP(pfx, type, sfx) __BTREE_TP(pfx, type, sfx) +#define BTREE_TP(pfx) _BTREE_TP(pfx, BTREE_TYPE_SUFFIX,) +#define BTREE_FN(name) BTREE_TP(btree_ ## name) +#define BTREE_TYPE_HEAD BTREE_TP(struct btree_head) +#define VISITOR_FN BTREE_TP(visitor) +#define VISITOR_FN_T _BTREE_TP(visitor, BTREE_TYPE_SUFFIX, _t) + +BTREE_TYPE_HEAD { + struct btree_head h; +}; + +static inline void BTREE_FN(init_mempool)(BTREE_TYPE_HEAD *head, + mempool_t *mempool) +{ + btree_init_mempool(&head->h, mempool); +} + +static inline int BTREE_FN(init)(BTREE_TYPE_HEAD *head) +{ + return btree_init(&head->h); +} + +static inline void BTREE_FN(destroy)(BTREE_TYPE_HEAD *head) +{ + btree_destroy(&head->h); +} + +static inline int BTREE_FN(merge)(BTREE_TYPE_HEAD *target, + BTREE_TYPE_HEAD *victim, + gfp_t gfp) +{ + return btree_merge(&target->h, &victim->h, BTREE_TYPE_GEO, gfp); +} + +#if (BITS_PER_LONG > BTREE_TYPE_BITS) +static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + unsigned long _key = key; + return btree_lookup(&head->h, BTREE_TYPE_GEO, &_key); +} + +static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val, gfp_t gfp) +{ + unsigned long _key = key; + return btree_insert(&head->h, BTREE_TYPE_GEO, &_key, val, gfp); +} + +static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val) +{ + unsigned long _key = key; + return btree_update(&head->h, BTREE_TYPE_GEO, &_key, val); +} + +static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + unsigned long _key = key; + return btree_remove(&head->h, BTREE_TYPE_GEO, &_key); +} + +static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + unsigned long _key; + void *val = btree_last(&head->h, BTREE_TYPE_GEO, &_key); + if (val) + *key = _key; + return val; +} + +static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + unsigned long _key = *key; + void *val = btree_get_prev(&head->h, BTREE_TYPE_GEO, &_key); + if (val) + *key = _key; + 
return val; +} +#else +static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + return btree_lookup(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key); +} + +static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val, gfp_t gfp) +{ + return btree_insert(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, + val, gfp); +} + +static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val) +{ + return btree_update(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, val); +} + +static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + return btree_remove(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key); +} + +static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + return btree_last(&head->h, BTREE_TYPE_GEO, (unsigned long *)key); +} + +static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + return btree_get_prev(&head->h, BTREE_TYPE_GEO, (unsigned long *)key); +} +#endif + +void VISITOR_FN(void *elem, unsigned long opaque, unsigned long *key, + size_t index, void *__func); + +typedef void (*VISITOR_FN_T)(void *elem, unsigned long opaque, + BTREE_KEYTYPE key, size_t index); + +static inline size_t BTREE_FN(visitor)(BTREE_TYPE_HEAD *head, + unsigned long opaque, + VISITOR_FN_T func2) +{ + return btree_visitor(&head->h, BTREE_TYPE_GEO, opaque, + visitorl, func2); +} + +static inline size_t BTREE_FN(grim_visitor)(BTREE_TYPE_HEAD *head, + unsigned long opaque, + VISITOR_FN_T func2) +{ + return btree_grim_visitor(&head->h, BTREE_TYPE_GEO, opaque, + visitorl, func2); +} + +#undef VISITOR_FN +#undef VISITOR_FN_T +#undef __BTREE_TP +#undef _BTREE_TP +#undef BTREE_TP +#undef BTREE_FN +#undef BTREE_TYPE_HEAD +#undef BTREE_TYPE_SUFFIX +#undef BTREE_TYPE_GEO +#undef BTREE_KEYTYPE +#undef BTREE_TYPE_BITS diff --git a/include/linux/btree.h b/include/linux/btree.h new file mode 100644 index 0000000..65b5bb0 --- /dev/null +++ b/include/linux/btree.h @@ -0,0 +1,243 @@ +#ifndef BTREE_H +#define BTREE_H + +#include <linux/kernel.h> +#include <linux/mempool.h> + +/** + * DOC: B+Tree basics + * + * A B+Tree is a data structure for looking up arbitrary (currently allowing + * unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure + * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not + * use binary search to find the key on lookups. + * + * Each B+Tree consists of a head, that contains bookkeeping information and + * a variable number (starting with zero) nodes. Each node contains the keys + * and pointers to sub-nodes, or, for leaf nodes, the keys and values for the + * tree entries. + * + * Each node in this implementation has the following layout: + * [key1, key2, ..., keyN] [val1, val2, ..., valN] + * + * Each key here is an array of unsigned longs, geo->no_longs in total. The + * number of keys and values (N) is geo->no_pairs. 
+ */ + +/** + * struct btree_head - btree head + * + * @node: the first node in the tree + * @mempool: mempool used for node allocations + * @height: current of the tree + */ +struct btree_head { + unsigned long *node; + mempool_t *mempool; + int height; +}; + +/* btree geometry */ +struct btree_geo; + +/** + * btree_alloc - allocate function for the mempool + * @gfp_mask: gfp mask for the allocation + * @pool_data: unused + */ +void *btree_alloc(gfp_t gfp_mask, void *pool_data); + +/** + * btree_free - free function for the mempool + * @element: the element to free + * @pool_data: unused + */ +void btree_free(void *element, void *pool_data); + +/** + * btree_init_mempool - initialise a btree with given mempool + * + * @head: the btree head to initialise + * @mempool: the mempool to use + * + * When this function is used, there is no need to destroy + * the mempool. + */ +void btree_init_mempool(struct btree_head *head, mempool_t *mempool); + +/** + * btree_init - initialise a btree + * + * @head: the btree head to initialise + * + * This function allocates the memory pool that the + * btree needs. Returns zero or a negative error code + * (-%ENOMEM) when memory allocation fails. + * + */ +int __must_check btree_init(struct btree_head *head); + +/** + * btree_destroy - destroy mempool + * + * @head: the btree head to destroy + * + * This function destroys the internal memory pool, use only + * when using btree_init(), not with btree_init_mempool(). + */ +void btree_destroy(struct btree_head *head); + +/** + * btree_lookup - look up a key in the btree + * + * @head: the btree to look in + * @geo: the btree geometry + * @key: the key to look up + * + * This function returns the value for the given key, or %NULL. + */ +void *btree_lookup(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + +/** + * btree_insert - insert an entry into the btree + * + * @head: the btree to add to + * @geo: the btree geometry + * @key: the key to add (must not already be present) + * @val: the value to add (must not be %NULL) + * @gfp: allocation flags for node allocations + * + * This function returns 0 if the item could be added, or an + * error code if it failed (may fail due to memory pressure). + */ +int __must_check btree_insert(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val, gfp_t gfp); +/** + * btree_update - update an entry in the btree + * + * @head: the btree to update + * @geo: the btree geometry + * @key: the key to update + * @val: the value to change it to (must not be %NULL) + * + * This function returns 0 if the update was successful, or + * -%ENOENT if the key could not be found. + */ +int btree_update(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val); +/** + * btree_remove - remove an entry from the btree + * + * @head: the btree to update + * @geo: the btree geometry + * @key: the key to remove + * + * This function returns the removed entry, or %NULL if the key + * could not be found. + */ +void *btree_remove(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + +/** + * btree_merge - merge two btrees + * + * @target: the tree that gets all the entries + * @victim: the tree that gets merged into @target + * @geo: the btree geometry + * @gfp: allocation flags + * + * The two trees @target and @victim may not contain the same keys, + * that is a bug and triggers a BUG(). 
This function returns zero + * if the trees were merged successfully, and may return a failure + * when memory allocation fails, in which case both trees might have + * been partially merged, i.e. some entries have been moved from + * @victim to @target. + */ +int btree_merge(struct btree_head *target, struct btree_head *victim, + struct btree_geo *geo, gfp_t gfp); + +/** + * btree_last - get last entry in btree + * + * @head: btree head + * @geo: btree geometry + * @key: last key + * + * Returns the last entry in the btree, and sets @key to the key + * of that entry; returns NULL if the tree is empty, in that case + * key is not changed. + */ +void *btree_last(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + +/** + * btree_get_prev - get previous entry + * + * @head: btree head + * @geo: btree geometry + * @key: pointer to key + * + * The function returns the next item right before the value pointed to by + * @key, and updates @key with its key, or returns %NULL when there is no + * entry with a key smaller than the given key. + */ +void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + + +/* internal use, use btree_visitor{l,32,64,128} */ +size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, + unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, size_t index, + void *func2), + void *func2); + +/* internal use, use btree_grim_visitor{l,32,64,128} */ +size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, + unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, + size_t index, void *func2), + void *func2); + + +#include <linux/btree-128.h> + +extern struct btree_geo btree_geo32; +#define BTREE_TYPE_SUFFIX l +#define BTREE_TYPE_BITS BITS_PER_LONG +#define BTREE_TYPE_GEO &btree_geo32 +#define BTREE_KEYTYPE unsigned long +#include <linux/btree-type.h> + +#define btree_for_each_safel(head, key, val) \ + for (val = btree_lastl(head, &key); \ + val; \ + val = btree_get_prevl(head, &key)) + +#define BTREE_TYPE_SUFFIX 32 +#define BTREE_TYPE_BITS 32 +#define BTREE_TYPE_GEO &btree_geo32 +#define BTREE_KEYTYPE u32 +#include <linux/btree-type.h> + +#define btree_for_each_safe32(head, key, val) \ + for (val = btree_last32(head, &key); \ + val; \ + val = btree_get_prev32(head, &key)) + +extern struct btree_geo btree_geo64; +#define BTREE_TYPE_SUFFIX 64 +#define BTREE_TYPE_BITS 64 +#define BTREE_TYPE_GEO &btree_geo64 +#define BTREE_KEYTYPE u64 +#include <linux/btree-type.h> + +#define btree_for_each_safe64(head, key, val) \ + for (val = btree_last64(head, &key); \ + val; \ + val = btree_get_prev64(head, &key)) + +#endif diff --git a/include/linux/coredump.h b/include/linux/coredump.h new file mode 100644 index 0000000..b3c91d7 --- /dev/null +++ b/include/linux/coredump.h @@ -0,0 +1,41 @@ +#ifndef _LINUX_COREDUMP_H +#define _LINUX_COREDUMP_H + +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/fs.h> + +/* + * These are the only things you should do on a core-file: use only these + * functions to write out all the necessary info. 
+ */ +static inline int dump_write(struct file *file, const void *addr, int nr) +{ + return file->f_op->write(file, addr, nr, &file->f_pos) == nr; +} + +static inline int dump_seek(struct file *file, loff_t off) +{ + if (file->f_op->llseek && file->f_op->llseek != no_llseek) { + if (file->f_op->llseek(file, off, SEEK_CUR) < 0) + return 0; + } else { + char *buf = (char *)get_zeroed_page(GFP_KERNEL); + + if (!buf) + return 0; + while (off > 0) { + unsigned long n = off; + + if (n > PAGE_SIZE) + n = PAGE_SIZE; + if (!dump_write(file, buf, n)) + return 0; + off -= n; + } + free_page((unsigned long)buf); + } + return 1; +} + +#endif /* _LINUX_COREDUMP_H */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index dbcee76..bae6fe2 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -90,10 +90,10 @@ extern const struct cpumask *const cpu_active_mask; #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) #define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) #else -#define num_online_cpus() 1 -#define num_possible_cpus() 1 -#define num_present_cpus() 1 -#define num_active_cpus() 1 +#define num_online_cpus() 1U +#define num_possible_cpus() 1U +#define num_present_cpus() 1U +#define num_active_cpus() 1U #define cpu_online(cpu) ((cpu) == 0) #define cpu_possible(cpu) ((cpu) == 0) #define cpu_present(cpu) ((cpu) == 0) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index d4c9c0b..1381cd9 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -118,10 +118,9 @@ struct dm_dev { /* * Constructors should call these functions to ensure destination devices * are opened/closed correctly. - * FIXME: too many arguments. */ -int dm_get_device(struct dm_target *ti, const char *path, sector_t start, - sector_t len, fmode_t mode, struct dm_dev **result); +int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, + struct dm_dev **result); void dm_put_device(struct dm_target *ti, struct dm_dev *d); /* diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h index b6bf17e..5c9186b 100644 --- a/include/linux/dm-io.h +++ b/include/linux/dm-io.h @@ -37,14 +37,14 @@ enum dm_io_mem_type { struct dm_io_memory { enum dm_io_mem_type type; + unsigned offset; + union { struct page_list *pl; struct bio_vec *bvec; void *vma; void *addr; } ptr; - - unsigned offset; }; struct dm_io_notify { diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index aa95508..2c445e1 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h @@ -266,9 +266,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 16 +#define DM_VERSION_MINOR 17 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2009-11-05)" +#define DM_VERSION_EXTRA "-ioctl (2010-03-05)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ @@ -316,4 +316,9 @@ enum { */ #define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */ +/* + * If set, a uevent was generated for which the caller may need to wait. 
+ */ +#define DM_UEVENT_GENERATED_FLAG (1 << 13) /* Out */ + #endif /* _LINUX_DM_IOCTL_H */ diff --git a/include/linux/elf.h b/include/linux/elf.h index ad990c5..5978584 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -50,6 +50,28 @@ typedef __s64 Elf64_Sxword; #define PT_GNU_STACK (PT_LOOS + 0x474e551) +/* + * Extended Numbering + * + * If the real number of program header table entries is larger than + * or equal to PN_XNUM(0xffff), it is set to sh_info field of the + * section header at index 0, and PN_XNUM is set to e_phnum + * field. Otherwise, the section header at index 0 is zero + * initialized, if it exists. + * + * Specifications are available in: + * + * - Sun microsystems: Linker and Libraries. + * Part No: 817-1984-17, September 2008. + * URL: http://docs.sun.com/app/docs/doc/817-1984 + * + * - System V ABI AMD64 Architecture Processor Supplement + * Draft Version 0.99., + * May 11, 2009. + * URL: http://www.x86-64.org/ + */ +#define PN_XNUM 0xffff + /* These constants define the different elf file types */ #define ET_NONE 0 #define ET_REL 1 @@ -286,7 +308,7 @@ typedef struct elf64_phdr { #define SHN_COMMON 0xfff2 #define SHN_HIRESERVE 0xffff -typedef struct { +typedef struct elf32_shdr { Elf32_Word sh_name; Elf32_Word sh_type; Elf32_Word sh_flags; @@ -394,16 +416,20 @@ typedef struct elf64_note { extern Elf32_Dyn _DYNAMIC []; #define elfhdr elf32_hdr #define elf_phdr elf32_phdr +#define elf_shdr elf32_shdr #define elf_note elf32_note #define elf_addr_t Elf32_Off +#define Elf_Half Elf32_Half #else extern Elf64_Dyn _DYNAMIC []; #define elfhdr elf64_hdr #define elf_phdr elf64_phdr +#define elf_shdr elf64_shdr #define elf_note elf64_note #define elf_addr_t Elf64_Off +#define Elf_Half Elf64_Half #endif diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 00d6a68..e687bc3 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -8,6 +8,8 @@ #include <linux/user.h> #endif #include <linux/ptrace.h> +#include <linux/elf.h> +#include <linux/fs.h> struct elf_siginfo { @@ -150,5 +152,20 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse #endif /* __KERNEL__ */ +/* + * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out + * extra segments containing the gate DSO contents. Dumping its + * contents makes post-mortem fully interpretable later without matching up + * the same kernel and hardware config to see what PC values meant. + * Dumping its extra ELF program headers includes all the other information + * a debugger needs to easily find how the gate DSO was being used. + */ +extern Elf_Half elf_core_extra_phdrs(void); +extern int +elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, + unsigned long limit); +extern int +elf_core_write_extra_data(struct file *file, size_t *size, unsigned long limit); +extern size_t elf_core_extra_data_size(void); #endif /* _LINUX_ELFCORE_H */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index dc12f41..a9cd507 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -96,6 +96,7 @@ struct fid { * @fh_to_parent: find the implied object's parent and get a dentry for it * @get_name: find the name for a given inode in a given directory * @get_parent: find the parent of a given directory + * @commit_metadata: commit metadata changes to stable storage * * See Documentation/filesystems/nfs/Exporting for details on how to use * this interface correctly. @@ -137,6 +138,9 @@ struct fid { * is also a directory. 
In the event that it cannot be found, or storage * space cannot be allocated, a %ERR_PTR should be returned. * + * commit_metadata: + * @commit_metadata should commit metadata changes to stable storage. + * * Locking rules: * get_parent is called with child->d_inode->i_mutex down * get_name is not (which is possibly inconsistent) @@ -152,6 +156,7 @@ struct export_operations { int (*get_name)(struct dentry *parent, char *name, struct dentry *child); struct dentry * (*get_parent)(struct dentry *child); + int (*commit_metadata)(struct inode *inode); }; extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h index 875451f..c6dcc1d 100644 --- a/include/linux/firmware-map.h +++ b/include/linux/firmware-map.h @@ -24,17 +24,17 @@ */ #ifdef CONFIG_FIRMWARE_MEMMAP -int firmware_map_add(u64 start, u64 end, const char *type); int firmware_map_add_early(u64 start, u64 end, const char *type); +int firmware_map_add_hotplug(u64 start, u64 end, const char *type); #else /* CONFIG_FIRMWARE_MEMMAP */ -static inline int firmware_map_add(u64 start, u64 end, const char *type) +static inline int firmware_map_add_early(u64 start, u64 end, const char *type) { return 0; } -static inline int firmware_map_add_early(u64 start, u64 end, const char *type) +static inline int firmware_map_add_hotplug(u64 start, u64 end, const char *type) { return 0; } diff --git a/include/linux/fs.h b/include/linux/fs.h index 4568962..10b8ded 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -60,24 +60,24 @@ struct inodes_stat_t { */ /* file is open for reading */ -#define FMODE_READ ((__force fmode_t)1) +#define FMODE_READ ((__force fmode_t)0x1) /* file is open for writing */ -#define FMODE_WRITE ((__force fmode_t)2) +#define FMODE_WRITE ((__force fmode_t)0x2) /* file is seekable */ -#define FMODE_LSEEK ((__force fmode_t)4) +#define FMODE_LSEEK ((__force fmode_t)0x4) /* file can be accessed using pread */ -#define FMODE_PREAD ((__force fmode_t)8) +#define FMODE_PREAD ((__force fmode_t)0x8) /* file can be accessed using pwrite */ -#define FMODE_PWRITE ((__force fmode_t)16) +#define FMODE_PWRITE ((__force fmode_t)0x10) /* File is opened for execution with sys_execve / sys_uselib */ -#define FMODE_EXEC ((__force fmode_t)32) +#define FMODE_EXEC ((__force fmode_t)0x20) /* File is opened with O_NDELAY (only set for block devices) */ -#define FMODE_NDELAY ((__force fmode_t)64) +#define FMODE_NDELAY ((__force fmode_t)0x40) /* File is opened with O_EXCL (only set for block devices) */ -#define FMODE_EXCL ((__force fmode_t)128) +#define FMODE_EXCL ((__force fmode_t)0x80) /* File is opened using open(.., 3, ..) and is writeable only for ioctls (specialy hack for floppy.c) */ -#define FMODE_WRITE_IOCTL ((__force fmode_t)256) +#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100) /* * Don't update ctime and mtime. @@ -85,7 +85,10 @@ struct inodes_stat_t { * Currently a special hack for the XFS open_by_handle ioctl, but we'll * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. */ -#define FMODE_NOCMTIME ((__force fmode_t)2048) +#define FMODE_NOCMTIME ((__force fmode_t)0x800) + +/* Expect random access pattern */ +#define FMODE_RANDOM ((__force fmode_t)0x1000) /* * The below are the various read and write types that we support. Some of diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 557bdad..4c6d413 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -30,7 +30,8 @@ struct vm_area_struct; * _might_ fail. 
This depends upon the particular VM implementation. * * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller - * cannot handle allocation failures. + * cannot handle allocation failures. This modifier is deprecated and no new + * users should be added. * * __GFP_NORETRY: The VM implementation must not retry indefinitely. * @@ -83,6 +84,7 @@ struct vm_area_struct; #define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ __GFP_HARDWALL | __GFP_HIGHMEM | \ __GFP_MOVABLE) +#define GFP_IOFS (__GFP_IO | __GFP_FS) #ifdef CONFIG_NUMA #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) @@ -325,7 +327,7 @@ void free_pages_exact(void *virt, size_t size); extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); -extern void free_hot_page(struct page *page); +extern void free_hot_cold_page(struct page *page, int cold); #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr),0) @@ -337,9 +339,7 @@ void drain_local_pages(void *dummy); extern gfp_t gfp_allowed_mask; -static inline void set_gfp_allowed_mask(gfp_t mask) -{ - gfp_allowed_mask = mask; -} +extern void set_gfp_allowed_mask(gfp_t mask); +extern gfp_t clear_gfp_allowed_mask(gfp_t mask); #endif /* __LINUX_GFP_H */ diff --git a/include/linux/i2c/pca953x.h b/include/linux/i2c/pca953x.h index 81736d6..d5c5a60 100644 --- a/include/linux/i2c/pca953x.h +++ b/include/linux/i2c/pca953x.h @@ -1,3 +1,9 @@ +#ifndef _LINUX_PCA953X_H +#define _LINUX_PCA953X_H + +#include <linux/types.h> +#include <linux/i2c.h> + /* platform data for the PCA9539 16-bit I/O expander driver */ struct pca953x_platform_data { @@ -7,6 +13,9 @@ struct pca953x_platform_data { /* initial polarity inversion setting */ uint16_t invert; + /* interrupt base */ + int irq_base; + void *context; /* param to setup/teardown */ int (*setup)(struct i2c_client *client, @@ -17,3 +26,5 @@ struct pca953x_platform_data { void *context); char **names; }; + +#endif /* _LINUX_PCA953X_H */ diff --git a/include/linux/list.h b/include/linux/list.h index 5d9c655..8392884 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -498,7 +498,7 @@ static inline void list_splice_tail_init(struct list_head *list, pos = n, n = list_entry(n->member.next, typeof(*n), member)) /** - * list_for_each_entry_safe_continue + * list_for_each_entry_safe_continue - continue list iteration safe against removal * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. @@ -514,7 +514,7 @@ static inline void list_splice_tail_init(struct list_head *list, pos = n, n = list_entry(n->member.next, typeof(*n), member)) /** - * list_for_each_entry_safe_from + * list_for_each_entry_safe_from - iterate over list from current point safe against removal * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. @@ -529,7 +529,7 @@ static inline void list_splice_tail_init(struct list_head *list, pos = n, n = list_entry(n->member.next, typeof(*n), member)) /** - * list_for_each_entry_safe_reverse + * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. 
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h index 94cb51a..8895d9d 100644 --- a/include/linux/mfd/mc13783.h +++ b/include/linux/mfd/mc13783.h @@ -26,10 +26,30 @@ int mc13783_irq_request(struct mc13783 *mc13783, int irq, int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq, irq_handler_t handler, const char *name, void *dev); int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev); -int mc13783_ackirq(struct mc13783 *mc13783, int irq); -int mc13783_mask(struct mc13783 *mc13783, int irq); -int mc13783_unmask(struct mc13783 *mc13783, int irq); +int mc13783_irq_mask(struct mc13783 *mc13783, int irq); +int mc13783_irq_unmask(struct mc13783 *mc13783, int irq); +int mc13783_irq_status(struct mc13783 *mc13783, int irq, + int *enabled, int *pending); +int mc13783_irq_ack(struct mc13783 *mc13783, int irq); + +static inline int mc13783_mask(struct mc13783 *mc13783, int irq) __deprecated; +static inline int mc13783_mask(struct mc13783 *mc13783, int irq) +{ + return mc13783_irq_mask(mc13783, irq); +} + +static inline int mc13783_unmask(struct mc13783 *mc13783, int irq) __deprecated; +static inline int mc13783_unmask(struct mc13783 *mc13783, int irq) +{ + return mc13783_irq_unmask(mc13783, irq); +} + +static inline int mc13783_ackirq(struct mc13783 *mc13783, int irq) __deprecated; +static inline int mc13783_ackirq(struct mc13783 *mc13783, int irq) +{ + return mc13783_irq_ack(mc13783, irq); +} #define MC13783_ADC0 43 #define MC13783_ADC0_ADREFEN (1 << 10) diff --git a/include/linux/mm.h b/include/linux/mm.h index 90957f1..3899395 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -870,6 +870,108 @@ extern int mprotect_fixup(struct vm_area_struct *vma, */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); +/* + * per-process(per-mm_struct) statistics. + */ +#if defined(SPLIT_RSS_COUNTING) +/* + * The mm counters are not protected by its page_table_lock, + * so must be incremented atomically. + */ +static inline void set_mm_counter(struct mm_struct *mm, int member, long value) +{ + atomic_long_set(&mm->rss_stat.count[member], value); +} + +unsigned long get_mm_counter(struct mm_struct *mm, int member); + +static inline void add_mm_counter(struct mm_struct *mm, int member, long value) +{ + atomic_long_add(value, &mm->rss_stat.count[member]); +} + +static inline void inc_mm_counter(struct mm_struct *mm, int member) +{ + atomic_long_inc(&mm->rss_stat.count[member]); +} + +static inline void dec_mm_counter(struct mm_struct *mm, int member) +{ + atomic_long_dec(&mm->rss_stat.count[member]); +} + +#else /* !USE_SPLIT_PTLOCKS */ +/* + * The mm counters are protected by its page_table_lock, + * so can be incremented directly. 
+ */ +static inline void set_mm_counter(struct mm_struct *mm, int member, long value) +{ + mm->rss_stat.count[member] = value; +} + +static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) +{ + return mm->rss_stat.count[member]; +} + +static inline void add_mm_counter(struct mm_struct *mm, int member, long value) +{ + mm->rss_stat.count[member] += value; +} + +static inline void inc_mm_counter(struct mm_struct *mm, int member) +{ + mm->rss_stat.count[member]++; +} + +static inline void dec_mm_counter(struct mm_struct *mm, int member) +{ + mm->rss_stat.count[member]--; +} + +#endif /* !USE_SPLIT_PTLOCKS */ + +static inline unsigned long get_mm_rss(struct mm_struct *mm) +{ + return get_mm_counter(mm, MM_FILEPAGES) + + get_mm_counter(mm, MM_ANONPAGES); +} + +static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) +{ + return max(mm->hiwater_rss, get_mm_rss(mm)); +} + +static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) +{ + return max(mm->hiwater_vm, mm->total_vm); +} + +static inline void update_hiwater_rss(struct mm_struct *mm) +{ + unsigned long _rss = get_mm_rss(mm); + + if ((mm)->hiwater_rss < _rss) + (mm)->hiwater_rss = _rss; +} + +static inline void update_hiwater_vm(struct mm_struct *mm) +{ + if (mm->hiwater_vm < mm->total_vm) + mm->hiwater_vm = mm->total_vm; +} + +static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, + struct mm_struct *mm) +{ + unsigned long hiwater_rss = get_mm_hiwater_rss(mm); + + if (*maxrss < hiwater_rss) + *maxrss = hiwater_rss; +} + +void sync_mm_rss(struct task_struct *task, struct mm_struct *mm); /* * A callback you can register to apply pressure to ageable caches. @@ -1114,7 +1216,7 @@ static inline void vma_nonlinear_insert(struct vm_area_struct *vma, /* mmap.c */ extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); -extern void vma_adjust(struct vm_area_struct *vma, unsigned long start, +extern int vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert); extern struct vm_area_struct *vma_merge(struct mm_struct *, struct vm_area_struct *prev, unsigned long addr, unsigned long end, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 36f9627..048b462 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -24,12 +24,6 @@ struct address_space; #define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) -#if USE_SPLIT_PTLOCKS -typedef atomic_long_t mm_counter_t; -#else /* !USE_SPLIT_PTLOCKS */ -typedef unsigned long mm_counter_t; -#endif /* !USE_SPLIT_PTLOCKS */ - /* * Each physical page in the system has a struct page associated with * it to keep track of whatever it is we are using the page for at the @@ -169,7 +163,8 @@ struct vm_area_struct { * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack * or brk vma (with NULL file) can only be in an anon_vma list. */ - struct list_head anon_vma_node; /* Serialized by anon_vma->lock */ + struct list_head anon_vma_chain; /* Serialized by mmap_sem & + * page_table_lock */ struct anon_vma *anon_vma; /* Serialized by page_table_lock */ /* Function pointers to deal with this struct. 
*/ @@ -201,6 +196,29 @@ struct core_state { struct completion startup; }; +enum { + MM_FILEPAGES, + MM_ANONPAGES, + MM_SWAPENTS, + NR_MM_COUNTERS +}; + +#if USE_SPLIT_PTLOCKS +#define SPLIT_RSS_COUNTING +struct mm_rss_stat { + atomic_long_t count[NR_MM_COUNTERS]; +}; +/* per-thread cached information, */ +struct task_rss_stat { + int events; /* for synchronization threshold */ + int count[NR_MM_COUNTERS]; +}; +#else /* !USE_SPLIT_PTLOCKS */ +struct mm_rss_stat { + unsigned long count[NR_MM_COUNTERS]; +}; +#endif /* !USE_SPLIT_PTLOCKS */ + struct mm_struct { struct vm_area_struct * mmap; /* list of VMAs */ struct rb_root mm_rb; @@ -227,11 +245,6 @@ struct mm_struct { * by mmlist_lock */ - /* Special counters, in some configurations protected by the - * page_table_lock, in other configurations by being atomic. - */ - mm_counter_t _file_rss; - mm_counter_t _anon_rss; unsigned long hiwater_rss; /* High-watermark of RSS usage */ unsigned long hiwater_vm; /* High-water virtual memory usage */ @@ -244,6 +257,12 @@ struct mm_struct { unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ + /* + * Special counters, in some configurations protected by the + * page_table_lock, in other configurations by being atomic. + */ + struct mm_rss_stat rss_stat; + struct linux_binfmt *binfmt; cpumask_t cpu_vm_mask; diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 2ee22e8..d02d2c6 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -99,6 +99,8 @@ struct mmc_card { #define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */ unsigned int quirks; /* card quirks */ #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ +#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ + /* for byte mode */ u32 raw_cid[4]; /* raw card CID */ u32 raw_csd[4]; /* raw card CSD */ @@ -139,6 +141,11 @@ static inline int mmc_card_lenient_fn0(const struct mmc_card *c) return c->quirks & MMC_QUIRK_LENIENT_FN0; } +static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; +} + #define mmc_card_name(c) ((c)->cid.prod_name) #define mmc_card_id(c) (dev_name(&(c)->dev)) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index eaf3636..43eaf5c 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -14,6 +14,7 @@ #include <linux/sched.h> #include <linux/mmc/core.h> +#include <linux/mmc/pm.h> struct mmc_ios { unsigned int clock; /* clock rate */ @@ -152,6 +153,8 @@ struct mmc_host { #define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */ #define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ + mmc_pm_flag_t pm_caps; /* supported pm features */ + /* host specific block data */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */ unsigned short max_hw_segs; /* see blk_queue_max_hw_segments */ @@ -197,6 +200,8 @@ struct mmc_host { struct task_struct *sdio_irq_thread; atomic_t sdio_irq_thread_abort; + mmc_pm_flag_t pm_flags; /* requested pm features */ + #ifdef CONFIG_LEDS_TRIGGERS struct led_trigger *led; /* activity led */ #endif diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h new file mode 100644 index 0000000..d37aac4 --- /dev/null +++ b/include/linux/mmc/pm.h @@ -0,0 +1,30 @@ +/* + * linux/include/linux/mmc/pm.h + * + * Author: Nicolas Pitre + * Copyright: (C) 2009 Marvell Technology Group Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef LINUX_MMC_PM_H +#define LINUX_MMC_PM_H + +/* + * These flags are used to describe power management features that + * some cards (typically SDIO cards) might wish to benefit from when + * the host system is being suspended. There are several layers of + * abstractions involved, from the host controller driver, to the MMC core + * code, to the SDIO core code, to finally get to the actual SDIO function + * driver. This file is therefore used for common definitions shared across + * all those layers. + */ + +typedef unsigned int mmc_pm_flag_t; + +#define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */ +#define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */ + +#endif diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h index 47ba464..0ebaef5 100644 --- a/include/linux/mmc/sdio.h +++ b/include/linux/mmc/sdio.h @@ -95,6 +95,8 @@ #define SDIO_BUS_WIDTH_1BIT 0x00 #define SDIO_BUS_WIDTH_4BIT 0x02 +#define SDIO_BUS_ASYNC_INT 0x20 + #define SDIO_BUS_CD_DISABLE 0x80 /* disable pull-up on DAT3 (pin 1) */ #define SDIO_CCCR_CAPS 0x08 diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index ac3ab68..c6c0cce 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -15,6 +15,8 @@ #include <linux/device.h> #include <linux/mod_devicetable.h> +#include <linux/mmc/pm.h> + struct mmc_card; struct sdio_func; @@ -153,5 +155,8 @@ extern unsigned char sdio_f0_readb(struct sdio_func *func, extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr, int *err_ret); +extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func); +extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags); + #endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index a01a103..bc209d8 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -306,6 +306,7 @@ struct zone { * free areas of different sizes */ spinlock_t lock; + int all_unreclaimable; /* All pages pinned */ #ifdef CONFIG_MEMORY_HOTPLUG /* see spanned/present_pages for more description */ seqlock_t span_seqlock; @@ -417,7 +418,6 @@ struct zone { } ____cacheline_internodealigned_in_smp; typedef enum { - ZONE_ALL_UNRECLAIMABLE, /* all pages pinned */ ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ } zone_flags_t; @@ -437,11 +437,6 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag) clear_bit(flag, &zone->flags); } -static inline int zone_is_all_unreclaimable(const struct zone *zone) -{ - return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags); -} - static inline int zone_is_reclaim_locked(const struct zone *zone) { return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 454997c..c4fa64b 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -69,8 +69,6 @@ * int node_online(node) Is some node online? * int node_possible(node) Is some node possible? 
* - * int any_online_node(mask) First online node in mask - * * node_set_online(node) set bit 'node' in node_online_map * node_set_offline(node) clear bit 'node' in node_online_map * @@ -467,15 +465,6 @@ static inline int num_node_state(enum node_states state) #define node_online_map node_states[N_ONLINE] #define node_possible_map node_states[N_POSSIBLE] -#define any_online_node(mask) \ -({ \ - int node; \ - for_each_node_mask(node, (mask)) \ - if (node_online(node)) \ - break; \ - node; \ -}) - #define num_online_nodes() num_node_state(N_ONLINE) #define num_possible_nodes() num_node_state(N_POSSIBLE) #define node_online(node) node_state((node), N_ONLINE) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b019ae6..d25bd22 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -37,7 +37,27 @@ struct anon_vma { * is serialized by a system wide lock only visible to * mm_take_all_locks() (mm_all_locks_mutex). */ - struct list_head head; /* List of private "related" vmas */ + struct list_head head; /* Chain of private "related" vmas */ +}; + +/* + * The copy-on-write semantics of fork mean that an anon_vma + * can become associated with multiple processes. Furthermore, + * each child process will have its own anon_vma, where new + * pages for that process are instantiated. + * + * This structure allows us to find the anon_vmas associated + * with a VMA, or the VMAs associated with an anon_vma. + * The "same_vma" list contains the anon_vma_chains linking + * all the anon_vmas associated with this VMA. + * The "same_anon_vma" list contains the anon_vma_chains + * which link all the VMAs associated with this anon_vma. + */ +struct anon_vma_chain { + struct vm_area_struct *vma; + struct anon_vma *anon_vma; + struct list_head same_vma; /* locked by mmap_sem & page_table_lock */ + struct list_head same_anon_vma; /* locked by anon_vma->lock */ }; #ifdef CONFIG_MMU @@ -89,15 +109,23 @@ static inline void anon_vma_unlock(struct vm_area_struct *vma) */ void anon_vma_init(void); /* create anon_vma_cachep */ int anon_vma_prepare(struct vm_area_struct *); -void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *); -void anon_vma_unlink(struct vm_area_struct *); -void anon_vma_link(struct vm_area_struct *); +void unlink_anon_vmas(struct vm_area_struct *); +int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); +int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); void __anon_vma_link(struct vm_area_struct *); void anon_vma_free(struct anon_vma *); +static inline void anon_vma_merge(struct vm_area_struct *vma, + struct vm_area_struct *next) +{ + VM_BUG_ON(vma->anon_vma != next->anon_vma); + unlink_anon_vmas(next); +} + /* * rmap interfaces called when adding or removing pte of page */ +void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); void page_add_file_rmap(struct page *); @@ -181,7 +209,7 @@ static inline int page_referenced(struct page *page, int is_locked, unsigned long *vm_flags) { *vm_flags = 0; - return TestClearPageReferenced(page); + return 0; } #define try_to_unmap(page, refs) SWAP_FAIL diff --git a/include/linux/sched.h b/include/linux/sched.h index 4b1753f..46c6f8d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -396,60 +396,6 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); static inline void 
arch_pick_mmap_layout(struct mm_struct *mm) {} #endif -#if USE_SPLIT_PTLOCKS -/* - * The mm counters are not protected by its page_table_lock, - * so must be incremented atomically. - */ -#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value) -#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member)) -#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member) -#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member) -#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member) - -#else /* !USE_SPLIT_PTLOCKS */ -/* - * The mm counters are protected by its page_table_lock, - * so can be incremented directly. - */ -#define set_mm_counter(mm, member, value) (mm)->_##member = (value) -#define get_mm_counter(mm, member) ((mm)->_##member) -#define add_mm_counter(mm, member, value) (mm)->_##member += (value) -#define inc_mm_counter(mm, member) (mm)->_##member++ -#define dec_mm_counter(mm, member) (mm)->_##member-- - -#endif /* !USE_SPLIT_PTLOCKS */ - -#define get_mm_rss(mm) \ - (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss)) -#define update_hiwater_rss(mm) do { \ - unsigned long _rss = get_mm_rss(mm); \ - if ((mm)->hiwater_rss < _rss) \ - (mm)->hiwater_rss = _rss; \ -} while (0) -#define update_hiwater_vm(mm) do { \ - if ((mm)->hiwater_vm < (mm)->total_vm) \ - (mm)->hiwater_vm = (mm)->total_vm; \ -} while (0) - -static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) -{ - return max(mm->hiwater_rss, get_mm_rss(mm)); -} - -static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, - struct mm_struct *mm) -{ - unsigned long hiwater_rss = get_mm_hiwater_rss(mm); - - if (*maxrss < hiwater_rss) - *maxrss = hiwater_rss; -} - -static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) -{ - return max(mm->hiwater_vm, mm->total_vm); -} extern void set_dumpable(struct mm_struct *mm, int value); extern int get_dumpable(struct mm_struct *mm); @@ -1274,7 +1220,9 @@ struct task_struct { struct plist_node pushable_tasks; struct mm_struct *mm, *active_mm; - +#if defined(SPLIT_RSS_COUNTING) + struct task_rss_stat rss_stat; +#endif /* task state */ int exit_state; int exit_code, exit_signal; diff --git a/include/linux/smp.h b/include/linux/smp.h index 7a0570e..cfa2d20 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -154,7 +154,7 @@ smp_call_function_any(const struct cpumask *mask, void (*func)(void *info), /* * smp_processor_id(): get the current CPU ID. * - * if DEBUG_PREEMPT is enabled the we check whether it is + * if DEBUG_PREEMPT is enabled then we check whether it is * used in a preemption-safe way. (smp_processor_id() is safe * if it's used in a preemption-off critical section, or in * a thread that is bound to the current CPU.) diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h index 6dfd83f..34af0a3 100644 --- a/include/linux/spi/max7301.h +++ b/include/linux/spi/max7301.h @@ -1,9 +1,27 @@ #ifndef LINUX_SPI_MAX7301_H #define LINUX_SPI_MAX7301_H +#include <linux/gpio.h> + +/* + * Some registers must be read back to modify. 
+ * To save time we cache them here in memory + */ +struct max7301 { + struct mutex lock; + u8 port_config[8]; /* field 0 is unused */ + u32 out_level; /* cached output levels */ + struct gpio_chip chip; + struct device *dev; + int (*write)(struct device *dev, unsigned int reg, unsigned int val); + int (*read)(struct device *dev, unsigned int reg); +}; + struct max7301_platform_data { /* number assigned to the first GPIO */ unsigned base; }; +extern int __max730x_remove(struct device *dev); +extern int __max730x_probe(struct max7301 *ts); #endif
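
The asm-generic/gpio.h hunk at the top adds gpio_request_one() and the struct gpio array helpers, so a driver can request a pin, set its direction and set the initial level in one call. A minimal usage sketch; the GPIO numbers, labels and foo_* names are made up:

	#include <linux/gpio.h>
	#include <linux/kernel.h>

	static struct gpio foo_gpios[] = {
		{ 12, GPIOF_OUT_INIT_LOW,  "foo-reset" },	/* hypothetical pins */
		{ 13, GPIOF_OUT_INIT_HIGH, "foo-power" },
		{ 14, GPIOF_IN,            "foo-irq"   },
	};

	static int foo_claim_gpios(void)
	{
		int err;

		/* request + direction + initial level in one step */
		err = gpio_request_one(15, GPIOF_OUT_INIT_LOW, "foo-led");
		if (err)
			return err;

		/* claim a whole table of GPIOs at once */
		err = gpio_request_array(foo_gpios, ARRAY_SIZE(foo_gpios));
		if (err)
			gpio_free(15);
		return err;
	}

	static void foo_release_gpios(void)
	{
		gpio_free_array(foo_gpios, ARRAY_SIZE(foo_gpios));
		gpio_free(15);
	}

The array helper is intended to release any pins it already claimed if a later request in the table fails, which is the main convenience over open-coded loops.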
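
The bitops.h hunk renames the iterator to for_each_set_bit() and keeps for_each_bit() as a temporary alias, so existing users keep building. Usage is unchanged; a short sketch (the foo_* name is illustrative):

	#include <linux/bitops.h>
	#include <linux/kernel.h>

	static void foo_dump_mask(const unsigned long *mask, unsigned int nbits)
	{
		unsigned int bit;

		/* same semantics as the old for_each_bit() */
		for_each_set_bit(bit, mask, nbits)
			printk(KERN_DEBUG "bit %u is set\n", bit);
	}
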
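
The new btree.h, btree-type.h and btree-128.h headers generate typed wrappers (btree_initl(), btree_insert32(), btree_lookup64(), the 128-bit variants, and so on) around the geometry-based core API. A sketch of the unsigned-long-keyed "l" variant, with illustrative keys, values and foo_* names; error handling is abbreviated:

	#include <linux/btree.h>
	#include <linux/gfp.h>

	static struct btree_headl foo_tree;

	static int foo_btree_demo(void *a, void *b)
	{
		unsigned long key;
		void *val;
		int err;

		err = btree_initl(&foo_tree);		/* allocates the mempool */
		if (err)
			return err;

		err = btree_insertl(&foo_tree, 10, a, GFP_KERNEL);
		if (!err)
			err = btree_insertl(&foo_tree, 20, b, GFP_KERNEL);
		if (err)
			goto out;

		val = btree_lookupl(&foo_tree, 10);	/* -> a, or NULL if absent */

		/* walk from the largest key downwards; safe against removal */
		btree_for_each_safel(&foo_tree, key, val)
			btree_removel(&foo_tree, key);
	out:
		btree_destroyl(&foo_tree);
		return err;
	}
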
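
The device-mapper.h change drops the start/len arguments from dm_get_device(). A sketch of how a target constructor might call it now; the foo_* target structure is hypothetical, and the access mode is taken from the target's table, as is common in in-tree targets:

	#include <linux/device-mapper.h>

	struct foo_dm_target {
		struct dm_dev *dev;
	};

	static int foo_get_dev(struct dm_target *ti, struct foo_dm_target *ft,
			       const char *path)
	{
		/* start/len are gone; only the path and mode are passed */
		return dm_get_device(ti, path, dm_table_get_mode(ti->table),
				     &ft->dev);
	}
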
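
The elf.h hunk adds PN_XNUM plus the elf_shdr and Elf_Half aliases so core-dump code can describe more than 0xffff program headers via extended numbering. A sketch of how the real count is recovered on the reading side; the helper name is made up:

	#include <linux/elf.h>

	static unsigned int foo_real_phnum(const struct elfhdr *ehdr,
					   const struct elf_shdr *shdr0)
	{
		/* e_phnum == PN_XNUM: the real count lives in sh_info of section 0 */
		if (ehdr->e_phnum == PN_XNUM)
			return shdr0->sh_info;
		return ehdr->e_phnum;
	}
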
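
The mm.h, mm_types.h and sched.h hunks replace the named _file_rss/_anon_rss fields with an rss_stat array indexed by the new MM_* enum, and move the counter helpers from sched.h into mm.h. A sketch of the updated calling convention (foo_* names are illustrative):

	#include <linux/mm.h>
	#include <linux/mm_types.h>

	static void foo_account_anon_page(struct mm_struct *mm)
	{
		/* was inc_mm_counter(mm, anon_rss) with the old field-name macros */
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	static unsigned long foo_resident_pages(struct mm_struct *mm)
	{
		/* file-backed + anonymous resident pages, as before */
		return get_mm_rss(mm);
	}
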
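
The mmc/pm.h, mmc/host.h and sdio_func.h hunks add mmc_pm_flag_t so an SDIO function driver can ask the host to keep the card powered across system suspend. A sketch of a suspend path using the new calls; the driver name and error policy are illustrative:

	#include <linux/mmc/sdio_func.h>
	#include <linux/errno.h>

	static int foo_sdio_suspend(struct sdio_func *func)
	{
		mmc_pm_flag_t caps = sdio_get_host_pm_caps(func);

		if (!(caps & MMC_PM_KEEP_POWER))
			return -ENOSYS;	/* host cannot keep the card powered */

		/* request that card power (and hence state) be preserved */
		return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	}
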