author    Anton Altaparmakov <aia21@cantab.net>  2006-01-19 16:39:33 +0000
committer Anton Altaparmakov <aia21@cantab.net>  2006-01-19 16:39:33 +0000
commit    944d79559d154c12becde0dab327016cf438f46c (patch)
tree      50c101806f4d3b6585222dda060559eb4f3e005a /lib
parent    d087e4bdd24ebe3ae3d0b265b6573ec901af4b4b (diff)
parent    0f36b018b2e314d45af86449f1a97facb1fbe300 (diff)
Merge branch 'master' of /usr/src/ntfs-2.6/
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                  43
-rw-r--r--  lib/bitmap.c                       89
-rw-r--r--  lib/dec_and_lock.c                 49
-rw-r--r--  lib/find_next_bit.c                 3
-rw-r--r--  lib/klist.c                         2
-rw-r--r--  lib/kobject.c                       4
-rw-r--r--  lib/kobject_uevent.c              349
-rw-r--r--  lib/radix-tree.c                  143
-rw-r--r--  lib/spinlock_debug.c               18
-rw-r--r--  lib/swiotlb.c                       5
-rw-r--r--  lib/zlib_deflate/deflate.c          6
-rw-r--r--  lib/zlib_deflate/deflate_syms.c     2
-rw-r--r--  lib/zlib_inflate/infblock.c         4
-rw-r--r--  lib/zlib_inflate/infblock.h         4
-rw-r--r--  lib/zlib_inflate/inflate_syms.c     2
-rw-r--r--  lib/zlib_inflate/inflate_sync.c     4

16 files changed, 326 insertions(+), 401 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 156822e..a314e66 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -9,15 +9,9 @@ config PRINTK_TIME
 	  in kernel startup.
 
-config DEBUG_KERNEL
-	bool "Kernel debugging"
-	help
-	  Say Y here if you are developing drivers or trying to debug and
-	  identify kernel problems.
-
 config MAGIC_SYSRQ
 	bool "Magic SysRq key"
-	depends on DEBUG_KERNEL && !UML
+	depends on !UML
 	help
 	  If you say Y here, you will have some control over the system even
 	  if the system crashes for example during kernel debugging (e.g., you
@@ -29,10 +23,16 @@ config MAGIC_SYSRQ
 	  keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
 	  unless you really know what this hack does.
 
+config DEBUG_KERNEL
+	bool "Kernel debugging"
+	help
+	  Say Y here if you are developing drivers or trying to debug and
+	  identify kernel problems.
+
 config LOG_BUF_SHIFT
 	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
 	range 12 21
-	default 17 if ARCH_S390
+	default 17 if S390
 	default 16 if X86_NUMAQ || IA64
 	default 15 if SMP
 	default 14
@@ -79,7 +79,7 @@ config SCHEDSTATS
 
 config DEBUG_SLAB
 	bool "Debug memory allocations"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && SLAB
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
@@ -95,6 +95,14 @@ config DEBUG_PREEMPT
 	  if kernel code uses it in a preemption-unsafe way. Also, the kernel
 	  will detect preemption count underflows.
 
+config DEBUG_MUTEXES
+	bool "Mutex debugging, deadlock detection"
+	default y
+	depends on DEBUG_KERNEL
+	help
+	  This allows mutex semantics violations and mutex related deadlocks
+	  (lockups) to be detected and reported automatically.
+
 config DEBUG_SPINLOCK
 	bool "Spinlock debugging"
 	depends on DEBUG_KERNEL
@@ -172,7 +180,8 @@ config DEBUG_VM
 	bool "Debug VM"
 	depends on DEBUG_KERNEL
 	help
-	  Enable this to debug the virtual-memory system.
+	  Enable this to turn on extended checks in the virtual-memory system
+	  that may impact performance.
 
 	  If unsure, say N.
 
@@ -186,6 +195,20 @@ config FRAME_POINTER
 	  some architectures or if you use external debuggers. If you don't
 	  debug the kernel, you can say N.
 
+config FORCED_INLINING
+	bool "Force gcc to inline functions marked 'inline'"
+	depends on DEBUG_KERNEL
+	default y
+	help
+	  This option determines if the kernel forces gcc to inline the functions
+	  developers have marked 'inline'. Doing so takes away freedom from gcc to
+	  do what it thinks is best, which is desirable for the gcc 3.x series of
+	  compilers. The gcc 4.x series have a rewritten inlining algorithm and
+	  disabling this option will generate a smaller kernel there. Hopefully
+	  this algorithm is so good that allowing gcc4 to make the decision can
+	  become the default in the future, until then this option is there to
+	  test gcc for this.
+
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
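The new DEBUG_MUTEXES option instruments the kernel's mutex API at run time. A minimal sketch of the locking pattern it polices follows; demo_lock and demo_critical() are hypothetical names for illustration, not part of this patch:

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);		/* hypothetical lock */

static void demo_critical(void)
{
	mutex_lock(&demo_lock);
	/* ... critical section ... */
	mutex_unlock(&demo_lock);
	/*
	 * With CONFIG_DEBUG_MUTEXES=y, semantics violations such as
	 * unlocking a mutex that is not held are detected and
	 * reported automatically.
	 */
}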
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 23d3b11..48e7083 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -519,7 +519,7 @@ EXPORT_SYMBOL(bitmap_parselist);
  *
  * Map the bit at position @pos in @buf (of length @bits) to the
  * ordinal of which set bit it is.  If it is not set or if @pos
- * is not a valid bit position, map to zero (0).
+ * is not a valid bit position, map to -1.
  *
  * If for example, just bits 4 through 7 are set in @buf, then @pos
  * values 4 through 7 will get mapped to 0 through 3, respectively,
@@ -531,18 +531,19 @@ EXPORT_SYMBOL(bitmap_parselist);
  */
 static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
 {
-	int ord = 0;
+	int i, ord;
 
-	if (pos >= 0 && pos < bits) {
-		int i;
+	if (pos < 0 || pos >= bits || !test_bit(pos, buf))
+		return -1;
 
-		for (i = find_first_bit(buf, bits);
-		     i < pos;
-		     i = find_next_bit(buf, bits, i + 1))
-			ord++;
-		if (i > pos)
-			ord = 0;
+	i = find_first_bit(buf, bits);
+	ord = 0;
+	while (i < pos) {
+		i = find_next_bit(buf, bits, i + 1);
+		ord++;
 	}
+	BUG_ON(i != pos);
+
 	return ord;
 }
@@ -553,11 +554,12 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
  * @bits: number of valid bit positions in @buf
  *
  * Map the ordinal offset of bit @ord in @buf to its position in @buf.
- * If @ord is not the ordinal offset of a set bit in @buf, map to zero (0).
+ * Value of @ord should be in range 0 <= @ord < weight(buf), else
+ * results are undefined.
  *
  * If for example, just bits 4 through 7 are set in @buf, then @ord
  * values 0 through 3 will get mapped to 4 through 7, respectively,
- * and all other @ord valuds will get mapped to 0.  When @ord value 3
+ * and all other @ord values return undefined values.  When @ord value 3
  * gets mapped to (returns) @pos value 7 in this example, that means
  * that the 3rd set bit (starting with 0th) is at position 7 in @buf.
  *
@@ -583,8 +585,8 @@ static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
 
 /**
  * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
- * @src: subset to be remapped
  * @dst: remapped result
+ * @src: subset to be remapped
  * @old: defines domain of map
  * @new: defines range of map
  * @bits: number of bits in each of these bitmaps
@@ -596,49 +598,42 @@ static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
  * weight of @old, map the position of the n-th set bit in @old to
  * the position of the m-th set bit in @new, where m == n % w.
  *
- * If either of the @old and @new bitmaps are empty, or if@src and @dst
- * point to the same location, then this routine does nothing.
+ * If either of the @old and @new bitmaps are empty, or if @src and
+ * @dst point to the same location, then this routine copies @src
+ * to @dst.
  *
- * The positions of unset bits in @old are mapped to the position of
- * the first set bit in @new.
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identify map).
  *
  * Apply the above specified mapping to @src, placing the result in
  * @dst, clearing any bits previously set in @dst.
  *
- * The resulting value of @dst will have either the same weight as
- * @src, or less weight in the general case that the mapping wasn't
- * injective due to the weight of @new being less than that of @old.
- * The resulting value of @dst will never have greater weight than
- * that of @src, except perhaps in the case that one of the above
- * conditions was not met and this routine just returned.
- *
 * For example, lets say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set.  This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
- * bit positions to 12 (the first set bit in @new.  So if say @src
- * comes into this routine with bits 1, 5 and 7 set, then @dst should
- * leave with bits 12, 13 and 15 set.
+ * bit positions unchanged.  So if say @src comes into this routine
+ * with bits 1, 5 and 7 set, then @dst should leave with bits 1,
+ * 13 and 15 set.
 */
 void bitmap_remap(unsigned long *dst, const unsigned long *src,
 		const unsigned long *old, const unsigned long *new,
 		int bits)
 {
-	int s;
+	int oldbit, w;
 
-	if (bitmap_weight(old, bits) == 0)
-		return;
-	if (bitmap_weight(new, bits) == 0)
-		return;
 	if (dst == src)		/* following doesn't handle inplace remaps */
 		return;
-
 	bitmap_zero(dst, bits);
-	for (s = find_first_bit(src, bits);
-	     s < bits;
-	     s = find_next_bit(src, bits, s + 1)) {
-		int x = bitmap_pos_to_ord(old, s, bits);
-		int y = bitmap_ord_to_pos(new, x, bits);
-		set_bit(y, dst);
+
+	w = bitmap_weight(new, bits);
+	for (oldbit = find_first_bit(src, bits);
+	     oldbit < bits;
+	     oldbit = find_next_bit(src, bits, oldbit + 1)) {
+		int n = bitmap_pos_to_ord(old, oldbit, bits);
+		if (n < 0 || w == 0)
+			set_bit(oldbit, dst);	/* identity map */
+		else
+			set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
 	}
 }
 EXPORT_SYMBOL(bitmap_remap);
@@ -657,8 +652,8 @@ EXPORT_SYMBOL(bitmap_remap);
 * weight of @old, map the position of the n-th set bit in @old to
 * the position of the m-th set bit in @new, where m == n % w.
 *
- * The positions of unset bits in @old are mapped to the position of
- * the first set bit in @new.
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identify map).
 *
 * Apply the above specified mapping to bit position @oldbit, returning
 * the new bit position.
@@ -666,14 +661,18 @@ EXPORT_SYMBOL(bitmap_remap);
 * For example, lets say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set.  This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
- * bit positions to 12 (the first set bit in @new.  So if say @oldbit
- * is 5, then this routine returns 13.
+ * bit positions unchanged.  So if say @oldbit is 5, then this routine
+ * returns 13.
 */
 int bitmap_bitremap(int oldbit, const unsigned long *old,
 				const unsigned long *new, int bits)
 {
-	int x = bitmap_pos_to_ord(old, oldbit, bits);
-	return bitmap_ord_to_pos(new, x, bits);
+	int w = bitmap_weight(new, bits);
+	int n = bitmap_pos_to_ord(old, oldbit, bits);
+	if (n < 0 || w == 0)
+		return oldbit;
+	else
+		return bitmap_ord_to_pos(new, n % w, bits);
 }
 EXPORT_SYMBOL(bitmap_bitremap);
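The semantic change to the remap helpers is easiest to see in isolation. Below is a small userspace sketch that mirrors the new bitmap_pos_to_ord()/bitmap_bitremap() behaviour on a single 64-bit word; the helper names echo the kernel routines but the code is illustrative only, not the kernel implementation:

#include <stdio.h>
#include <stdint.h>

static int weight(uint64_t b)
{
	return __builtin_popcountll(b);
}

/* Ordinal of the set bit at @pos, or -1 (mirrors bitmap_pos_to_ord). */
static int pos_to_ord(uint64_t b, int pos)
{
	if (pos < 0 || pos >= 64 || !((b >> pos) & 1))
		return -1;
	return __builtin_popcountll(b & ((1ULL << pos) - 1));
}

/* Position of the @ord-th set bit (mirrors bitmap_ord_to_pos). */
static int ord_to_pos(uint64_t b, int ord)
{
	int pos;

	for (pos = 0; pos < 64; pos++)
		if (((b >> pos) & 1) && ord-- == 0)
			return pos;
	return -1;
}

/* New bitremap semantics: bits unset in @old map to themselves. */
static int bitremap(int oldbit, uint64_t old, uint64_t new)
{
	int w = weight(new);
	int n = pos_to_ord(old, oldbit);

	if (n < 0 || w == 0)
		return oldbit;		/* identity map */
	return ord_to_pos(new, n % w);
}

int main(void)
{
	uint64_t old = 0xf0;		/* bits 4 through 7 set */
	uint64_t new = 0xf000;		/* bits 12 through 15 set */

	printf("%d\n", bitremap(5, old, new));	/* prints 13 */
	printf("%d\n", bitremap(1, old, new));	/* prints 1 (identity) */
	return 0;
}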
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 305a966..a65c314 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,47 +1,11 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
-#include <asm/system.h>
 
-#ifdef __HAVE_ARCH_CMPXCHG
 /*
  * This is an implementation of the notion of "decrement a
  * reference count, and return locked if it decremented to zero".
  *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-#else
-/*
- * This is an architecture-neutral, but slow,
- * implementation of the notion of "decrement
- * a reference count, and return locked if it
- * decremented to zero".
- *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
@@ -52,21 +16,20 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 *
 * because the spin-lock and the decrement must be
 * "atomic".
- *
- * This slow version gets the spinlock unconditionally,
- * and releases it if it isn't needed. Architectures
- * are encouraged to come up with better approaches,
- * this is trivially done efficiently using a load-locked
- * store-conditional approach, for example.
 */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+#ifdef CONFIG_SMP
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+#endif
+	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
 	spin_unlock(lock);
 	return 0;
 }
-#endif
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
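For callers, _atomic_dec_and_lock() keeps its contract: return 0 after an uncontended decrement, or return 1 with the lock held when the count hit zero. A typical caller pattern looks like the sketch below; struct obj, obj_put() and obj_list_lock are hypothetical names used only for illustration:

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct obj {
	atomic_t refcount;
	/* ... */
};

static DEFINE_SPINLOCK(obj_list_lock);

static void obj_put(struct obj *o)
{
	/*
	 * On SMP the new fast path (atomic_add_unless) drops the
	 * count without touching the lock whenever it cannot reach
	 * zero; only the final put takes obj_list_lock, and the
	 * call returns with it held so the object can be unlinked
	 * and freed without a race.
	 */
	if (atomic_dec_and_lock(&o->refcount, &obj_list_lock)) {
		/* unlink o from whatever list obj_list_lock guards */
		spin_unlock(&obj_list_lock);
		kfree(o);
	}
}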
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index d08302d..c05b4b1 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -10,6 +10,7 @@
 */
 
 #include <linux/bitops.h>
+#include <linux/module.h>
 
 int find_next_bit(const unsigned long *addr, int size, int offset)
 {
@@ -53,3 +54,5 @@ int find_next_bit(const unsigned long *addr, int size, int offset)
 
 	return offset;
 }
+
+EXPORT_SYMBOL(find_next_bit);
diff --git a/lib/klist.c b/lib/klist.c
index bb2f355..9c94f0b 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -199,6 +199,8 @@ void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_
 	i->i_klist = k;
 	i->i_head = &k->k_list;
 	i->i_cur = n;
+	if (n)
+		kref_get(&n->n_ref);
 }
 
 EXPORT_SYMBOL_GPL(klist_iter_init_node);
diff --git a/lib/kobject.c b/lib/kobject.c
index a181abe..7a0e680 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -207,7 +207,7 @@ int kobject_register(struct kobject * kobj)
 				kobject_name(kobj),error);
 			dump_stack();
 		} else
-			kobject_hotplug(kobj, KOBJ_ADD);
+			kobject_uevent(kobj, KOBJ_ADD);
 	} else
 		error = -EINVAL;
 	return error;
@@ -312,7 +312,7 @@ void kobject_del(struct kobject * kobj)
 void kobject_unregister(struct kobject * kobj)
 {
 	pr_debug("kobject %s: unregistering\n",kobject_name(kobj));
-	kobject_hotplug(kobj, KOBJ_REMOVE);
+	kobject_uevent(kobj, KOBJ_REMOVE);
 	kobject_del(kobj);
 	kobject_put(kobj);
 }
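The klist change matters to any caller that starts an iteration from a known node: the iterator now pins that node. A hypothetical walk showing the API involved (demo_walk_from() is an assumed name):

#include <linux/klist.h>

static void demo_walk_from(struct klist *k, struct klist_node *start)
{
	struct klist_iter iter;
	struct klist_node *n;

	/*
	 * Safe with the fix above: klist_iter_init_node() now takes
	 * a reference on @start, so it cannot go away mid-walk.
	 */
	klist_iter_init_node(k, &iter, start);
	while ((n = klist_next(&iter)) != NULL) {
		/* ... examine n ... */
	}
	klist_iter_exit(&iter);
}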
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 3ab3754..f56e27a 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -19,14 +19,16 @@
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
 #include <linux/string.h>
-#include <linux/kobject_uevent.h>
 #include <linux/kobject.h>
 #include <net/sock.h>
 
-#define BUFFER_SIZE	1024	/* buffer for the hotplug env */
+#define BUFFER_SIZE	1024	/* buffer for the variables */
 #define NUM_ENVP	32	/* number of env pointers */
 
-#if defined(CONFIG_KOBJECT_UEVENT) || defined(CONFIG_HOTPLUG)
+#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
+static DEFINE_SPINLOCK(sequence_lock);
+static struct sock *uevent_sock;
+
 static char *action_to_string(enum kobject_action action)
 {
 	switch (action) {
@@ -36,10 +38,6 @@ static char *action_to_string(enum kobject_action action)
 		return "remove";
 	case KOBJ_CHANGE:
 		return "change";
-	case KOBJ_MOUNT:
-		return "mount";
-	case KOBJ_UMOUNT:
-		return "umount";
 	case KOBJ_OFFLINE:
 		return "offline";
 	case KOBJ_ONLINE:
@@ -48,306 +46,183 @@ static char *action_to_string(enum kobject_action action)
 		return NULL;
 	}
 }
-#endif
-
-#ifdef CONFIG_KOBJECT_UEVENT
-static struct sock *uevent_sock;
 
 /**
- * send_uevent - notify userspace by sending event through netlink socket
+ * kobject_uevent - notify userspace by ending an uevent
 *
- * @signal: signal name
- * @obj: object path (kobject)
- * @envp: possible hotplug environment to pass with the message
- * @gfp_mask:
- */
-static int send_uevent(const char *signal, const char *obj,
-		       char **envp, gfp_t gfp_mask)
-{
-	struct sk_buff *skb;
-	char *pos;
-	int len;
-
-	if (!uevent_sock)
-		return -EIO;
-
-	len = strlen(signal) + 1;
-	len += strlen(obj) + 1;
-
-	/* allocate buffer with the maximum possible message size */
-	skb = alloc_skb(len + BUFFER_SIZE, gfp_mask);
-	if (!skb)
-		return -ENOMEM;
-
-	pos = skb_put(skb, len);
-	sprintf(pos, "%s@%s", signal, obj);
-
-	/* copy the environment key by key to our continuous buffer */
-	if (envp) {
-		int i;
-
-		for (i = 2; envp[i]; i++) {
-			len = strlen(envp[i]) + 1;
-			pos = skb_put(skb, len);
-			strcpy(pos, envp[i]);
-		}
-	}
-
-	NETLINK_CB(skb).dst_group = 1;
-	return netlink_broadcast(uevent_sock, skb, 0, 1, gfp_mask);
-}
-
-static int do_kobject_uevent(struct kobject *kobj, enum kobject_action action,
-			     struct attribute *attr, gfp_t gfp_mask)
-{
-	char *path;
-	char *attrpath;
-	char *signal;
-	int len;
-	int rc = -ENOMEM;
-
-	path = kobject_get_path(kobj, gfp_mask);
-	if (!path)
-		return -ENOMEM;
-
-	signal = action_to_string(action);
-	if (!signal)
-		return -EINVAL;
-
-	if (attr) {
-		len = strlen(path);
-		len += strlen(attr->name) + 2;
-		attrpath = kmalloc(len, gfp_mask);
-		if (!attrpath)
-			goto exit;
-		sprintf(attrpath, "%s/%s", path, attr->name);
-		rc = send_uevent(signal, attrpath, NULL, gfp_mask);
-		kfree(attrpath);
-	} else
-		rc = send_uevent(signal, path, NULL, gfp_mask);
-
-exit:
-	kfree(path);
-	return rc;
-}
-
-/**
- * kobject_uevent - notify userspace by sending event through netlink socket
- *
- * @signal: signal name
- * @kobj: struct kobject that the event is happening to
- * @attr: optional struct attribute the event belongs to
- */
-int kobject_uevent(struct kobject *kobj, enum kobject_action action,
-		   struct attribute *attr)
-{
-	return do_kobject_uevent(kobj, action, attr, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(kobject_uevent);
-
-int kobject_uevent_atomic(struct kobject *kobj, enum kobject_action action,
-			  struct attribute *attr)
-{
-	return do_kobject_uevent(kobj, action, attr, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(kobject_uevent_atomic);
-
-static int __init kobject_uevent_init(void)
-{
-	uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, 1, NULL,
-					    THIS_MODULE);
-
-	if (!uevent_sock) {
-		printk(KERN_ERR
-		       "kobject_uevent: unable to create netlink socket!\n");
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-postcore_initcall(kobject_uevent_init);
-
-#else
-static inline int send_uevent(const char *signal, const char *obj,
-			      char **envp, int gfp_mask)
-{
-	return 0;
-}
-
-#endif /* CONFIG_KOBJECT_UEVENT */
-
-
-#ifdef CONFIG_HOTPLUG
-char hotplug_path[HOTPLUG_PATH_LEN] = "/sbin/hotplug";
-u64 hotplug_seqnum;
-static DEFINE_SPINLOCK(sequence_lock);
-
-/**
- * kobject_hotplug - notify userspace by executing /sbin/hotplug
- *
- * @action: action that is happening (usually "ADD" or "REMOVE")
+ * @action: action that is happening (usually KOBJ_ADD and KOBJ_REMOVE)
 * @kobj: struct kobject that the action is happening to
 */
-void kobject_hotplug(struct kobject *kobj, enum kobject_action action)
+void kobject_uevent(struct kobject *kobj, enum kobject_action action)
 {
-	char *argv [3];
-	char **envp = NULL;
-	char *buffer = NULL;
-	char *seq_buff;
+	char **envp;
+	char *buffer;
 	char *scratch;
+	const char *action_string;
+	const char *devpath = NULL;
+	const char *subsystem;
+	struct kobject *top_kobj;
+	struct kset *kset;
+	struct kset_uevent_ops *uevent_ops;
+	u64 seq;
+	char *seq_buff;
 	int i = 0;
 	int retval;
-	char *kobj_path = NULL;
-	const char *name = NULL;
-	char *action_string;
-	u64 seq;
-	struct kobject *top_kobj = kobj;
-	struct kset *kset;
-	static struct kset_hotplug_ops null_hotplug_ops;
-	struct kset_hotplug_ops *hotplug_ops = &null_hotplug_ops;
 
-	/* If this kobj does not belong to a kset,
-	   try to find a parent that does. */
+	pr_debug("%s\n", __FUNCTION__);
+
+	action_string = action_to_string(action);
+	if (!action_string)
+		return;
+
+	/* search the kset we belong to */
+	top_kobj = kobj;
 	if (!top_kobj->kset && top_kobj->parent) {
 		do {
 			top_kobj = top_kobj->parent;
 		} while (!top_kobj->kset && top_kobj->parent);
 	}
-
-	if (top_kobj->kset)
-		kset = top_kobj->kset;
-	else
+	if (!top_kobj->kset)
 		return;
 
-	if (kset->hotplug_ops)
-		hotplug_ops = kset->hotplug_ops;
+	kset = top_kobj->kset;
+	uevent_ops = kset->uevent_ops;
 
-	/* If the kset has a filter operation, call it.
-	   Skip the event, if the filter returns zero. */
-	if (hotplug_ops->filter) {
-		if (!hotplug_ops->filter(kset, kobj))
+	/* skip the event, if the filter returns zero. */
+	if (uevent_ops && uevent_ops->filter)
+		if (!uevent_ops->filter(kset, kobj))
 			return;
-	}
 
-	pr_debug ("%s\n", __FUNCTION__);
-
-	action_string = action_to_string(action);
-	if (!action_string)
-		return;
-
-	envp = kmalloc(NUM_ENVP * sizeof (char *), GFP_KERNEL);
+	/* environment index */
+	envp = kzalloc(NUM_ENVP * sizeof (char *), GFP_KERNEL);
 	if (!envp)
 		return;
-	memset (envp, 0x00, NUM_ENVP * sizeof (char *));
 
+	/* environment values */
 	buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
 	if (!buffer)
 		goto exit;
 
-	if (hotplug_ops->name)
-		name = hotplug_ops->name(kset, kobj);
-	if (name == NULL)
-		name = kobject_name(&kset->kobj);
+	/* complete object path */
+	devpath = kobject_get_path(kobj, GFP_KERNEL);
+	if (!devpath)
+		goto exit;
 
-	argv [0] = hotplug_path;
-	argv [1] = (char *)name;	/* won't be changed but 'const' has to go */
-	argv [2] = NULL;
+	/* originating subsystem */
+	if (uevent_ops && uevent_ops->name)
+		subsystem = uevent_ops->name(kset, kobj);
+	else
+		subsystem = kobject_name(&kset->kobj);
 
-	/* minimal command environment */
-	envp [i++] = "HOME=/";
-	envp [i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+	/* event environemnt for helper process only */
+	envp[i++] = "HOME=/";
+	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
 
+	/* default keys */
 	scratch = buffer;
-	envp [i++] = scratch;
 	scratch += sprintf(scratch, "ACTION=%s", action_string) + 1;
-
-	kobj_path = kobject_get_path(kobj, GFP_KERNEL);
-	if (!kobj_path)
-		goto exit;
-
 	envp [i++] = scratch;
-	scratch += sprintf (scratch, "DEVPATH=%s", kobj_path) + 1;
-
+	scratch += sprintf (scratch, "DEVPATH=%s", devpath) + 1;
 	envp [i++] = scratch;
-	scratch += sprintf(scratch, "SUBSYSTEM=%s", name) + 1;
+	scratch += sprintf(scratch, "SUBSYSTEM=%s", subsystem) + 1;
 
-	/* reserve space for the sequence,
-	 * put the real one in after the hotplug call */
+	/* just reserve the space, overwrite it after kset call has returned */
 	envp[i++] = seq_buff = scratch;
 	scratch += strlen("SEQNUM=18446744073709551616") + 1;
 
-	if (hotplug_ops->hotplug) {
-		/* have the kset specific function add its stuff */
-		retval = hotplug_ops->hotplug (kset, kobj,
+	/* let the kset specific function add its stuff */
+	if (uevent_ops && uevent_ops->uevent) {
+		retval = uevent_ops->uevent(kset, kobj,
				  &envp[i], NUM_ENVP - i, scratch,
				  BUFFER_SIZE - (scratch - buffer));
		if (retval) {
-			pr_debug ("%s - hotplug() returned %d\n",
+			pr_debug ("%s - uevent() returned %d\n",
				  __FUNCTION__, retval);
			goto exit;
		}
	}
 
+	/* we will send an event, request a new sequence number */
 	spin_lock(&sequence_lock);
-	seq = ++hotplug_seqnum;
+	seq = ++uevent_seqnum;
 	spin_unlock(&sequence_lock);
 	sprintf(seq_buff, "SEQNUM=%llu", (unsigned long long)seq);
 
-	pr_debug ("%s: %s %s seq=%llu %s %s %s %s %s\n",
-		  __FUNCTION__, argv[0], argv[1], (unsigned long long)seq,
-		  envp[0], envp[1], envp[2], envp[3], envp[4]);
-
-	send_uevent(action_string, kobj_path, envp, GFP_KERNEL);
+	/* send netlink message */
+	if (uevent_sock) {
+		struct sk_buff *skb;
+		size_t len;
+
+		/* allocate message with the maximum possible size */
+		len = strlen(action_string) + strlen(devpath) + 2;
+		skb = alloc_skb(len + BUFFER_SIZE, GFP_KERNEL);
+		if (skb) {
+			/* add header */
+			scratch = skb_put(skb, len);
+			sprintf(scratch, "%s@%s", action_string, devpath);
+
+			/* copy keys to our continuous event payload buffer */
+			for (i = 2; envp[i]; i++) {
+				len = strlen(envp[i]) + 1;
+				scratch = skb_put(skb, len);
+				strcpy(scratch, envp[i]);
+			}
+
+			NETLINK_CB(skb).dst_group = 1;
+			netlink_broadcast(uevent_sock, skb, 0, 1, GFP_KERNEL);
+		}
+	}
 
-	if (!hotplug_path[0])
-		goto exit;
+	/* call uevent_helper, usually only enabled during early boot */
+	if (uevent_helper[0]) {
+		char *argv [3];
 
-	retval = call_usermodehelper (argv[0], argv, envp, 0);
-	if (retval)
-		pr_debug ("%s - call_usermodehelper returned %d\n",
-			  __FUNCTION__, retval);
+		argv [0] = uevent_helper;
+		argv [1] = (char *)subsystem;
+		argv [2] = NULL;
+		call_usermodehelper (argv[0], argv, envp, 0);
+	}
 
 exit:
-	kfree(kobj_path);
+	kfree(devpath);
 	kfree(buffer);
 	kfree(envp);
 	return;
 }
-EXPORT_SYMBOL(kobject_hotplug);
+EXPORT_SYMBOL_GPL(kobject_uevent);
 
 /**
- * add_hotplug_env_var - helper for creating hotplug environment variables
+ * add_uevent_var - helper for creating event variables
 * @envp: Pointer to table of environment variables, as passed into
- * hotplug() method.
+ * uevent() method.
 * @num_envp: Number of environment variable slots available, as
- * passed into hotplug() method.
+ * passed into uevent() method.
 * @cur_index: Pointer to current index into @envp.  It should be
- * initialized to 0 before the first call to add_hotplug_env_var(),
+ * initialized to 0 before the first call to add_uevent_var(),
 * and will be incremented on success.
 * @buffer: Pointer to buffer for environment variables, as passed
- * into hotplug() method.
- * @buffer_size: Length of @buffer, as passed into hotplug() method.
+ * into uevent() method.
+ * @buffer_size: Length of @buffer, as passed into uevent() method.
 * @cur_len: Pointer to current length of space used in @buffer.
 * Should be initialized to 0 before the first call to
- * add_hotplug_env_var(), and will be incremented on success.
+ * add_uevent_var(), and will be incremented on success.
 * @format: Format for creating environment variable (of the form
 * "XXX=%x") for snprintf().
 *
 * Returns 0 if environment variable was added successfully or -ENOMEM
 * if no space was available.
 */
-int add_hotplug_env_var(char **envp, int num_envp, int *cur_index,
-			char *buffer, int buffer_size, int *cur_len,
-			const char *format, ...)
+int add_uevent_var(char **envp, int num_envp, int *cur_index,
+		   char *buffer, int buffer_size, int *cur_len,
+		   const char *format, ...)
 {
 	va_list args;
 
 	/*
 	 * We check against num_envp - 1 to make sure there is at
-	 * least one slot left after we return, since the hotplug
-	 * method needs to set the last slot to NULL.
+	 * least one slot left after we return, since kobject_uevent()
+	 * needs to set the last slot to NULL.
 	 */
 	if (*cur_index >= num_envp - 1)
 		return -ENOMEM;
@@ -366,6 +241,22 @@ int add_hotplug_env_var(char **envp, int num_envp, int *cur_index,
 	(*cur_index)++;
 	return 0;
 }
-EXPORT_SYMBOL(add_hotplug_env_var);
+EXPORT_SYMBOL_GPL(add_uevent_var);
+
+static int __init kobject_uevent_init(void)
+{
+	uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, 1, NULL,
+					    THIS_MODULE);
+
+	if (!uevent_sock) {
+		printk(KERN_ERR
+		       "kobject_uevent: unable to create netlink socket!\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+postcore_initcall(kobject_uevent_init);
 
 #endif /* CONFIG_HOTPLUG */
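Subsystems feed extra variables into an event from their kset's uevent() callback using the renamed add_uevent_var() helper. A sketch of such a callback follows; demo_uevent(), demo_uevent_ops and the DEMO_NAME key are hypothetical, and the explicit NULL termination mirrors the convention of in-tree callbacks:

#include <linux/kobject.h>

static int demo_uevent(struct kset *kset, struct kobject *kobj,
		       char **envp, int num_envp,
		       char *buffer, int buffer_size)
{
	int i = 0;
	int len = 0;
	int retval;

	retval = add_uevent_var(envp, num_envp, &i,
				buffer, buffer_size, &len,
				"DEMO_NAME=%s", kobject_name(kobj));
	if (retval)
		return retval;

	envp[i] = NULL;		/* terminate our additions */
	return 0;
}

static struct kset_uevent_ops demo_uevent_ops = {
	.uevent	= demo_uevent,
};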
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 88511c3..c0bd4a9 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -137,18 +137,31 @@ out:
 static inline void tag_set(struct radix_tree_node *node, int tag, int offset)
 {
-	if (!test_bit(offset, &node->tags[tag][0]))
-		__set_bit(offset, &node->tags[tag][0]);
+	__set_bit(offset, node->tags[tag]);
 }
 
 static inline void tag_clear(struct radix_tree_node *node, int tag, int offset)
 {
-	__clear_bit(offset, &node->tags[tag][0]);
+	__clear_bit(offset, node->tags[tag]);
 }
 
 static inline int tag_get(struct radix_tree_node *node, int tag, int offset)
 {
-	return test_bit(offset, &node->tags[tag][0]);
+	return test_bit(offset, node->tags[tag]);
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, int tag)
+{
+	int idx;
+	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+		if (node->tags[tag][idx])
+			return 1;
+	}
+	return 0;
 }
 
 /*
@@ -185,15 +198,9 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
	 * into the newly-pushed top-level node(s) */
 	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
-		int idx;
-
 		tags[tag] = 0;
-		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-			if (root->rnode->tags[tag][idx]) {
-				tags[tag] = 1;
-				break;
-			}
-		}
+		if (any_tag_set(root->rnode, tag))
+			tags[tag] = 1;
 	}
 
 	do {
@@ -246,7 +253,7 @@ int radix_tree_insert(struct radix_tree_root *root,
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 	offset = 0;			/* uninitialised var warning */
-	while (height > 0) {
+	do {
 		if (slot == NULL) {
			/* Have to add a child node.  */
 			if (!(slot = radix_tree_node_alloc(root)))
@@ -264,18 +271,16 @@ int radix_tree_insert(struct radix_tree_root *root,
 		slot = node->slots[offset];
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
-	}
+	} while (height > 0);
 
 	if (slot != NULL)
 		return -EEXIST;
 
-	if (node) {
-		node->count++;
-		node->slots[offset] = item;
-		BUG_ON(tag_get(node, 0, offset));
-		BUG_ON(tag_get(node, 1, offset));
-	} else
-		root->rnode = item;
+	BUG_ON(!node);
+	node->count++;
+	node->slots[offset] = item;
+	BUG_ON(tag_get(node, 0, offset));
+	BUG_ON(tag_get(node, 1, offset));
 
 	return 0;
 }
@@ -367,7 +372,8 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
 		int offset;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		tag_set(slot, tag, offset);
+		if (!tag_get(slot, tag, offset))
+			tag_set(slot, tag, offset);
 		slot = slot->slots[offset];
 		BUG_ON(slot == NULL);
 		shift -= RADIX_TREE_MAP_SHIFT;
@@ -427,13 +433,11 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
 		goto out;
 
 	do {
-		int idx;
-
+		if (!tag_get(pathp->node, tag, pathp->offset))
+			goto out;
 		tag_clear(pathp->node, tag, pathp->offset);
-		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-			if (pathp->node->tags[tag][idx])
-				goto out;
-		}
+		if (any_tag_set(pathp->node, tag))
+			goto out;
 		pathp--;
 	} while (pathp->node);
 out:
@@ -674,6 +678,29 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
 
 /**
+ *	radix_tree_shrink    -    shrink height of a radix tree to minimal
+ *	@root		radix tree root
+ */
+static inline void radix_tree_shrink(struct radix_tree_root *root)
+{
+	/* try to shrink tree height */
+	while (root->height > 1 &&
+			root->rnode->count == 1 &&
+			root->rnode->slots[0]) {
+		struct radix_tree_node *to_free = root->rnode;
+
+		root->rnode = to_free->slots[0];
+		root->height--;
+		/* must only free zeroed nodes into the slab */
+		tag_clear(to_free, 0, 0);
+		tag_clear(to_free, 1, 0);
+		to_free->slots[0] = NULL;
+		to_free->count = 0;
+		radix_tree_node_free(to_free);
+	}
+}
+
+/**
 *	radix_tree_delete    -    delete an item from a radix tree
 *	@root:		radix tree root
 *	@index:		index key
@@ -691,6 +718,8 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	void *ret = NULL;
 	char tags[RADIX_TREE_TAGS];
 	int nr_cleared_tags;
+	int tag;
+	int offset;
 
 	height = root->height;
 	if (index > radix_tree_maxindex(height))
@@ -701,16 +730,14 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	slot = root->rnode;
 
 	for ( ; height > 0; height--) {
-		int offset;
-
 		if (slot == NULL)
 			goto out;
 
+		pathp++;
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		pathp[1].offset = offset;
-		pathp[1].node = slot;
+		pathp->offset = offset;
+		pathp->node = slot;
 		slot = slot->slots[offset];
-		pathp++;
 		shift -= RADIX_TREE_MAP_SHIFT;
 	}
 
@@ -723,35 +750,39 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
	/*
	 * Clear all tags associated with the just-deleted item
	 */
-	memset(tags, 0, sizeof(tags));
-	do {
-		int tag;
+	nr_cleared_tags = 0;
+	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+		if (tag_get(pathp->node, tag, pathp->offset)) {
+			tag_clear(pathp->node, tag, pathp->offset);
+			tags[tag] = 0;
+			nr_cleared_tags++;
+		} else
+			tags[tag] = 1;
+	}
 
-		nr_cleared_tags = RADIX_TREE_TAGS;
+	for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
 		for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
-			int idx;
-
 			if (tags[tag])
 				continue;
 
 			tag_clear(pathp->node, tag, pathp->offset);
-
-			for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-				if (pathp->node->tags[tag][idx]) {
-					tags[tag] = 1;
-					nr_cleared_tags--;
-					break;
-				}
+			if (any_tag_set(pathp->node, tag)) {
+				tags[tag] = 1;
+				nr_cleared_tags--;
 			}
 		}
-		pathp--;
-	} while (pathp->node && nr_cleared_tags);
+	}
 
	/* Now free the nodes we do not need anymore */
 	for (pathp = orig_pathp; pathp->node; pathp--) {
 		pathp->node->slots[pathp->offset] = NULL;
-		if (--pathp->node->count)
+		pathp->node->count--;
+
+		if (pathp->node->count) {
+			if (pathp->node == root->rnode)
+				radix_tree_shrink(root);
 			goto out;
+		}
 
		/* Node with zero slots in use so free it */
 		radix_tree_node_free(pathp->node);
@@ -770,15 +801,11 @@ EXPORT_SYMBOL(radix_tree_delete);
 */
 int radix_tree_tagged(struct radix_tree_root *root, int tag)
 {
-	int idx;
-
-	if (!root->rnode)
-		return 0;
-	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-		if (root->rnode->tags[tag][idx])
-			return 1;
-	}
-	return 0;
+	struct radix_tree_node *rnode;
+	rnode = root->rnode;
+	if (!rnode)
+		return 0;
+	return any_tag_set(rnode, tag);
 }
 EXPORT_SYMBOL(radix_tree_tagged);
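The consolidated tag helpers back the tree's public tag API, and the new radix_tree_shrink() runs transparently on deletion. A hypothetical in-kernel usage sketch of that public API (demo_tree and demo_store() are illustrative names):

#include <linux/radix-tree.h>
#include <linux/errno.h>

static RADIX_TREE(demo_tree, GFP_KERNEL);

static int demo_store(unsigned long index, void *item)
{
	int err = radix_tree_insert(&demo_tree, index, item);

	if (err)
		return err;

	radix_tree_tag_set(&demo_tree, index, 0);
	if (!radix_tree_tagged(&demo_tree, 0))
		return -EINVAL;		/* tag 0 was just set somewhere */

	radix_tree_tag_clear(&demo_tree, index, 0);
	radix_tree_delete(&demo_tree, index);	/* may now shrink the tree */
	return 0;
}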
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index dcd4be9..c8bb8cc 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -19,10 +19,11 @@ static void spin_bug(spinlock_t *lock, const char *msg)
 	if (xchg(&print_once, 0)) {
 		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
 			owner = lock->owner;
-		printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
+		printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
 			msg, raw_smp_processor_id(),
 			current->comm, current->pid);
-		printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
+		printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
+				".owner_cpu: %d\n",
 			lock, lock->magic,
 			owner ? owner->comm : "<none>",
 			owner ? owner->pid : -1,
@@ -78,7 +79,8 @@ static void __spin_lock_debug(spinlock_t *lock)
		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
+			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
+					"%s/%d, %p\n",
 				raw_smp_processor_id(), current->comm,
 				current->pid, lock);
 			dump_stack();
@@ -120,8 +122,8 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
 	static long print_once = 1;
 
 	if (xchg(&print_once, 0)) {
-		printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
-			raw_smp_processor_id(), current->comm,
+		printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
+			msg, raw_smp_processor_id(), current->comm,
 			current->pid, lock);
 		dump_stack();
 #ifdef CONFIG_SMP
@@ -149,7 +151,8 @@ static void __read_lock_debug(rwlock_t *lock)
		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
+			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
+					"%s/%d, %p\n",
 				raw_smp_processor_id(), current->comm,
 				current->pid, lock);
 			dump_stack();
@@ -221,7 +224,8 @@ static void __write_lock_debug(rwlock_t *lock)
		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
+			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
+					"%s/%d, %p\n",
 				raw_smp_processor_id(), current->comm,
 				current->pid, lock);
 			dump_stack();
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1ff8dce..0af497b 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -142,8 +142,7 @@ swiotlb_init_with_default_size (size_t default_size)
	/*
	 * Get IO TLB memory from the low pages
	 */
-	io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs *
-						     (1 << IO_TLB_SHIFT), 0x100000000);
+	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
@@ -464,7 +463,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		 */
 		dma_addr_t handle;
 		handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-		if (dma_mapping_error(handle))
+		if (swiotlb_dma_mapping_error(handle))
 			return NULL;
 
 		ret = phys_to_virt(handle);
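The swiotlb fix follows the general rule that a mapping must be checked with the error helper matching the API that produced it. A generic sketch of that pattern with the DMA API of this era; demo_map() is a hypothetical name:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int demo_map(struct device *dev, void *buf, size_t size)
{
	dma_addr_t handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

	/* check the handle with the helper that pairs with the API used */
	if (dma_mapping_error(handle))
		return -ENOMEM;

	dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);
	return 0;
}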
buffer"); io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); @@ -464,7 +463,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, */ dma_addr_t handle; handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); - if (dma_mapping_error(handle)) + if (swiotlb_dma_mapping_error(handle)) return NULL; ret = phys_to_virt(handle); diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c index ad9a1bf..1653dd9 100644 --- a/lib/zlib_deflate/deflate.c +++ b/lib/zlib_deflate/deflate.c @@ -255,6 +255,7 @@ int zlib_deflateInit2_( } /* ========================================================================= */ +#if 0 int zlib_deflateSetDictionary( z_streamp strm, const Byte *dictionary, @@ -297,6 +298,7 @@ int zlib_deflateSetDictionary( if (hash_head) hash_head = 0; /* to make compiler happy */ return Z_OK; } +#endif /* 0 */ /* ========================================================================= */ int zlib_deflateReset( @@ -330,6 +332,7 @@ int zlib_deflateReset( } /* ========================================================================= */ +#if 0 int zlib_deflateParams( z_streamp strm, int level, @@ -365,6 +368,7 @@ int zlib_deflateParams( s->strategy = strategy; return err; } +#endif /* 0 */ /* ========================================================================= * Put a short in the pending buffer. The 16-bit value is put in MSB order. @@ -572,6 +576,7 @@ int zlib_deflateEnd( /* ========================================================================= * Copy the source state to the destination state. */ +#if 0 int zlib_deflateCopy ( z_streamp dest, z_streamp source @@ -624,6 +629,7 @@ int zlib_deflateCopy ( return Z_OK; #endif } +#endif /* 0 */ /* =========================================================================== * Read a new buffer from the current input stream, update the adler32 diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c index 5985b28..767b573 100644 --- a/lib/zlib_deflate/deflate_syms.c +++ b/lib/zlib_deflate/deflate_syms.c @@ -16,6 +16,4 @@ EXPORT_SYMBOL(zlib_deflateInit_); EXPORT_SYMBOL(zlib_deflateInit2_); EXPORT_SYMBOL(zlib_deflateEnd); EXPORT_SYMBOL(zlib_deflateReset); -EXPORT_SYMBOL(zlib_deflateCopy); -EXPORT_SYMBOL(zlib_deflateParams); MODULE_LICENSE("GPL"); diff --git a/lib/zlib_inflate/infblock.c b/lib/zlib_inflate/infblock.c index 50f21ca4..c16cdef 100644 --- a/lib/zlib_inflate/infblock.c +++ b/lib/zlib_inflate/infblock.c @@ -338,6 +338,7 @@ int zlib_inflate_blocks_free( } +#if 0 void zlib_inflate_set_dictionary( inflate_blocks_statef *s, const Byte *d, @@ -347,15 +348,18 @@ void zlib_inflate_set_dictionary( memcpy(s->window, d, n); s->read = s->write = s->window + n; } +#endif /* 0 */ /* Returns true if inflate is currently at the end of a block generated * by Z_SYNC_FLUSH or Z_FULL_FLUSH. 
diff --git a/lib/zlib_inflate/infblock.c b/lib/zlib_inflate/infblock.c
index 50f21ca4..c16cdef 100644
--- a/lib/zlib_inflate/infblock.c
+++ b/lib/zlib_inflate/infblock.c
@@ -338,6 +338,7 @@ int zlib_inflate_blocks_free(
 }
 
 
+#if 0
 void zlib_inflate_set_dictionary(
 	inflate_blocks_statef *s,
 	const Byte *d,
@@ -347,15 +348,18 @@ void zlib_inflate_set_dictionary(
   memcpy(s->window, d, n);
   s->read = s->write = s->window + n;
 }
+#endif  /*  0  */
 
 
 /* Returns true if inflate is currently at the end of a block generated
 * by Z_SYNC_FLUSH or Z_FULL_FLUSH.
 * IN assertion: s != NULL
 */
+#if 0
 int zlib_inflate_blocks_sync_point(
 	inflate_blocks_statef *s
 )
 {
   return s->mode == LENS;
 }
+#endif  /*  0  */
diff --git a/lib/zlib_inflate/infblock.h b/lib/zlib_inflate/infblock.h
index f5221dd..ceee60b 100644
--- a/lib/zlib_inflate/infblock.h
+++ b/lib/zlib_inflate/infblock.h
@@ -33,12 +33,16 @@ extern int zlib_inflate_blocks_free (
 	inflate_blocks_statef *,
 	z_streamp);
 
+#if 0
 extern void zlib_inflate_set_dictionary (
 	inflate_blocks_statef *s,
 	const Byte *d,  /* dictionary */
 	uInt  n);       /* dictionary length */
+#endif  /*  0  */
 
+#if 0
 extern int zlib_inflate_blocks_sync_point (
 	inflate_blocks_statef *s);
+#endif  /*  0  */
 
 #endif /* _INFBLOCK_H */
diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c
index aa1b081..ef49738 100644
--- a/lib/zlib_inflate/inflate_syms.c
+++ b/lib/zlib_inflate/inflate_syms.c
@@ -15,8 +15,6 @@ EXPORT_SYMBOL(zlib_inflate);
 EXPORT_SYMBOL(zlib_inflateInit_);
 EXPORT_SYMBOL(zlib_inflateInit2_);
 EXPORT_SYMBOL(zlib_inflateEnd);
-EXPORT_SYMBOL(zlib_inflateSync);
 EXPORT_SYMBOL(zlib_inflateReset);
-EXPORT_SYMBOL(zlib_inflateSyncPoint);
 EXPORT_SYMBOL(zlib_inflateIncomp);
 MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/inflate_sync.c b/lib/zlib_inflate/inflate_sync.c
index e07bdb2..61411ff 100644
--- a/lib/zlib_inflate/inflate_sync.c
+++ b/lib/zlib_inflate/inflate_sync.c
@@ -7,6 +7,7 @@
 #include "infblock.h"
 #include "infutil.h"
 
+#if 0
 int zlib_inflateSync(
 	z_streamp z
 )
@@ -57,6 +58,7 @@ int zlib_inflateSync(
   z->state->mode = BLOCKS;
   return Z_OK;
 }
+#endif  /*  0  */
 
 
 /* Returns true if inflate is currently at the end of a block generated
@@ -66,6 +68,7 @@ int zlib_inflateSync(
 * decompressing, PPP checks that at the end of input packet, inflate is
 * waiting for these length bytes.
 */
+#if 0
 int zlib_inflateSyncPoint(
 	z_streamp z
 )
@@ -74,6 +77,7 @@ int zlib_inflateSyncPoint(
 	return Z_STREAM_ERROR;
     return zlib_inflate_blocks_sync_point(z->state->blocks);
 }
+#endif  /*  0  */
 
 /*
 * This subroutine adds the data at next_in/avail_in to the output history